repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
CNS-OIST/STEPS_Example | publication_models/API_1/Anwar_J Neurosci_2013/extra/constants_hh.py | 1 | 2809 | # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Okinawa Institute of Science and Technology, Japan.
#
# This script runs on STEPS 2.x http://steps.sourceforge.net
#
# H Anwar, I Hepburn, H Nedelescu, W Chen and E De Schutter
# Stochastic calcium mechanisms cause dendritic calcium spike variability
# J Neuroscience 2013
#
# constants_hh.py : provides a set of parameters and other constants for the
# Hodgkin-Huxley model in the above study.
# It is intended that this file is not altered.
#
# Script authors: Haroon Anwar and Iain Hepburn
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import math
# # # # # # # # # # # # # # # # SIMULATION CONTROLS # # # # # # # # # # # # #
EF_DT = 1.0e-5 # The EField dt (presumably seconds -- confirm against STEPS setup)
NTIMEPOINTS = 5000 # number of recorded timepoints
TIMECONVERTER = 1.0e-5 # time per timepoint; NTIMEPOINTS*TIMECONVERTER = total sim time
NITER = 1 # number of simulation iterations
############################ PARAMETERS ################################
init_pot = -65e-3 # initial membrane potential (volts)
TEMPERATURE = 20.0 # simulation temperature (degrees Celsius)
Q10 = 3 # temperature coefficient for channel kinetics
# Rate scaling factor relative to the original HH reference temperature (6.3 C)
Qt = math.pow(Q10, ((TEMPERATURE-6.3)/10))
########## BULK RESISTIVITY ##########
Ra = 1.0 # bulk resistivity; units presumably ohm.m -- TODO confirm
########## MEMBRANE CAPACITANCE ##########
memb_capac = 1.0e-2 # membrane capacitance; 1.0e-2 F/m^2 equals the classic 1 uF/cm^2
# # # # # # # # # # # # # # # # # # CHANNELS # # # # # # # # # # # # # # # #
# Voltage range for gating kinetics in Volts: [min, max, step]
Vrange = [-100.0e-3, 50e-3, 1e-4]
# Hodgkin-Huxley gating kinetics
def a_n(V):
    """Potassium activation rate alpha_n (Hodgkin-Huxley kinetics).

    V is the membrane potential in mV (the +65 shift rebases to the HH
    resting potential). Returns the rate in the model's 1/ms units.

    Fix: the original expression is 0/0 at V == -55 mV (where
    10 - (V+65) == 0) and raised ZeroDivisionError; the removable
    singularity is replaced by its analytic limit.
    """
    x = 10 - (V + 65.)
    if x == 0:
        # lim x->0 of 0.01*x/(exp(x/10)-1) = 0.01*10 = 0.1 (L'Hopital)
        return 0.1
    return 0.01 * x / (math.exp(x / 10.) - 1)
def b_n(V):
    """Potassium deactivation rate beta_n (Hodgkin-Huxley kinetics, V in mV)."""
    v_shifted = V + 65.
    return 0.125 * math.exp(-v_shifted / 80.)
def a_m(V):
    """Sodium activation rate alpha_m (Hodgkin-Huxley kinetics).

    V is the membrane potential in mV (the +65 shift rebases to the HH
    resting potential). Returns the rate in the model's 1/ms units.

    Fix: the original expression is 0/0 at V == -40 mV (where
    25 - (V+65) == 0) and raised ZeroDivisionError; the removable
    singularity is replaced by its analytic limit.
    """
    x = 25 - (V + 65.)
    if x == 0:
        # lim x->0 of 0.1*x/(exp(x/10)-1) = 0.1*10 = 1.0 (L'Hopital)
        return 1.0
    return 0.1 * x / (math.exp(x / 10.) - 1)
def b_m(V):
    """Sodium deactivation rate beta_m (Hodgkin-Huxley kinetics, V in mV)."""
    v_shifted = V + 65.
    return 4. * math.exp(-v_shifted / 18.)
def a_h(V):
    """Sodium inactivation rate alpha_h (Hodgkin-Huxley kinetics, V in mV)."""
    v_shifted = V + 65.
    return 0.07 * math.exp(-v_shifted / 20.)
def b_h(V):
    """Sodium de-inactivation rate beta_h (Hodgkin-Huxley kinetics, V in mV)."""
    exponent = (30 - (V + 65.)) / 10.
    return 1. / (math.exp(exponent) + 1)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Macroscopic conductances being reproduced stochastically:
# Potassium conductance = 0.036 S/cm2
# Sodium conductance = 0.120 S/cm2
# Potassium single-channel conductance
K_G = 20.0e-12 # Siemens
# Potassium channel density (K_G * K_ro = 360 S/m^2 = 0.036 S/cm2)
K_ro = 18.0e12 # per square meter
# Potassium reversal potential
K_rev = -77e-3 # volts
# Sodium single-channel conductance
Na_G = 20.0e-12 # Siemens
# Sodium channel density (Na_G * Na_ro = 1200 S/m^2 = 0.120 S/cm2)
Na_ro = 60.0e12 # per square meter
# Sodium reversal potential
Na_rev = 50e-3 # volts
# Leak single-channel conductance
L_G = 1.0e-12 # Siemens
# Leak density
L_ro = 10.0e12 # per square meter
# Leak reversal potential
leak_rev = -50.0e-3 # volts
# A table of potassium channel initial population factors (sum to ~1):
# n0, n1, n2, n3, n4
K_facs = [ 0.21768, 0.40513, 0.28093, 0.08647, 0.00979 ]
# A table of sodium channel initial population factors (sum to ~1)
# m0h0, m1h0, m2h0, m3h0, m0h1, m1h1, m2h1, m3h1:
Na_facs = [ 0.34412, 0.05733, 0.00327, 6.0e-05, \
            0.50558, 0.08504, 0.00449, 0.00010 ]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
| gpl-2.0 |
davidhax0r/Rocket | flask/lib/python2.7/site-packages/yaml/constructor.py | 391 | 25145 |
# Public names exported by this module.
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
    'ConstructorError']
from error import *
from nodes import *
import datetime
import binascii, re, sys, types

class ConstructorError(MarkedYAMLError):
    """Raised when a YAML node cannot be converted into a native object."""
    pass
class BaseConstructor(object):
    """Lowest constructor layer: turns a parsed YAML node graph into
    Python objects.

    Keeps per-document caches so aliased (anchored) nodes are constructed
    only once, and supports two-phase generator-based constructors so that
    recursive structures can be built. (Python 2 code.)
    """

    # Tag -> constructor registries; copied on first write per subclass by
    # add_constructor / add_multi_constructor (copy-on-write).
    yaml_constructors = {}
    yaml_multi_constructors = {}

    def __init__(self):
        self.constructed_objects = {}   # node -> finished object
        self.recursive_objects = {}     # nodes currently under construction
        self.state_generators = []      # suspended generator constructors
        self.deep_construct = False     # construct children eagerly when True

    def check_data(self):
        # If there are more documents available?
        return self.check_node()

    def get_data(self):
        # Construct and return the next document.
        if self.check_node():
            return self.construct_document(self.get_node())

    def get_single_data(self):
        # Ensure that the stream contains a single document and construct it.
        node = self.get_single_node()
        if node is not None:
            return self.construct_document(node)
        return None

    def construct_document(self, node):
        data = self.construct_object(node)
        # Drain suspended generator constructors so deferred state (e.g. for
        # recursive objects) gets filled in before the document is returned.
        while self.state_generators:
            state_generators = self.state_generators
            self.state_generators = []
            for generator in state_generators:
                for dummy in generator:
                    pass
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.deep_construct = False
        return data

    def construct_object(self, node, deep=False):
        # Aliased nodes: return the already-constructed object.
        if node in self.constructed_objects:
            return self.constructed_objects[node]
        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True
        if node in self.recursive_objects:
            raise ConstructorError(None, None,
                    "found unconstructable recursive node", node.start_mark)
        self.recursive_objects[node] = None
        # Select a constructor: exact tag match first, then a matching tag
        # prefix, then the None fallbacks, finally by node kind.
        constructor = None
        tag_suffix = None
        if node.tag in self.yaml_constructors:
            constructor = self.yaml_constructors[node.tag]
        else:
            for tag_prefix in self.yaml_multi_constructors:
                if node.tag.startswith(tag_prefix):
                    tag_suffix = node.tag[len(tag_prefix):]
                    constructor = self.yaml_multi_constructors[tag_prefix]
                    break
            else:
                if None in self.yaml_multi_constructors:
                    tag_suffix = node.tag
                    constructor = self.yaml_multi_constructors[None]
                elif None in self.yaml_constructors:
                    constructor = self.yaml_constructors[None]
                elif isinstance(node, ScalarNode):
                    constructor = self.__class__.construct_scalar
                elif isinstance(node, SequenceNode):
                    constructor = self.__class__.construct_sequence
                elif isinstance(node, MappingNode):
                    constructor = self.__class__.construct_mapping
        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)
        # Generator constructors yield the (empty) object first and fill its
        # state afterwards; drain now when deep, else defer to
        # construct_document.
        if isinstance(data, types.GeneratorType):
            generator = data
            data = generator.next()
            if self.deep_construct:
                for dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)
        self.constructed_objects[node] = data
        del self.recursive_objects[node]
        if deep:
            self.deep_construct = old_deep
        return data

    def construct_scalar(self, node):
        if not isinstance(node, ScalarNode):
            raise ConstructorError(None, None,
                    "expected a scalar node, but found %s" % node.id,
                    node.start_mark)
        return node.value

    def construct_sequence(self, node, deep=False):
        if not isinstance(node, SequenceNode):
            raise ConstructorError(None, None,
                    "expected a sequence node, but found %s" % node.id,
                    node.start_mark)
        return [self.construct_object(child, deep=deep)
                for child in node.value]

    def construct_mapping(self, node, deep=False):
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            # Keys must be hashable to be usable in a dict.
            try:
                hash(key)
            except TypeError, exc:
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unacceptable key (%s)" % exc, key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping

    def construct_pairs(self, node, deep=False):
        # Like construct_mapping, but preserves order and duplicates as a
        # list of (key, value) tuples.
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        pairs = []
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            value = self.construct_object(value_node, deep=deep)
            pairs.append((key, value))
        return pairs

    def add_constructor(cls, tag, constructor):
        # Copy-on-write so registering on a subclass does not mutate the
        # registry of its base class.
        if not 'yaml_constructors' in cls.__dict__:
            cls.yaml_constructors = cls.yaml_constructors.copy()
        cls.yaml_constructors[tag] = constructor
    add_constructor = classmethod(add_constructor)

    def add_multi_constructor(cls, tag_prefix, multi_constructor):
        if not 'yaml_multi_constructors' in cls.__dict__:
            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
    add_multi_constructor = classmethod(add_multi_constructor)
class SafeConstructor(BaseConstructor):
    """Constructor for the standard YAML 1.1 tags only.

    Safe to use on untrusted input: it never imports modules or
    instantiates arbitrary Python objects. (Python 2 code.)
    """

    def construct_scalar(self, node):
        # Resolve a '=' (value) key inside a mapping before falling back to
        # plain scalar construction.
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == u'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return BaseConstructor.construct_scalar(self, node)

    def flatten_mapping(self, node):
        # Expand '<<' merge keys in place; merged entries are prepended so
        # the mapping's own keys take precedence.
        merge = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == u'tag:yaml.org,2002:merge':
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError("while constructing a mapping",
                                    node.start_mark,
                                    "expected a mapping for merging, but found %s"
                                    % subnode.id, subnode.start_mark)
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    # Earlier mappings in the sequence win, so merge in
                    # reverse order.
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError("while constructing a mapping", node.start_mark,
                            "expected a mapping or list of mappings for merging, but found %s"
                            % value_node.id, value_node.start_mark)
            elif key_node.tag == u'tag:yaml.org,2002:value':
                key_node.tag = u'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if merge:
            node.value = merge + node.value

    def construct_mapping(self, node, deep=False):
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return BaseConstructor.construct_mapping(self, node, deep=deep)

    def construct_yaml_null(self, node):
        self.construct_scalar(node)
        return None

    # Recognized boolean scalar spellings (input is lowercased first).
    bool_values = {
        u'yes':     True,
        u'no':      False,
        u'true':    True,
        u'false':   False,
        u'on':      True,
        u'off':     False,
    }

    def construct_yaml_bool(self, node):
        value = self.construct_scalar(node)
        return self.bool_values[value.lower()]

    def construct_yaml_int(self, node):
        # Handles sign, '_' separators, binary/hex/octal prefixes and
        # base-60 (sexagesimal) colon notation.
        value = str(self.construct_scalar(node))
        value = value.replace('_', '')
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '0':
            return 0
        elif value.startswith('0b'):
            return sign*int(value[2:], 2)
        elif value.startswith('0x'):
            return sign*int(value[2:], 16)
        elif value[0] == '0':
            return sign*int(value, 8)
        elif ':' in value:
            # Sexagesimal, e.g. '1:30' == 90.
            digits = [int(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*int(value)

    # Compute +inf portably by repeated squaring until overflow saturates;
    # used for the '.inf' / '.nan' float spellings below.
    inf_value = 1e300
    while inf_value != inf_value*inf_value:
        inf_value *= inf_value
    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).

    def construct_yaml_float(self, node):
        value = str(self.construct_scalar(node))
        value = value.replace('_', '').lower()
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '.inf':
            return sign*self.inf_value
        elif value == '.nan':
            return self.nan_value
        elif ':' in value:
            # Sexagesimal float, e.g. '1:30.5' == 90.5.
            digits = [float(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*float(value)

    def construct_yaml_binary(self, node):
        value = self.construct_scalar(node)
        try:
            return str(value).decode('base64')
        except (binascii.Error, UnicodeEncodeError), exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)

    # ISO 8601-style timestamp; re.X ignores the literal layout whitespace.
    timestamp_regexp = re.compile(
            ur'''^(?P<year>[0-9][0-9][0-9][0-9])
                -(?P<month>[0-9][0-9]?)
                -(?P<day>[0-9][0-9]?)
                (?:(?:[Tt]|[ \t]+)
                (?P<hour>[0-9][0-9]?)
                :(?P<minute>[0-9][0-9])
                :(?P<second>[0-9][0-9])
                (?:\.(?P<fraction>[0-9]*))?
                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)

    def construct_yaml_timestamp(self, node):
        value = self.construct_scalar(node)
        match = self.timestamp_regexp.match(node.value)
        values = match.groupdict()
        year = int(values['year'])
        month = int(values['month'])
        day = int(values['day'])
        # Date-only form.
        if not values['hour']:
            return datetime.date(year, month, day)
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = int(values['second'])
        fraction = 0
        if values['fraction']:
            # Normalize the fractional part to microseconds (6 digits).
            fraction = values['fraction'][:6]
            while len(fraction) < 6:
                fraction += '0'
            fraction = int(fraction)
        delta = None
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            tz_minute = int(values['tz_minute'] or 0)
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
        if delta:
            # Shift to UTC; the result is a naive datetime.
            data -= delta
        return data

    def construct_yaml_omap(self, node):
        # Note: we do not check for duplicate keys, because it's too
        # CPU-expensive.
        # Generator constructor: yield the list first so aliases resolve,
        # then fill it in.
        omap = []
        yield omap
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing an ordered map", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            omap.append((key, value))

    def construct_yaml_pairs(self, node):
        # Note: the same code as `construct_yaml_omap`.
        pairs = []
        yield pairs
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing pairs", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            pairs.append((key, value))

    def construct_yaml_set(self, node):
        data = set()
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_str(self, node):
        value = self.construct_scalar(node)
        # Prefer a plain str when the value is pure ASCII (Python 2).
        try:
            return value.encode('ascii')
        except UnicodeEncodeError:
            return value

    def construct_yaml_seq(self, node):
        data = []
        yield data
        data.extend(self.construct_sequence(node))

    def construct_yaml_map(self, node):
        data = {}
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_object(self, node, cls):
        # Create the instance without calling __init__, then restore state
        # via __setstate__ or the instance __dict__.
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            state = self.construct_mapping(node, deep=True)
            data.__setstate__(state)
        else:
            state = self.construct_mapping(node)
            data.__dict__.update(state)

    def construct_undefined(self, node):
        raise ConstructorError(None, None,
                "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
                node.start_mark)
# Register the constructors for all standard YAML 1.1 tags on
# SafeConstructor; the final None entry is the unknown-tag fallback.
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:null',
        SafeConstructor.construct_yaml_null)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:bool',
        SafeConstructor.construct_yaml_bool)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:int',
        SafeConstructor.construct_yaml_int)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:float',
        SafeConstructor.construct_yaml_float)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:binary',
        SafeConstructor.construct_yaml_binary)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:timestamp',
        SafeConstructor.construct_yaml_timestamp)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:omap',
        SafeConstructor.construct_yaml_omap)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:pairs',
        SafeConstructor.construct_yaml_pairs)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:set',
        SafeConstructor.construct_yaml_set)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:str',
        SafeConstructor.construct_yaml_str)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:seq',
        SafeConstructor.construct_yaml_seq)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:map',
        SafeConstructor.construct_yaml_map)

SafeConstructor.add_constructor(None,
        SafeConstructor.construct_undefined)
class Constructor(SafeConstructor):
def construct_python_str(self, node):
return self.construct_scalar(node).encode('utf-8')
def construct_python_unicode(self, node):
return self.construct_scalar(node)
def construct_python_long(self, node):
return long(self.construct_yaml_int(node))
def construct_python_complex(self, node):
return complex(self.construct_scalar(node))
def construct_python_tuple(self, node):
return tuple(self.construct_sequence(node))
def find_python_module(self, name, mark):
if not name:
raise ConstructorError("while constructing a Python module", mark,
"expected non-empty name appended to the tag", mark)
try:
__import__(name)
except ImportError, exc:
raise ConstructorError("while constructing a Python module", mark,
"cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
return sys.modules[name]
def find_python_name(self, name, mark):
if not name:
raise ConstructorError("while constructing a Python object", mark,
"expected non-empty name appended to the tag", mark)
if u'.' in name:
module_name, object_name = name.rsplit('.', 1)
else:
module_name = '__builtin__'
object_name = name
try:
__import__(module_name)
except ImportError, exc:
raise ConstructorError("while constructing a Python object", mark,
"cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
module = sys.modules[module_name]
if not hasattr(module, object_name):
raise ConstructorError("while constructing a Python object", mark,
"cannot find %r in the module %r" % (object_name.encode('utf-8'),
module.__name__), mark)
return getattr(module, object_name)
def construct_python_name(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python name", node.start_mark,
"expected the empty value, but found %r" % value.encode('utf-8'),
node.start_mark)
return self.find_python_name(suffix, node.start_mark)
def construct_python_module(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python module", node.start_mark,
"expected the empty value, but found %r" % value.encode('utf-8'),
node.start_mark)
return self.find_python_module(suffix, node.start_mark)
class classobj: pass
def make_python_instance(self, suffix, node,
args=None, kwds=None, newobj=False):
if not args:
args = []
if not kwds:
kwds = {}
cls = self.find_python_name(suffix, node.start_mark)
if newobj and isinstance(cls, type(self.classobj)) \
and not args and not kwds:
instance = self.classobj()
instance.__class__ = cls
return instance
elif newobj and isinstance(cls, type):
return cls.__new__(cls, *args, **kwds)
else:
return cls(*args, **kwds)
def set_python_instance_state(self, instance, state):
if hasattr(instance, '__setstate__'):
instance.__setstate__(state)
else:
slotstate = {}
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if hasattr(instance, '__dict__'):
instance.__dict__.update(state)
elif state:
slotstate.update(state)
for key, value in slotstate.items():
setattr(object, key, value)
def construct_python_object(self, suffix, node):
# Format:
# !!python/object:module.name { ... state ... }
instance = self.make_python_instance(suffix, node, newobj=True)
yield instance
deep = hasattr(instance, '__setstate__')
state = self.construct_mapping(node, deep=deep)
self.set_python_instance_state(instance, state)
def construct_python_object_apply(self, suffix, node, newobj=False):
# Format:
# !!python/object/apply # (or !!python/object/new)
# args: [ ... arguments ... ]
# kwds: { ... keywords ... }
# state: ... state ...
# listitems: [ ... listitems ... ]
# dictitems: { ... dictitems ... }
# or short format:
# !!python/object/apply [ ... arguments ... ]
# The difference between !!python/object/apply and !!python/object/new
# is how an object is created, check make_python_instance for details.
if isinstance(node, SequenceNode):
args = self.construct_sequence(node, deep=True)
kwds = {}
state = {}
listitems = []
dictitems = {}
else:
value = self.construct_mapping(node, deep=True)
args = value.get('args', [])
kwds = value.get('kwds', {})
state = value.get('state', {})
listitems = value.get('listitems', [])
dictitems = value.get('dictitems', {})
instance = self.make_python_instance(suffix, node, args, kwds, newobj)
if state:
self.set_python_instance_state(instance, state)
if listitems:
instance.extend(listitems)
if dictitems:
for key in dictitems:
instance[key] = dictitems[key]
return instance
def construct_python_object_new(self, suffix, node):
return self.construct_python_object_apply(suffix, node, newobj=True)
# Register the unsafe python/* tag constructors on Constructor; the
# trailing multi-constructors dispatch on the tag suffix (name/module/etc.).
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/none',
    Constructor.construct_yaml_null)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/bool',
    Constructor.construct_yaml_bool)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/str',
    Constructor.construct_python_str)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/unicode',
    Constructor.construct_python_unicode)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/int',
    Constructor.construct_yaml_int)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/long',
    Constructor.construct_python_long)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/float',
    Constructor.construct_yaml_float)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/complex',
    Constructor.construct_python_complex)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/list',
    Constructor.construct_yaml_seq)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/tuple',
    Constructor.construct_python_tuple)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/dict',
    Constructor.construct_yaml_map)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/name:',
    Constructor.construct_python_name)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/module:',
    Constructor.construct_python_module)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object:',
    Constructor.construct_python_object)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object/apply:',
    Constructor.construct_python_object_apply)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object/new:',
    Constructor.construct_python_object_new)
| mit |
krikru/tensorflow-opencl | tensorflow/python/kernel_tests/stack_ops_test.py | 64 | 5580 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.stack_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class StackOpTest(test.TestCase):
    """Tests for the (private) stack kernels in gen_data_flow_ops.

    Each scenario has a _test* helper parameterized by use_gpu and a public
    test* method that runs it on both CPU and GPU.
    """

    def _testStackPushPop(self, use_gpu):
        # Push one value and pop it back; the round trip must preserve it.
        with self.test_session(use_gpu=use_gpu):
            h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
            with ops.control_dependencies([c]):
                c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32)
            self.assertAllClose([[4.0, 5.0]], c1.eval())

    def testStackPushPop(self):
        self._testStackPushPop(use_gpu=False)
        self._testStackPushPop(use_gpu=True)

    def _testStackPushPopSwap(self, use_gpu):
        # Same round trip, but with swap_memory=True (host-memory spilling).
        with self.test_session(use_gpu=use_gpu):
            a = np.arange(2000)
            x = constant_op.constant(a, dtype=dtypes.float32)
            h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c = gen_data_flow_ops._stack_push(h, x, swap_memory=True)
            with ops.control_dependencies([c]):
                c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32)
            self.assertAllClose(a, c1.eval())

    def testStackPushPopSwap(self):
        self._testStackPushPopSwap(use_gpu=False)
        self._testStackPushPopSwap(use_gpu=True)

    def _testStackWhileSwap(self, use_gpu):
        # Push inside one while_loop (with memory swapping), then pop and
        # accumulate inside a second while_loop.
        with self.test_session(use_gpu=use_gpu):
            n = constant_op.constant(0)
            h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")

            def c(x):
                return math_ops.less(x, 10)

            def b(x):
                with ops.control_dependencies([x]):
                    a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
                    v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
                with ops.control_dependencies([v]):
                    return math_ops.add(x, 1)
            r = control_flow_ops.while_loop(c, b, [n])

            v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)

            def c1(x, y):
                return math_ops.greater(x, 0)

            def b1(x, y):
                nx = math_ops.subtract(x, 1)
                ny = y + gen_data_flow_ops._stack_pop(h, dtypes.float32)
                return [nx, ny]
            rx, ry = control_flow_ops.while_loop(
                c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
            # Ten pushes of ones, summed on the way back out.
            self.assertAllClose(np.ones(2000) * 10.0, ry.eval())

    def testStackWhileSwap(self):
        self._testStackWhileSwap(use_gpu=False)
        self._testStackWhileSwap(use_gpu=True)

    def _testMultiStack(self, use_gpu):
        # Two independently named stacks must not interfere.
        with self.test_session(use_gpu=use_gpu):
            h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c1 = gen_data_flow_ops._stack_push(h1, 4.0)
            with ops.control_dependencies([c1]):
                c1 = gen_data_flow_ops._stack_pop(h1, dtypes.float32)
            h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="bar")
            c2 = gen_data_flow_ops._stack_push(h2, 5.0)
            with ops.control_dependencies([c2]):
                c2 = gen_data_flow_ops._stack_pop(h2, dtypes.float32)
            r = c1 + c2
            self.assertAllClose(9.0, r.eval())

    def testMultiStack(self):
        self._testMultiStack(use_gpu=False)
        self._testMultiStack(use_gpu=True)

    def _testSameNameStacks(self, use_gpu):
        # Two stacks sharing a stack_name must still get distinct handles.
        with self.test_session(use_gpu=use_gpu):
            h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c1 = gen_data_flow_ops._stack_push(h1, 4.0)
            h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c2 = gen_data_flow_ops._stack_push(h2, 5.0)
            r = c1 + c2
            self.assertNotEqual(h1.eval()[1], h2.eval()[1])

    def testSameNameStacks(self):
        self._testSameNameStacks(use_gpu=False)
        self._testSameNameStacks(use_gpu=True)

    def _testCloseStack(self, use_gpu):
        # Closing a fresh stack must succeed.
        with self.test_session(use_gpu=use_gpu) as sess:
            h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c1 = gen_data_flow_ops._stack_close(h)
            sess.run(c1)

    def testCloseStack(self):
        self._testCloseStack(use_gpu=False)
        self._testCloseStack(use_gpu=True)

    def _testPushCloseStack(self, use_gpu):
        # Closing a stack that still holds a pushed value must succeed.
        with self.test_session(use_gpu=use_gpu) as sess:
            h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
            with ops.control_dependencies([c]):
                c1 = gen_data_flow_ops._stack_close(h)
            sess.run(c1)

    def testPushCloseStack(self):
        self._testPushCloseStack(use_gpu=False)
        self._testPushCloseStack(use_gpu=True)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    test.main()
| apache-2.0 |
LuckDragon82/demo | boilerplate/lib/basehandler.py | 8 | 12971 | # *-* coding: UTF-8 *-*
# standard library imports
import logging
import re
import traceback
import sys
# related third party imports
import webapp2
from google.appengine.api.users import NotAllowedError
from webapp2_extras import jinja2
from webapp2_extras import auth
from webapp2_extras import sessions
from google.appengine.api import taskqueue
# local application/library specific imports
from boilerplate import models
from boilerplate.lib import utils, i18n
from babel import Locale
def user_required(handler):
    """
    Decorator for checking if there's a user associated
    with the current session.
    Will also fail if there's no session present.
    """
    def check_login(self, *args, **kwargs):
        """
        If handler has no login_url specified invoke a 403 error
        """
        try:
            auth = self.auth.get_user_by_session()
            if not auth:
                # No user in the session: send to the login page, preserving
                # the originally requested path as continue_url.
                try:
                    self.auth_config['login_url'] = self.uri_for('login', continue_url=self.request.path)
                    self.redirect(self.auth_config['login_url'], abort=True)
                except (AttributeError, KeyError), e:
                    self.abort(403)
            else:
                return handler(self, *args, **kwargs)
        except AttributeError, e:
            # avoid AttributeError when the session was deleted from the server
            logging.error(e)
            self.auth.unset_session()
            self.redirect_to('home')
    return check_login
def generate_csrf_token():
    """Return the current session's CSRF token, minting one on first use."""
    session = sessions.get_store().get_session()
    try:
        return session['_csrf_token']
    except KeyError:
        token = utils.random_string()
        session['_csrf_token'] = token
        return token
def jinja2_factory(app):
    """Build the app's Jinja2 renderer with project filters/globals/tests."""
    renderer = jinja2.Jinja2(app)
    env = renderer.environment
    # Custom template filters (none defined yet).
    env.filters.update({})
    # Names made available inside every template.
    env.globals.update({
        'csrf_token': generate_csrf_token,
        'uri_for': webapp2.uri_for,
        'getattr': getattr,
        'str': str,
    })
    # Custom template tests (none defined yet).
    env.tests.update({})
    return renderer
def handle_error(request, response, exception):
    """Global webapp2 error handler.

    Emails the traceback to every configured developer (unless developer
    mail is disabled, or the failure is the email-log error itself, which
    would loop), then renders the status-specific error template.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    c = {
        'exception': str(exception),
        'url': request.url,
    }
    # Deliberately 'is not False': only an explicit False disables the mail.
    if request.app.config.get('send_mail_developer') is not False:
        subject = "[{}] ERROR {}".format(request.app.config.get('environment').upper(), request.app.config.get('app_name'))
        lines = traceback.format_exception(exc_type, exc_value, exc_tb)
        message = '<strong>Type:</strong> ' + exc_type.__name__ + "<br />" + \
                  '<strong>Description:</strong> ' + c['exception'] + "<br />" + \
                  '<strong>URL:</strong> ' + c['url'] + "<br />" + \
                  '<strong>Traceback:</strong> <br />' + '<br />'.join(lines)
        email_body_path = "emails/error.txt"
        # BUG FIX: the original used "is not", which compares string
        # identity, so this guard could never trigger; "!=" compares values.
        if c['exception'] != 'Error saving Email Log in datastore':
            template_val = {
                "app_name": request.app.config.get('app_name'),
                "message": message,
            }
            email_body = jinja2.get_jinja2(factory=jinja2_factory, app=webapp2.get_app()).render_template(email_body_path, **template_val)
            email_url = webapp2.uri_for('taskqueue-send-email')
            for dev in request.app.config.get('developers'):
                taskqueue.add(url=email_url, params={
                    'to': dev[1],
                    'subject': subject,
                    'body': email_body,
                    'sender': request.app.config.get('contact_sender'),
                })
    # Fall back to 500 when the exception carries no HTTP status code.
    status_int = getattr(exception, 'status_int', 500)
    template = request.app.config.get('error_templates')[status_int]
    t = jinja2.get_jinja2(factory=jinja2_factory, app=webapp2.get_app()).render_template(template, **c)
    logging.error(str(status_int) + " - " + str(exception))
    response.write(t)
    response.set_status(status_int)
class ViewClass:
    """
    ViewClass to insert variables into the template.

    ViewClass is used in BaseHandler to promote variables automatically that can be used
    in jinja2 templates.
    Use case in a BaseHandler Class:
        self.view.var1 = "hello"
        self.view.array = [1, 2, 3]
        self.view.dict = dict(a="abc", b="bcd")
    Can be accessed in the template by just using the variables like {{var1}} or {{dict.b}}
    """
    # Intentionally empty: instances act as plain attribute bags.
    pass
class BaseHandler(webapp2.RequestHandler):
    """
    BaseHandler for all requests
    Holds the auth and session properties so they
    are reachable for all requests
    """
    def __init__(self, request, response):
        """ Override the initialiser in order to set the language.
        """
        self.initialize(request, response)
        # Resolve the request locale (cookie/header/param) before dispatch.
        self.locale = i18n.set_locale(self)
        # Attribute bag whose contents are injected into every template.
        self.view = ViewClass()
    def dispatch(self):
        """
        Get a session store for this request, enforce CSRF protection on
        POSTs, dispatch, and always persist the sessions afterwards.
        """
        self.session_store = sessions.get_store(request=self.request)
        try:
            # csrf protection
            # Taskqueue callbacks are exempt: they originate from App Engine
            # itself and carry no session cookie.
            if self.request.method == "POST" and not self.request.path.startswith('/taskqueue'):
                token = self.session.get('_csrf_token')
                if not token or token != self.request.get('_csrf_token'):
                    self.abort(403)
            # Dispatch the request.
            webapp2.RequestHandler.dispatch(self)
        finally:
            # Save all sessions.
            self.session_store.save_sessions(self.response)
    @webapp2.cached_property
    def auth(self):
        # webapp2_extras auth service for this request.
        return auth.get_auth()
    @webapp2.cached_property
    def session_store(self):
        # Fallback store; dispatch() normally assigns one explicitly first.
        return sessions.get_store(request=self.request)
    @webapp2.cached_property
    def session(self):
        # Returns a session using the default cookie key.
        return self.session_store.get_session()
    @webapp2.cached_property
    def messages(self):
        # Flash messages queued under the '_messages' key (read clears them).
        return self.session.get_flashes(key='_messages')
    def add_message(self, message, level=None):
        # Queue a flash message to be rendered on the next response.
        self.session.add_flash(message, level, key='_messages')
    @webapp2.cached_property
    def auth_config(self):
        """
        Dict to hold urls for login/logout
        """
        return {
            'login_url': self.uri_for('login'),
            'logout_url': self.uri_for('logout')
        }
    @webapp2.cached_property
    def language(self):
        # ISO language code (e.g. 'en') of the current locale.
        return str(Locale.parse(self.locale).language)
    @webapp2.cached_property
    def user(self):
        # Session user dict from webapp2_extras.auth, or None if anonymous.
        return self.auth.get_user_by_session()
    @webapp2.cached_property
    def user_id(self):
        # String id of the logged-in user, or None.
        return str(self.user['user_id']) if self.user else None
    @webapp2.cached_property
    def user_key(self):
        # Datastore key of the logged-in user's User entity, or None.
        if self.user:
            user_info = models.User.get_by_id(long(self.user_id))
            return user_info.key
        return None
    @webapp2.cached_property
    def username(self):
        # Username of the logged-in user; drops the stale session and
        # redirects home if the backing entity no longer exists.
        if self.user:
            try:
                user_info = models.User.get_by_id(long(self.user_id))
                return str(user_info.username)
            except AttributeError, e:
                # avoid AttributeError when the session was delete from the server
                logging.error(e)
                self.auth.unset_session()
                self.redirect_to('home')
        return None
    @webapp2.cached_property
    def email(self):
        # Email of the logged-in user; same stale-session handling as username.
        if self.user:
            try:
                user_info = models.User.get_by_id(long(self.user_id))
                return user_info.email
            except AttributeError, e:
                # avoid AttributeError when the session was delete from the server
                logging.error(e)
                self.auth.unset_session()
                self.redirect_to('home')
        return None
    @webapp2.cached_property
    def provider_uris(self):
        # Map of social-login provider name -> login URL, preserving any
        # 'continue_url' so the user returns where they started.
        login_urls = {}
        continue_url = self.request.get('continue_url')
        for provider in self.provider_info:
            if continue_url:
                login_url = self.uri_for("social-login", provider_name=provider, continue_url=continue_url)
            else:
                login_url = self.uri_for("social-login", provider_name=provider)
            login_urls[provider] = login_url
        return login_urls
    @webapp2.cached_property
    def provider_info(self):
        # Static metadata about the supported social-login providers.
        return models.SocialUser.PROVIDERS_INFO
    @webapp2.cached_property
    def path_for_language(self):
        """
        Get the current path + query_string without language parameter (hl=something)
        Useful to put it on a template to concatenate with '&hl=NEW_LOCALE'
        Example: .../?hl=en_US
        """
        path_lang = re.sub(r'(^hl=(\w{5})\&*)|(\&hl=(\w{5})\&*?)', '', str(self.request.query_string))
        return self.request.path + "?" if path_lang == "" else str(self.request.path) + "?" + path_lang
    @property
    def locales(self):
        """
        returns a dict of locale codes to locale display names in both the current locale and the localized locale
        example: if the current locale is es_ES then locales['en_US'] = 'Ingles (Estados Unidos) - English (United States)'
        """
        if not self.app.config.get('locales'):
            return None
        locales = {}
        for l in self.app.config.get('locales'):
            current_locale = Locale.parse(self.locale)
            language = current_locale.languages[l.split('_')[0]]
            territory = current_locale.territories[l.split('_')[1]]
            localized_locale_name = Locale.parse(l).display_name.capitalize()
            locales[l] = language.capitalize() + " (" + territory.capitalize() + ") - " + localized_locale_name
        return locales
    @webapp2.cached_property
    def is_mobile(self):
        # True when the device cookie says the client is a mobile browser.
        return utils.set_device_cookie_and_return_bool(self)
    @webapp2.cached_property
    def jinja2(self):
        # The jinja2 renderer configured by jinja2_factory for this app.
        return jinja2.get_jinja2(factory=jinja2_factory, app=self.app)
    @webapp2.cached_property
    def get_base_layout(self):
        """
        Get the current base layout template for jinja2 templating. Uses the variable base_layout set in config
        or if there is a base_layout defined, use the base_layout.
        """
        return self.base_layout if hasattr(self, 'base_layout') else self.app.config.get('base_layout')
    def set_base_layout(self, layout):
        """
        Set the base_layout variable, thereby overwriting the default layout template name in config.py.
        """
        self.base_layout = layout
    def render_template(self, filename, **kwargs):
        # Render `filename` with the standard context (user, locale, urls,
        # self.view.* variables and any extra **kwargs) and write the result.
        locales = self.app.config.get('locales') or []
        locale_iso = None
        language = ''
        territory = ''
        language_id = self.app.config.get('app_lang')
        if self.locale and len(locales) > 1:
            locale_iso = Locale.parse(self.locale)
            language_id = locale_iso.language
            territory_id = locale_iso.territory
            language = locale_iso.languages[language_id]
            territory = locale_iso.territories[territory_id]
        # make all self.view variables available in jinja2 templates
        if hasattr(self, 'view'):
            kwargs.update(self.view.__dict__)
        # set or overwrite special vars for jinja templates
        kwargs.update({
            'google_analytics_domain' : self.app.config.get('google_analytics_domain'),
            'google_analytics_code' : self.app.config.get('google_analytics_code'),
            'app_name': self.app.config.get('app_name'),
            'user_id': self.user_id,
            'username': self.username,
            'email': self.email,
            'url': self.request.url,
            'path': self.request.path,
            'query_string': self.request.query_string,
            'path_for_language': self.path_for_language,
            'is_mobile': self.is_mobile,
            'locale_iso': locale_iso, # babel Locale object (or None)
            'locale_language': language.capitalize() + " (" + territory.capitalize() + ")", # e.g. "English (United States)"
            'locale_language_id': language_id, # e.g. "en"
            'locales': self.locales,
            'provider_uris': self.provider_uris,
            'provider_info': self.provider_info,
            'enable_federated_login': self.app.config.get('enable_federated_login'),
            'base_layout': self.get_base_layout
        })
        kwargs.update(self.auth_config)
        if hasattr(self, 'form'):
            kwargs['form'] = self.form
        if self.messages:
            kwargs['messages'] = self.messages
        self.response.headers.add_header('X-UA-Compatible', 'IE=Edge,chrome=1')
        self.response.write(self.jinja2.render_template(filename, **kwargs))
#
# Copyright 2014-2017 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
from __future__ import division
from scipy.stats import norm
import numpy as np
from astropy.modeling import Fittable1DModel, Parameter
import math
# Normalisation constant of the standard normal PDF: sqrt(2 * pi).
M_SQRT_2_PI = math.sqrt(2 * math.pi)
def norm_pdf_t(x):
    """Standard normal probability density function evaluated at `x`.

    Accepts scalars or numpy arrays and returns the same shape.
    """
    return np.exp(-0.5 * x * x) / M_SQRT_2_PI
def gauss_box_model(x, amplitude=1.0, mean=0.0, stddev=1.0, hpix=0.5):
    """Integrate a Gaussian profile.

    Returns `amplitude` times the Gaussian probability mass inside the
    pixel of half-width `hpix` centred on `x`.
    """
    z = (x - mean) / stddev
    half_step = hpix / stddev
    # CDF difference over [z - half_step, z + half_step].
    return amplitude * (norm.cdf(z + half_step) - norm.cdf(z - half_step))
def gauss_box_model_deriv(x, amplitude=1.0, mean=0.0, stddev=1.0, hpix=0.5):
    """Derivative of the integral of a Gaussian profile.

    Returns the partial derivatives of ``gauss_box_model`` with respect to
    (amplitude, mean, stddev, hpix), in that order.
    """
    z = (x - mean) / stddev
    z2 = z + hpix / stddev
    z1 = z - hpix / stddev
    pdf2 = norm_pdf_t(z2)
    pdf1 = norm_pdf_t(z1)
    # d/d(amplitude): the bare integrated profile.
    d_amp = norm.cdf(z2) - norm.cdf(z1)
    # d/d(mean) and d/d(stddev) follow from the chain rule on z1/z2.
    d_mean = -amplitude / stddev * (pdf2 - pdf1)
    d_std = -amplitude / stddev * (pdf2 * z2 - pdf1 * z1)
    # d/d(hpix): widening the box adds density at both edges.
    d_hpix = amplitude / stddev * (pdf2 + pdf1)
    return d_amp, d_mean, d_std, d_hpix
class GaussBox(Fittable1DModel):
    """Model for fitting a 1D Gaussian convolved with a square"""
    # Scale of the integrated profile.
    amplitude = Parameter(default=1.0)
    # Centre of the Gaussian.
    mean = Parameter(default=0.0)
    # Width (sigma) of the Gaussian.
    stddev = Parameter(default=1.0)
    # Half pixel size; fixed because it is set by the detector geometry,
    # not by the fit.
    hpix = Parameter(default=0.5, fixed=True)
    @staticmethod
    def evaluate(x, amplitude, mean, stddev, hpix):
        # Delegate evaluation to the module-level function.
        return gauss_box_model(x, amplitude, mean, stddev, hpix)
    @staticmethod
    def fit_deriv(x, amplitude, mean, stddev, hpix):
        # Analytic parameter derivatives used by the fitting machinery.
        return gauss_box_model_deriv(x, amplitude, mean, stddev, hpix)
| gpl-3.0 |
from flow import translate_flow
from constants import remove_constants, recover_constants
from objects import remove_objects, remove_arrays, translate_object, translate_array, set_func_translator
from functions import remove_functions, reset_inline_count
from jsparser import inject_before_lval, indent, dbg
# Python prologue emitted at the top of every translated module: it imports
# the js2py runtime, creates the global Scope and registers it as the JS
# global object.
TOP_GLOBAL = '''from js2py.pyjs import *\nvar = Scope( JS_BUILTINS )\nset_global_object(var)\n'''
def translate_js(js, top=TOP_GLOBAL):
    """js has to be a javascript source code.
       returns equivalent python code.

       The pipeline replaces literals/objects/arrays/functions with
       placeholder lvalues, translates the remaining control flow, then
       re-injects the removed pieces in reverse order of removal.
    """
    # Remove constant literals
    no_const, constants = remove_constants(js)
    #print 'const count', len(constants)
    # Remove object literals
    no_obj, objects, obj_count = remove_objects(no_const)
    #print 'obj count', len(objects)
    # Remove arrays
    no_arr, arrays, arr_count = remove_arrays(no_obj)
    #print 'arr count', len(arrays)
    # Here remove and replace functions
    reset_inline_count()
    no_func, hoisted, inline = remove_functions(no_arr)
    #translate flow and expressions
    py_seed, to_register = translate_flow(no_func)
    # register variables and hoisted functions
    #top += '# register variables\n'
    top += 'var.registers(%s)\n' % str(to_register + hoisted.keys())
    #Recover functions
    # hoisted functions recovery
    defs = ''
    #defs += '# define hoisted functions\n'
    #print len(hoisted) , 'HH'*40
    for nested_name, nested_info in hoisted.iteritems():
        nested_block, nested_args = nested_info
        # Hoisted functions are defined up-front under a temporary name and
        # then registered in the scope under their real JS name.
        new_code = translate_func('PyJsLvalTempHoisted', nested_block, nested_args)
        new_code += 'PyJsLvalTempHoisted.func_name = %s\n' %repr(nested_name)
        defs += new_code +'\nvar.put(%s, PyJsLvalTempHoisted)\n' % repr(nested_name)
    #defs += '# Everting ready!\n'
    # inline functions recovery
    for nested_name, nested_info in inline.iteritems():
        nested_block, nested_args = nested_info
        new_code = translate_func(nested_name, nested_block, nested_args)
        # Inline function definitions are injected just before first use.
        py_seed = inject_before_lval(py_seed, nested_name.split('@')[0], new_code)
    # add hoisted definitiond - they have literals that have to be recovered
    py_seed = defs + py_seed
    #Recover arrays
    for arr_lval, arr_code in arrays.iteritems():
        translation, obj_count, arr_count = translate_array(arr_code, arr_lval, obj_count, arr_count)
        py_seed = inject_before_lval(py_seed, arr_lval, translation)
    #Recover objects
    for obj_lval, obj_code in objects.iteritems():
        translation, obj_count, arr_count = translate_object(obj_code, obj_lval, obj_count, arr_count)
        py_seed = inject_before_lval(py_seed, obj_lval, translation)
    #Recover constants
    py_code = recover_constants(py_seed, constants)
    return top + py_code
def translate_func(name, block, args):
    """Translates functions and all nested functions to Python code.
       name -  name of that function (global functions will be available under var while
               inline will be available directly under this name )
       block - code of the function (*with* brackets {} )
       args - arguments that this function takes

       Returns the Python source of a ``@Js``-decorated function definition.
    """
    inline = name.startswith('PyJsLvalInline')
    real_name = ''
    if inline:
        # Inline lval names have the form LVAL_NAME@REAL_NAME.
        name, real_name = name.split('@')
    arglist = ', '.join(args) +', ' if args else ''
    code = '@Js\ndef %s(%sthis, arguments, var=var):\n' % (name, arglist)
    # register local variables
    scope = "'this':this, 'arguments':arguments" #it will be a simple dictionary
    for arg in args:
        scope += ', %s:%s' %(repr(arg), arg)
    if real_name:
        # Named function expressions can refer to themselves by name.
        scope += ', %s:%s' % (repr(real_name), name)
    code += indent('var = Scope({%s}, var)\n' % scope)
    block, nested_hoisted, nested_inline = remove_functions(block)
    py_code, to_register = translate_flow(block)
    #register variables declared with var and names of hoisted functions.
    to_register += nested_hoisted.keys()
    if to_register:
        code += indent('var.registers(%s)\n'% str(to_register))
    for nested_name, info in nested_hoisted.iteritems():
        nested_block, nested_args = info
        new_code = translate_func('PyJsLvalTempHoisted', nested_block, nested_args)
        # Now put definition of hoisted function on the top
        code += indent(new_code)
        code += indent('PyJsLvalTempHoisted.func_name = %s\n' %repr(nested_name))
        code += indent('var.put(%s, PyJsLvalTempHoisted)\n' % repr(nested_name))
    for nested_name, info in nested_inline.iteritems():
        nested_block, nested_args = info
        new_code = translate_func(nested_name, nested_block, nested_args)
        # Inject definitions of inline functions just before usage
        # nested inline names have this format : LVAL_NAME@REAL_NAME
        py_code = inject_before_lval(py_code, nested_name.split('@')[0], new_code)
    if py_code.strip():
        code += indent(py_code)
    return code
# Hook this module's function translator into functions.py so nested
# function removal can call back into translate_func.
set_func_translator(translate_func)
#print inject_before_lval(' chuj\n moj\n lval\nelse\n', 'lval', 'siema\njestem piter\n')
import time
#print time.time()
#print translate_js('if (1) console.log("Hello, World!"); else if (5) console.log("Hello world?");')
#print time.time()
# Sample JavaScript snippet exercised by the __main__ smoke test below.
t = """
var x = [1,2,3,4,5,6];
for (var e in x) {console.log(e); delete x[3];}
console.log(5 in [1,2,3,4,5]);
"""
# Template that wraps translated code so runtime errors are displayed
# instead of crashing the host process.
SANDBOX ='''
import traceback
try:
%s
except:
    print traceback.format_exc()
    print
    raw_input('Press Enter to quit')
'''
if __name__=='__main__':
    # test with jq if works then it really works :)
    #with open('jq.js', 'r') as f:
        #jq = f.read()
    #res = translate_js(jq)
    res = translate_js(t)
    dbg(SANDBOX% indent(res))
    print 'Done'
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text, force_text
from django.utils.encoding import python_2_unicode_compatible
class ContentTypeManager(models.Manager):
    """Manager providing cached lookups of ContentType rows by model or id."""
    # Cache to avoid re-looking up ContentType objects all over the place.
    # This cache is shared by all the get_for_* methods.
    # Structure: {db_alias: {(app_label, model) | id: ContentType}}
    _cache = {}
    def get_by_natural_key(self, app_label, model):
        # Natural-key lookup used by serialization; cached like the others.
        try:
            ct = self.__class__._cache[self.db][(app_label, model)]
        except KeyError:
            ct = self.get(app_label=app_label, model=model)
            self._add_to_cache(self.db, ct)
        return ct
    def _get_opts(self, model, for_concrete_model):
        # Resolve proxy/deferred models to the Options the cache is keyed on.
        if for_concrete_model:
            model = model._meta.concrete_model
        elif model._deferred:
            model = model._meta.proxy_for_model
        return model._meta
    def _get_from_cache(self, opts):
        # Raises KeyError on a cache miss (callers rely on that).
        key = (opts.app_label, opts.object_name.lower())
        return self.__class__._cache[self.db][key]
    def get_for_model(self, model, for_concrete_model=True):
        """
        Returns the ContentType object for a given model, creating the
        ContentType if necessary. Lookups are cached so that subsequent lookups
        for the same model don't hit the database.
        """
        opts = self._get_opts(model, for_concrete_model)
        try:
            ct = self._get_from_cache(opts)
        except KeyError:
            # Load or create the ContentType entry. The smart_text() is
            # needed around opts.verbose_name_raw because name_raw might be a
            # django.utils.functional.__proxy__ object.
            ct, created = self.get_or_create(
                app_label = opts.app_label,
                model = opts.object_name.lower(),
                defaults = {'name': smart_text(opts.verbose_name_raw)},
            )
            self._add_to_cache(self.db, ct)
        return ct
    def get_for_models(self, *models, **kwargs):
        """
        Given *models, returns a dictionary mapping {model: content_type}.
        """
        for_concrete_models = kwargs.pop('for_concrete_models', True)
        # Final results
        results = {}
        # models that aren't already in the cache
        needed_app_labels = set()
        needed_models = set()
        needed_opts = set()
        for model in models:
            opts = self._get_opts(model, for_concrete_models)
            try:
                ct = self._get_from_cache(opts)
            except KeyError:
                needed_app_labels.add(opts.app_label)
                needed_models.add(opts.object_name.lower())
                needed_opts.add(opts)
            else:
                results[model] = ct
        if needed_opts:
            # One bulk query for everything the cache could not answer.
            cts = self.filter(
                app_label__in=needed_app_labels,
                model__in=needed_models
            )
            for ct in cts:
                model = ct.model_class()
                if model._meta in needed_opts:
                    results[model] = ct
                    needed_opts.remove(model._meta)
                self._add_to_cache(self.db, ct)
        for opts in needed_opts:
            # These weren't in the cache, or the DB, create them.
            ct = self.create(
                app_label=opts.app_label,
                model=opts.object_name.lower(),
                name=smart_text(opts.verbose_name_raw),
            )
            self._add_to_cache(self.db, ct)
            results[ct.model_class()] = ct
        return results
    def get_for_id(self, id):
        """
        Lookup a ContentType by ID. Uses the same shared cache as get_for_model
        (though ContentTypes are obviously not created on-the-fly by get_by_id).
        """
        try:
            ct = self.__class__._cache[self.db][id]
        except KeyError:
            # This could raise a DoesNotExist; that's correct behavior and will
            # make sure that only correct ctypes get stored in the cache dict.
            ct = self.get(pk=id)
            self._add_to_cache(self.db, ct)
        return ct
    def clear_cache(self):
        """
        Clear out the content-type cache. This needs to happen during database
        flushes to prevent caching of "stale" content type IDs (see
        django.contrib.contenttypes.management.update_contenttypes for where
        this gets called).
        """
        self.__class__._cache.clear()
    def _add_to_cache(self, using, ct):
        """Insert a ContentType into the cache."""
        # Each entry is cached under both its (app_label, model) key and id.
        model = ct.model_class()
        key = (model._meta.app_label, model._meta.object_name.lower())
        self.__class__._cache.setdefault(using, {})[key] = ct
        self.__class__._cache.setdefault(using, {})[ct.id] = ct
@python_2_unicode_compatible
class ContentType(models.Model):
    """Registry row identifying a Django model by (app_label, model)."""
    name = models.CharField(max_length=100)
    app_label = models.CharField(max_length=100)
    model = models.CharField(_('python model class name'), max_length=100)
    objects = ContentTypeManager()
    class Meta:
        verbose_name = _('content type')
        verbose_name_plural = _('content types')
        db_table = 'django_content_type'
        ordering = ('name',)
        unique_together = (('app_label', 'model'),)
    def __str__(self):
        # self.name is deprecated in favor of using model's verbose_name, which
        # can be translated. Formal deprecation is delayed until we have DB
        # migration to be able to remove the field from the database along with
        # the attribute.
        #
        # We return self.name only when users have changed its value from the
        # initial verbose_name_raw and might rely on it.
        model = self.model_class()
        if not model or self.name != model._meta.verbose_name_raw:
            return self.name
        else:
            return force_text(model._meta.verbose_name)
    def model_class(self):
        "Returns the Python model class for this type of content."
        from django.db import models
        return models.get_model(self.app_label, self.model,
                                only_installed=False)
    def get_object_for_this_type(self, **kwargs):
        """
        Returns an object of this type for the keyword arguments given.
        Basically, this is a proxy around this object_type's get_object() model
        method. The ObjectNotExist exception, if thrown, will not be caught,
        so code that calls this method should catch it.
        """
        return self.model_class()._base_manager.using(self._state.db).get(**kwargs)
    def get_all_objects_for_this_type(self, **kwargs):
        """
        Returns all objects of this type for the keyword arguments given.
        """
        return self.model_class()._base_manager.using(self._state.db).filter(**kwargs)
    def natural_key(self):
        # Used by serialization instead of the database id.
        return (self.app_label, self.model)
| gpl-2.0 |
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Bool, Str, Typed, ForwardTyped, observe, set_default
from enaml.core.declarative import d_
from .bounded_datetime import BoundedDatetime, ProxyBoundedDatetime
class ProxyDatetimeSelector(ProxyBoundedDatetime):
    """ The abstract definition of a proxy DatetimeSelector object.
    """
    #: A reference to the DatetimeSelector declaration.
    declaration = ForwardTyped(lambda: DatetimeSelector)
    def set_datetime_format(self, format):
        # Toolkit backends must apply the given datetime display format.
        raise NotImplementedError
    def set_calendar_popup(self, popup):
        # Toolkit backends must enable/disable the calendar popup.
        raise NotImplementedError
class DatetimeSelector(BoundedDatetime):
    """ A widget to edit a Python datetime.datetime object.
    This is a geometrically smaller control than what is provided by
    Calendar.
    """
    #: A python date format string to format the datetime. If None is
    #: supplied (or is invalid) the system locale setting is used.
    #: This may not be supported by all backends.
    datetime_format = d_(Str())
    #: Whether to use a calendar popup for selecting the date.
    calendar_popup = d_(Bool(False))
    #: A datetime selector expands freely in width by default
    hug_width = set_default('ignore')
    #: A reference to the ProxyDateSelector object.
    proxy = Typed(ProxyDatetimeSelector)
    #--------------------------------------------------------------------------
    # Observers
    #--------------------------------------------------------------------------
    @observe(('datetime_format', 'calendar_popup'))
    def _update_proxy(self, change):
        """ An observer which updates the proxy with state change.
        """
        # The superclass implementation is sufficient.
        super(DatetimeSelector, self)._update_proxy(change)
| bsd-3-clause |
# Copyright (c) 2013 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova import db
from nova.openstack.common import log as logging
from nova.scheduler import filters
# Configuration options controlling how aggregate metadata keys are matched
# against image properties by AggregateImagePropertiesIsolation below.
opts = [
        cfg.StrOpt('aggregate_image_properties_isolation_namespace',
                   help='Force the filter to consider only keys matching '
                        'the given namespace.'),
        cfg.StrOpt('aggregate_image_properties_isolation_separator',
                   default=".",
                   help='The separator used between the namespace and keys'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
LOG = logging.getLogger(__name__)
class AggregateImagePropertiesIsolation(filters.BaseHostFilter):
    """AggregateImagePropertiesIsolation works with image properties."""
    # Aggregate data and instance type does not change within a request
    run_filter_once_per_request = True
    def host_passes(self, host_state, filter_properties):
        """Checks a host in an aggregate that metadata key/value match
        with image properties.

        Returns False as soon as one aggregate metadata key (optionally
        restricted to the configured namespace) has a value list that does
        not contain the image's value for that key.
        """
        cfg_namespace = CONF.aggregate_image_properties_isolation_namespace
        cfg_separator = CONF.aggregate_image_properties_isolation_separator
        spec = filter_properties.get('request_spec', {})
        image_props = spec.get('image', {}).get('properties', {})
        context = filter_properties['context']
        metadata = db.aggregate_metadata_get_by_host(context, host_state.host)
        for key, options in metadata.iteritems():
            # Keys outside the configured namespace are ignored entirely.
            if (cfg_namespace and
                    not key.startswith(cfg_namespace + cfg_separator)):
                continue
            prop = image_props.get(key)
            # An image without the property passes; a mismatching value fails.
            if prop and prop not in options:
                LOG.debug("%(host_state)s fails image aggregate properties "
                            "requirements. Property %(prop)s does not "
                            "match %(options)s.",
                          {'host_state': host_state,
                           'prop': prop,
                           'options': options})
                return False
        return True
| apache-2.0 |
fpdf_charwidths['helveticaI']={
'\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
'\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':278,'"':355,'#':556,'$':556,'%':889,'&':667,'\'':191,'(':333,')':333,'*':389,'+':584,
',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':278,';':278,'<':584,'=':584,'>':584,'?':556,'@':1015,'A':667,
'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':500,'K':667,'L':556,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
'X':667,'Y':667,'Z':611,'[':278,'\\':278,']':278,'^':469,'_':556,'`':333,'a':556,'b':556,'c':500,'d':556,'e':556,'f':278,'g':556,'h':556,'i':222,'j':222,'k':500,'l':222,'m':833,
'n':556,'o':556,'p':556,'q':556,'r':333,'s':500,'t':278,'u':556,'v':500,'w':722,'x':500,'y':500,'z':500,'{':334,'|':260,'}':334,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':222,'\x83':556,
'\x84':333,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':222,'\x92':222,'\x93':333,'\x94':333,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':500,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':260,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
'\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':556,'\xb6':537,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667,
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':500,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':556,'\xf1':556,
'\xf2':556,'\xf3':556,'\xf4':556,'\xf5':556,'\xf6':556,'\xf7':584,'\xf8':611,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500}
# -*- test-case-name: twisted.test.test_reflect -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Standardized versions of various cool and/or strange things that you can do
with Python's reflection capabilities.
"""
from __future__ import nested_scopes
# System Imports
import sys
import os
import types
import string
import pickle
import new
import traceback
import weakref
import re
RegexType = type(re.compile(""))
try:
import cStringIO as StringIO
except ImportError:
import StringIO
class Settable:
    """
    A mixin class for syntactic sugar.  Lets you assign attributes by
    calling with keyword arguments; for example, C{x(a=b,c=d,y=z)} is the
    same as C{x.a=b;x.c=d;x.y=z}.  The most useful place for this is
    where you don't want to name a variable, but you do want to set
    some attributes; for example, C{X()(y=z,a=b)}.
    """
    def __init__(self, **kw):
        # Delegate to __call__ so construction and later updates share code.
        self(**kw)

    def __call__(self, **kw):
        # Assign every keyword as an attribute, then return self so the
        # calls can be chained.
        for attr_name in kw:
            setattr(self, attr_name, kw[attr_name])
        return self
class AccessorType(type):
    """Metaclass that generates properties automatically.
    This is for Python 2.2 and up.
    Using this metaclass for your class will give you explicit accessor
    methods; a method called set_foo, will automatically create a property
    'foo' that uses set_foo as a setter method. Same for get_foo and del_foo.
    Note that this will only work on methods that are present on class
    creation. If you add methods after the class is defined they will not
    automatically become properties. Likewise, class attributes will only
    be used if they are present upon class creation, and no getter function
    was set - if a getter is present, the class attribute will be ignored.
    This is a 2.2-only alternative to the Accessor mixin - just set in your
    class definition::
        __metaclass__ = AccessorType
    """
    def __init__(self, name, bases, d):
        type.__init__(self, name, bases, d)
        # accessors maps property name -> [getter, setter, deleter].
        accessors = {}
        prefixs = ["get_", "set_", "del_"]
        for k in d.keys():
            v = getattr(self, k)
            for i in range(3):
                if k.startswith(prefixs[i]):
                    # All three prefixes are 4 characters, hence k[4:].
                    accessors.setdefault(k[4:], [None, None, None])[i] = v
        for name, (getter, setter, deler) in accessors.items():
            # create default behaviours for the property - if we leave
            # the getter as None we won't be able to getattr, etc..
            if getter is None:
                if hasattr(self, name):
                    # A class attribute acts as the default value.
                    value = getattr(self, name)
                    def getter(this, value=value, name=name):
                        if this.__dict__.has_key(name):
                            return this.__dict__[name]
                        else:
                            return value
                else:
                    def getter(this, name=name):
                        if this.__dict__.has_key(name):
                            return this.__dict__[name]
                        else:
                            raise AttributeError, "no such attribute %r" % name
            if setter is None:
                # Default setter/deleter operate directly on the instance dict.
                def setter(this, value, name=name):
                    this.__dict__[name] = value
            if deler is None:
                def deler(this, name=name):
                    del this.__dict__[name]
            setattr(self, name, property(getter, setter, deler, ""))
class PropertyAccessor(object):
    """A mixin class for Python 2.2 that uses AccessorType.
    This provides compatability with the pre-2.2 Accessor mixin, up
    to a point.
    Extending this class will give you explicit accessor methods; a
    method called set_foo, for example, is the same as an if statement
    in __setattr__ looking for 'foo'. Same for get_foo and del_foo.
    There are also reallyDel and reallySet methods, so you can
    override specifics in subclasses without clobbering __setattr__
    and __getattr__, or using non-2.1 compatible code.
    There is are incompatibilities with Accessor - accessor
    methods added after class creation will *not* be detected. OTOH,
    this method is probably way faster.
    In addition, class attributes will only be used if no getter
    was defined, and instance attributes will not override getter methods
    whereas in original Accessor the class attribute or instance attribute
    would override the getter method.
    """
    # addendum to above:
    # The behaviour of Accessor is wrong IMHO, and I've found bugs
    # caused by it.
    #  -- itamar
    __metaclass__ = AccessorType
    def reallySet(self, k, v):
        # Bypass any set_* accessor and write straight to the instance dict.
        self.__dict__[k] = v
    def reallyDel(self, k):
        # Bypass any del_* accessor and delete straight from the instance dict.
        del self.__dict__[k]
class Accessor:
    """
    Extending this class will give you explicit accessor methods; a
    method called C{set_foo}, for example, is the same as an if statement
    in L{__setattr__} looking for C{'foo'}. Same for C{get_foo} and
    C{del_foo}. There are also L{reallyDel} and L{reallySet} methods,
    so you can override specifics in subclasses without clobbering
    L{__setattr__} and L{__getattr__}.
    This implementation is for Python 2.1.
    """
    def __setattr__(self, k,v):
        # Route through set_<k> if the class defines one.
        kstring='set_%s'%k
        if hasattr(self.__class__,kstring):
            return getattr(self,kstring)(v)
        else:
            self.reallySet(k,v)
    def __getattr__(self, k):
        # Only called on a normal lookup miss; route through get_<k>.
        kstring='get_%s'%k
        if hasattr(self.__class__,kstring):
            return getattr(self,kstring)()
        raise AttributeError("%s instance has no accessor for: %s" % (qual(self.__class__),k))
    def __delattr__(self, k):
        # Route through del_<k> if the class defines one.
        kstring='del_%s'%k
        if hasattr(self.__class__,kstring):
            getattr(self,kstring)()
            return
        self.reallyDel(k)
    def reallySet(self, k,v):
        """
        *actually* set self.k to v without incurring side-effects.
        This is a hook to be overridden by subclasses.
        """
        if k == "__dict__":
            # Replacing __dict__ wholesale: clear then copy the new mapping.
            self.__dict__.clear()
            self.__dict__.update(v)
        else:
            self.__dict__[k]=v
    def reallyDel(self, k):
        """
        *actually* del self.k without incurring side-effects. This is a
        hook to be overridden by subclasses.
        """
        del self.__dict__[k]

# just in case
OriginalAccessor = Accessor
class Summer(Accessor):
    """
    Extend from this class to get the capability to maintain 'related
    sums'.  Have a tuple in your class like the following::
        sums=(('amount','credit','credit_total'),
              ('amount','debit','debit_total'))
    and the 'credit_total' member of the 'credit' member of self will
    always be incremented when the 'amount' member of self is
    incremented, similiarly for the debit versions.
    """
    def reallySet(self, k,v):
        "This method does the work."
        # Each sum entry is (attr, obj, objattr): changing self.<attr>
        # propagates the delta to getattr(self, obj).<objattr>.
        for sum in self.sums:
            attr=sum[0]
            obj=sum[1]
            objattr=sum[2]
            if k == attr:
                # The tracked attribute changed: push the difference to the
                # related object's total. Missing values default to 0.
                try:
                    oldval=getattr(self, attr)
                except:
                    oldval=0
                diff=v-oldval
                if hasattr(self, obj):
                    ob=getattr(self,obj)
                    if ob is not None:
                        try:oldobjval=getattr(ob, objattr)
                        except:oldobjval=0.0
                        setattr(ob,objattr,oldobjval+diff)
            elif k == obj:
                # The related object itself is being replaced: move the
                # current contribution from the old object to the new one by
                # zeroing the attr on the old and re-applying it to the new.
                if hasattr(self, attr):
                    x=getattr(self,attr)
                    setattr(self,attr,0)
                    y=getattr(self,k)
                    Accessor.reallySet(self,k,v)
                    setattr(self,attr,x)
                    Accessor.reallySet(self,y,v)
        Accessor.reallySet(self,k,v)
class QueueMethod:
    """ I represent a method that doesn't exist yet."""

    def __init__(self, name, calls):
        # Record the method name and the shared list that collects calls.
        self.name = name
        self.calls = calls

    def __call__(self, *args):
        # Queue the invocation as a (name, args) pair for later replay.
        record = (self.name, args)
        self.calls.append(record)
def funcinfo(function):
"""
this is more documentation for myself than useful code.
"""
code=function.func_code
name=function.func_name
argc=code.co_argcount
argv=code.co_varnames[:argc]
defaults=function.func_defaults
out = []
out.append('The function %s accepts %s arguments' % (name ,argc))
if defaults:
required=argc-len(defaults)
out.append('It requires %s arguments' % required)
out.append('The arguments required are: %s' % argv[:required])
out.append('additional arguments are:')
for i in range(argc-required):
j=i+required
out.append('%s which has a default of' % (argv[j], defaults[i]))
return out
# Tri-state results returned by isinst() below: not an instance, already the
# current class, or upgraded to the current class.
ISNT=0
WAS=1
IS=2
def fullFuncName(func):
    """Return the fully-qualified '<module>.<name>' path of *func*, verifying
    that the resulting name resolves back to the same object."""
    moduleName = str(pickle.whichmodule(func, func.__name__))
    qualName = moduleName + '.' + func.__name__
    if namedObject(qualName) is not func:
        raise Exception("Couldn't find %s as %s." % (func, qualName))
    return qualName
def qual(clazz):
    """Return full import path of a class."""
    return '%s.%s' % (clazz.__module__, clazz.__name__)
def getcurrent(clazz):
    """Return the current definition of *clazz* as found in its module
    (useful after a reload); fall back to *clazz* itself when absent."""
    assert type(clazz) == types.ClassType, 'must be a class...'
    module = namedModule(clazz.__module__)
    replacement = getattr(module, clazz.__name__, None)
    if replacement is None:
        return clazz
    return replacement
def getClass(obj):
    """Return the class or type of object 'obj'.
    Returns sensible result for oldstyle and newstyle instances and types."""
    try:
        return obj.__class__
    except AttributeError:
        return type(obj)
# class graph nonsense
# I should really have a better name for this...
def isinst(inst, clazz):
    """Old-style-aware isinstance() that also upgrades *inst* to the current
    class definition; returns ISNT, WAS, or IS for old-style instances."""
    if type(inst) != types.InstanceType or type(clazz) != types.ClassType:
        # New-style objects: plain isinstance semantics.
        return isinstance(inst, clazz)
    originalClass = inst.__class__
    currentClass = getcurrent(originalClass)
    clazz = getcurrent(clazz)
    if not issubclass(currentClass, clazz):
        return ISNT
    if originalClass == currentClass:
        return WAS
    # Upgrade the instance to the freshest class definition.
    inst.__class__ = currentClass
    return IS
def namedModule(name):
    """Return a module given its dotted name."""
    module = __import__(name)
    # __import__ returns the top-level package; walk down to the leaf.
    for part in name.split(".")[1:]:
        module = getattr(module, part)
    return module
def namedObject(name):
    """Get a fully named module-global object.

    *name* is a dotted path; everything up to the last dot names the module
    and the final segment names an attribute in it.
    """
    # Fix: the original used the py2-only string.split/string.join function
    # forms; the equivalent str methods work identically and match namedAny.
    classSplit = name.split('.')
    module = namedModule('.'.join(classSplit[:-1]))
    return getattr(module, classSplit[-1])
namedClass = namedObject # backwards compat
def namedAny(name):
    """Get a fully named package, module, module-global object, or attribute.

    Tries ever-shorter prefixes of *name* as a module path until one imports,
    then resolves the remaining segments with getattr().
    """
    names = name.split('.')
    topLevelPackage = None
    moduleNames = names[:]
    while not topLevelPackage:
        try:
            trialname = '.'.join(moduleNames)
            topLevelPackage = __import__(trialname)
        except ImportError:
            # if the ImportError happened in the module being imported,
            # this is a failure that should be handed to our caller.
            # count stack frames to tell the difference.
            exc_info = sys.exc_info()
            if len(traceback.extract_tb(exc_info[2])) > 1:
                try:
                    # Clean up garbage left in sys.modules.
                    del sys.modules[trialname]
                except KeyError:
                    # Python 2.4 has fixed this.  Yay!
                    pass
                # Re-raise with the original traceback (Python 2 syntax).
                raise exc_info[0], exc_info[1], exc_info[2]
            # Trailing segment was not a module: retry a shorter prefix.
            moduleNames.pop()
    obj = topLevelPackage
    for n in names[1:]:
        obj = getattr(obj, n)
    return obj
def _reclass(clazz):
    """Re-resolve *clazz* from its module and, recursively, all its bases."""
    current = getattr(namedModule(clazz.__module__), clazz.__name__)
    current.__bases__ = tuple(_reclass(base) for base in current.__bases__)
    return current
def macro(name, filename, source, **identifiers):
    """macro(name, source, **identifiers)
    This allows you to create macro-like behaviors in python: *source* is a
    %-template compiled and executed in a synthetic module, and the object
    bound to *name* in it is returned.  See twisted.python.hook for an
    example of its usage.
    """
    if not identifiers.has_key('name'):
        identifiers['name'] = name
    # Substitute the identifiers into the template, then compile it with a
    # recognizable pseudo-filename for tracebacks.
    source = source % identifiers
    codeplace = "<%s (macro)>" % filename
    code = compile(source, codeplace, 'exec')

    # shield your eyes!
    sm = sys.modules
    tprm = "twisted.python.reflect.macros"
    if not sm.has_key(tprm):
        macros = new.module(tprm)
        sm[tprm] = macros
        macros.count = 0
    macros = sm[tprm]
    macros.count += 1
    macroname = 'macro_' + str(macros.count)
    tprmm = tprm + '.' + macroname
    mymod = new.module(tprmm)
    sys.modules[tprmm] = mymod
    setattr(macros, macroname, mymod)
    dict = mymod.__dict__

    # Before we go on, I guess I should explain why I just did that.  Basically
    # it's a gross hack to get epydoc to work right, but the general idea is
    # that it will be a useful aid in debugging in _any_ app which expects
    # sys.modules to have the same globals as some function.  For example, it
    # would be useful if you were foolishly trying to pickle a wrapped function
    # directly from a class that had been hooked.

    exec code in dict, dict
    return dict[name]
def _determineClass(x):
try:
return x.__class__
except:
return type(x)
def _determineClassName(x):
    """Best-effort lookup of x's class name; never raises."""
    cls = _determineClass(x)
    try:
        return cls.__name__
    except:
        pass
    try:
        return str(cls)
    except:
        # Even str() failed; fall back to something unambiguous.
        return '<BROKEN CLASS AT %s>' % id(cls)
def safe_repr(o):
    """safe_repr(anything) -> string
    Returns a string representation of an object, or a string containing a
    traceback, if that object's __repr__ raised an exception.
    """
    try:
        return repr(o)
    except:
        io = StringIO.StringIO()
        # NOTE(review): print_stack records the current call stack rather
        # than the exception traceback from __repr__; print_exc may be the
        # real intent -- left as-is to keep this fix narrow.
        traceback.print_stack(file=io)
        className = _determineClassName(o)
        stackText = io.getvalue()
        objId = id(o)
        # Bug fix: the original interpolated (class, traceback, id), putting
        # the traceback after 'at' and the id after 'repr error'.  The id
        # belongs with 'at' and the traceback with 'repr error' (compare
        # safe_str below).
        return '<%s instance at %s with repr error %s>' % (
            className, objId, stackText)
def safe_str(o):
    """safe_str(anything) -> string
    Returns a string representation of an object, or a string containing a
    traceback, if that object's __str__ raised an exception.
    """
    try:
        return str(o)
    except:
        tbText = '\n'.join(traceback.format_exception(*sys.exc_info()))
        return '<%s instance at %s with str error %s>' % (
            _determineClassName(o), id(o), tbText)
##the following were factored out of usage
def allYourBase(classObj, baseClass=None):
    """allYourBase(classObj, baseClass=None) -> list of all base
    classes that are subclasses of baseClass, unless it is None,
    in which case all bases will be added.
    """
    collected = []
    accumulateBases(classObj, collected, baseClass)
    return collected
def accumulateBases(classObj, l, baseClass=None):
    """Recursively append every base of *classObj* to *l*, filtered to
    subclasses of *baseClass* when one is given."""
    for parent in classObj.__bases__:
        if baseClass is not None and not issubclass(parent, baseClass):
            continue
        l.append(parent)
        accumulateBases(parent, l, baseClass)
def prefixedMethodNames(classObj, prefix):
    """A list of method names with a given prefix in a given class.
    """
    collected = {}
    addMethodNamesToDict(classObj, collected, prefix)
    return collected.keys()
def addMethodNamesToDict(classObj, dict, prefix, baseClass=None):
    """
    addMethodNamesToDict(classObj, dict, prefix, baseClass=None) -> dict
    Walk 'classObj' and its bases, recording method names that start with
    'prefix' in 'dict' with a value of 1 (prefix stripped).  If baseClass
    isn't None, methods are only added where classObj is-a baseClass.
    """
    # Bases first, so the starting class can shadow nothing (values are 1).
    for base in classObj.__bases__:
        addMethodNamesToDict(base, dict, prefix, baseClass)
    if baseClass is not None and baseClass not in classObj.__bases__:
        return
    for name, method in classObj.__dict__.items():
        if type(method) is not types.FunctionType:
            continue
        if name[:len(prefix)] != prefix:
            continue
        shortName = name[len(prefix):]
        if shortName:
            dict[shortName] = 1
def prefixedMethods(obj, prefix=''):
    """A list of methods with a given prefix on a given instance.
    """
    collected = {}
    accumulateMethods(obj, collected, prefix)
    return collected.values()
def accumulateMethods(obj, dict, prefix='', curClass=None):
    """accumulateMethods(instance, dict, prefix)
    Recurse through the bases of instance.__class__ and add bound methods
    whose names begin with 'prefix' to 'dict', keyed by the name with the
    prefix stripped.
    """
    if not curClass:
        curClass = obj.__class__
    for base in curClass.__bases__:
        accumulateMethods(obj, dict, prefix, base)
    for name, method in curClass.__dict__.items():
        if type(method) is not types.FunctionType:
            continue
        if name[:len(prefix)] != prefix:
            continue
        shortName = name[len(prefix):]
        if shortName:
            # Store the *bound* method from the instance.
            dict[shortName] = getattr(obj, name)
def accumulateClassDict(classObj, attr, adict, baseClass=None):
    """Accumulate all attributes of a given name in a class hierarchy into a
    single dictionary, assuming each such class attribute is itself a dict.

    When the same key appears at several levels, the definition closest to
    the starting class wins (bases are merged first, then overwritten).
    Ex::
        | class Soy:
        |   properties = {"taste": "bland"}
        | class Plant:
        |   properties = {"colour": "green"}
        | class Seaweed(Plant):
        |   pass
        | class Lunch(Soy, Seaweed):
        |   properties = {"vegan": 1 }
        | dct = {}
        | accumulateClassDict(Lunch, "properties", dct)
        | print dct
        {"taste": "bland", "colour": "green", "vegan": 1}
    """
    # Merge the bases first; note baseClass is not forwarded to the recursive
    # call (matching the implementation this replaces).
    for base in classObj.__bases__:
        accumulateClassDict(base, attr, adict)
    if baseClass is None or baseClass in classObj.__bases__:
        contribution = classObj.__dict__.get(attr, {})
        adict.update(contribution)
def accumulateClassList(classObj, attr, listObj, baseClass=None):
    """Accumulate all attributes of a given name in a class hierarchy into a
    single list, assuming each such class attribute is itself a list.
    """
    # Bases contribute first; note baseClass is not forwarded on recursion.
    for base in classObj.__bases__:
        accumulateClassList(base, attr, listObj)
    if baseClass is None or baseClass in classObj.__bases__:
        contribution = classObj.__dict__.get(attr, [])
        listObj.extend(contribution)
def isSame(a, b):
    """True when *a* and *b* are the very same object (identity)."""
    return a is b
def isLike(a, b):
    """True when *a* compares equal to *b*."""
    return a == b
def modgrep(goal):
    """Run objgrep() over everything reachable from sys.modules."""
    return objgrep(sys.modules, goal, isLike, 'sys.modules')
def isOfType(start, goal):
    """True when *start* is exactly of type *goal* (old-style aware)."""
    if type(start) is goal:
        return True
    # Old-style instances all share types.InstanceType; compare classes.
    return (isinstance(start, types.InstanceType) and
            start.__class__ is goal)
def findInstances(start, t):
    """Find paths to every object of type *t* reachable from *start*."""
    return objgrep(start, t, isOfType)
def objgrep(start, goal, eq=isLike, path='', paths=None, seen=None, showUnknowns=0, maxDepth=None):
    '''An insanely CPU-intensive process for finding stuff.

    Recursively walks objects reachable from *start* and collects, in
    *paths*, a textual path for every object where eq(obj, goal) is true.
    '''
    if paths is None:
        paths = []
    if seen is None:
        seen = {}
    if eq(start, goal):
        paths.append(path)
    # Cycle guard: an id we have genuinely seen before stops the descent.
    if seen.has_key(id(start)):
        if seen[id(start)] is start:
            return
    if maxDepth is not None:
        if maxDepth == 0:
            return
        maxDepth -= 1
    seen[id(start)] = start
    # Dispatch on the container kind; each branch recurses with an extended
    # human-readable path segment.
    if isinstance(start, types.DictionaryType):
        r = []
        for k, v in start.items():
            objgrep(k, goal, eq, path+'{'+repr(v)+'}', paths, seen, showUnknowns, maxDepth)
            objgrep(v, goal, eq, path+'['+repr(k)+']', paths, seen, showUnknowns, maxDepth)
    elif isinstance(start, types.ListType) or isinstance(start, types.TupleType):
        for idx in xrange(len(start)):
            objgrep(start[idx], goal, eq, path+'['+str(idx)+']', paths, seen, showUnknowns, maxDepth)
    elif isinstance(start, types.MethodType):
        objgrep(start.im_self, goal, eq, path+'.im_self', paths, seen, showUnknowns, maxDepth)
        objgrep(start.im_func, goal, eq, path+'.im_func', paths, seen, showUnknowns, maxDepth)
        objgrep(start.im_class, goal, eq, path+'.im_class', paths, seen, showUnknowns, maxDepth)
    elif hasattr(start, '__dict__'):
        for k, v in start.__dict__.items():
            objgrep(v, goal, eq, path+'.'+k, paths, seen, showUnknowns, maxDepth)
        if isinstance(start, types.InstanceType):
            objgrep(start.__class__, goal, eq, path+'.__class__', paths, seen, showUnknowns, maxDepth)
    elif isinstance(start, weakref.ReferenceType):
        objgrep(start(), goal, eq, path+'()', paths, seen, showUnknowns, maxDepth)
    elif (isinstance(start, types.StringTypes+
                    (types.IntType, types.FunctionType,
                     types.BuiltinMethodType, RegexType, types.FloatType,
                     types.NoneType, types.FileType)) or
          type(start).__name__ in ('wrapper_descriptor', 'method_descriptor',
                                 'member_descriptor', 'getset_descriptor')):
        # Atomic/leaf values: nothing to descend into.
        pass
    elif showUnknowns:
        print 'unknown type', type(start), start
    return paths
def _startswith(s, sub):
# aug python2.1
return s[:len(sub)] == sub
def filenameToModuleName(fn):
    """Convert a name in the filesystem to the name of the Python module it is.
    This is aggressive about getting a module name back from a file; it will
    always return a string.  Aggressive means 'sometimes wrong'; it won't look
    at the Python path or try to do any error checking: don't use this method
    unless you already know that the filename you're talking about is a Python
    module.
    """
    fullName = os.path.abspath(fn)
    modName = os.path.splitext(os.path.basename(fn))[0]
    # Walk upwards, prepending each package directory that has an
    # __init__.py, until we leave the package tree.
    while 1:
        fullName = os.path.dirname(fullName)
        if not os.path.exists(os.path.join(fullName, "__init__.py")):
            return modName
        modName = "%s.%s" % (os.path.basename(fullName), modName)
#boo python
| gpl-2.0 |
CuteLemon/Learn | NewsAPI_Scraper/db_operation.py | 1 | 1050 | import pymongo as Mongo
DB_NAME = 'localhost'
DB_PORT = 27017
TEST_JSON = {'url':'http://hello.com','content':'Lemon Tree'}
class DB():
    """Minimal wrapper around a pymongo client bound to the test collection."""
    def __init__(self,db,port):
        # Connect, then bind the fixed test database and collection.
        client = Mongo.MongoClient(db, port)
        self.client = client
        self.db = client.test
        self.collect = self.db.test_collect
    def insert(self,c):
        """Insert the single document *c*."""
        self.collect.insert_one(c)
    def find(self,k):
        """Return a cursor over documents matching query *k*."""
        return self.collect.find(k)
    def delete(self,k):
        """Remove every document matching query *k*."""
        return self.collect.delete_many(k)
    def close(self):
        """Release the underlying client connection."""
        self.client.close()
if __name__ == '__main__':
    # Smoke test: insert, query, then clean up a single document.
    # Earlier, unwrapped version kept for reference:
    # Client = Mongo.MongoClient(DB,PORT)
    # db = Client.test
    # collect = db.test_collect
    # collect.insert(TEST_JSON)
    # for x in collect.find({'content':'Lemon Tree'}):
    # print x
    # Client.close()
    print 'mongodb test start:'
    db = DB(DB_NAME,DB_PORT)
    db.insert(TEST_JSON)
    result = db.find({'content':'Lemon Tree'})
    for x in result:
        print x
    # Clean up so repeated runs start from an empty collection.
    db.delete({'content':'Lemon Tree'})
    db.close()
    print 'mongodb test complete!'
| gpl-3.0 |
40223117cda/2015_w11 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/sprite.py | 603 | 55779 | ## pygame - Python Game Library
## Copyright (C) 2000-2003, 2007 Pete Shinners
## (C) 2004 Joe Wreschnig
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"""pygame module with basic game object classes
This module contains several simple classes to be used within games. There
are the main Sprite class and several Group classes that contain Sprites.
The use of these classes is entirely optional when using Pygame. The classes
are fairly lightweight and only provide a starting place for the code
that is common to most games.
The Sprite class is intended to be used as a base class for the different
types of objects in the game. There is also a base Group class that simply
stores sprites. A game could create new types of Group classes that operate
on specially customized Sprite instances they contain.
The basic Sprite class can draw the Sprites it contains to a Surface. The
Group.draw() method requires that each Sprite have a Surface.image attribute
and a Surface.rect. The Group.clear() method requires these same attributes
and can be used to erase all the Sprites with background. There are also
more advanced Groups: pygame.sprite.RenderUpdates() and
pygame.sprite.OrderedUpdates().
Lastly, this module contains several collision functions. These help find
sprites inside multiple groups that have intersecting bounding rectangles.
To find the collisions, the Sprites are required to have a Surface.rect
attribute assigned.
The groups are designed for high efficiency in removing and adding Sprites
to them. They also allow cheap testing to see if a Sprite already exists in
a Group. A given Sprite can exist in any number of groups. A game could use
some groups to control object rendering, and a completely separate set of
groups to control interaction or player movement. Instead of adding type
attributes or bools to a derived Sprite class, consider keeping the
Sprites inside organized Groups. This will allow for easier lookup later
in the game.
Sprites and Groups manage their relationships with the add() and remove()
methods. These methods can accept a single or multiple group arguments for
membership. The default initializers for these classes also take a
single group or list of groups as argments for initial membership. It is safe
to repeatedly add and remove the same Sprite from a Group.
While it is possible to design sprite and group classes that don't derive
from the Sprite and AbstractGroup classes below, it is strongly recommended
that you extend those when you create a new Sprite or Group class.
Sprites are not thread safe, so lock them yourself if using threads.
"""
##todo
## a group that holds only the 'n' most recent elements.
## sort of like the GroupSingle class, but holding more
## than one sprite
##
## drawing groups that can 'automatically' store the area
## underneath so they can "clear" without needing a background
## function. obviously a little slower than normal, but nice
## to use in many situations. (also remember it must "clear"
## in the reverse order that it draws :])
##
## the drawing groups should also be able to take a background
## function, instead of just a background surface. the function
## would take a surface and a rectangle on that surface to erase.
##
## perhaps more types of collision functions? the current two
## should handle just about every need, but perhaps more optimized
## specific ones that aren't quite so general but fit into common
## specialized cases.
import pygame
from pygame.rect import Rect
from pygame.time import get_ticks
from operator import truth
# Python 3 does not have the callable function, but an equivalent can be made
# with the hasattr function.  NOTE: this rebinds the module-level name
# 'callable'; behavior matches the builtin for ordinary objects.
#if 'callable' not in dir(__builtins__):
callable = lambda obj: hasattr(obj, '__call__')

# Don't depend on pygame.mask if it's not there...
try:
    from pygame.mask import from_surface
except:
    pass
class Sprite(object):
    """simple base class for visible game objects
    pygame.sprite.Sprite(*groups): return Sprite
    The base class for visible game objects.  Derived classes will want to
    override the Sprite.update() method and assign Sprite.image and
    Sprite.rect attributes.  The initializer can accept any number of Group
    instances that the Sprite will become a member of.
    When subclassing the Sprite class, be sure to call the base initializer
    before adding the Sprite to Groups.
    """

    def __init__(self, *groups):
        # Maps each containing group to a dummy value (used as a set).
        self.__g = {}
        if groups:
            self.add(*groups)

    def add(self, *groups):
        """add the sprite to groups
        Sprite.add(*groups): return None
        Any number of Group instances can be passed as arguments.  The
        Sprite will be added to the Groups it is not already a member of.
        """
        for group in groups:
            if hasattr(group, '_spritegroup'):
                # A real sprite group: join it unless already a member.
                if group not in self.__g:
                    group.add_internal(self)
                    self.add_internal(group)
            else:
                # Assume an iterable of groups and recurse into it.
                self.add(*group)

    def remove(self, *groups):
        """remove the sprite from groups
        Sprite.remove(*groups): return None
        Any number of Group instances can be passed as arguments.  The
        Sprite will be removed from the Groups it is currently a member of.
        """
        for group in groups:
            if hasattr(group, '_spritegroup'):
                if group in self.__g:
                    group.remove_internal(self)
                    self.remove_internal(group)
            else:
                self.remove(*group)

    def add_internal(self, group):
        # Bookkeeping half of Group.add(); does not notify the group.
        self.__g[group] = 0

    def remove_internal(self, group):
        del self.__g[group]

    def update(self, *args):
        """method to control sprite behavior
        Sprite.update(*args):
        The default implementation does nothing; it is a hook called by
        Group.update() with whatever arguments you give it.
        """
        pass

    def kill(self):
        """remove the Sprite from all Groups
        Sprite.kill(): return None
        The Sprite is removed from all the Groups that contain it; its own
        state is untouched and it may be re-added later.
        """
        for group in self.__g:
            group.remove_internal(self)
        self.__g.clear()

    def groups(self):
        """list of Groups that contain this Sprite
        Sprite.groups(): return group_list
        """
        return list(self.__g)

    def alive(self):
        """does the sprite belong to any groups
        Sprite.alive(): return bool
        """
        return truth(self.__g)

    def __repr__(self):
        return "<%s sprite(in %d groups)>" % (self.__class__.__name__, len(self.__g))
class DirtySprite(Sprite):
    """a more featureful subclass of Sprite with more attributes
    pygame.sprite.DirtySprite(*groups): return DirtySprite
    Extra DirtySprite attributes with their default values:
    dirty = 1
        1: repaint once, then reset to 0; 2: always repaint (never reset);
        0: clean, not repainted.
    blendmode = 0
        The special_flags argument of Surface.blit; see the blendmodes in
        the Surface.blit documentation.
    source_rect = None
        Source rect to use, relative to the top left corner (0, 0) of
        self.image.
    visible = 1
        0 suppresses repainting.  (After setting visible back to 1 you must
        set dirty to 1 for the sprite to be erased from the screen.)
    _layer = 0
        A READ ONLY value, consumed when adding to a LayeredUpdates group.
    """

    def __init__(self, *groups):
        self.dirty = 1
        # pygame 1.8; referred to as special_flags in the documentation
        # of Surface.blit.
        self.blendmode = 0
        self._visible = 1
        self._layer = 0  # READ ONLY by LayeredUpdates or LayeredDirty
        self.source_rect = None
        Sprite.__init__(self, *groups)

    def _set_visible(self, val):
        """set the visible value (0 or 1) and makes the sprite dirty"""
        self._visible = val
        if self.dirty < 2:
            self.dirty = 1

    def _get_visible(self):
        """return the visible value of that sprite"""
        return self._visible

    # Lambdas keep the dispatch dynamic so subclasses overriding the
    # accessor methods are honored.
    visible = property(lambda self: self._get_visible(),
                       lambda self, value: self._set_visible(value),
                       doc="you can make this sprite disappear without "
                           "removing it from the group,\n"
                           "assign 0 for invisible and 1 for visible")

    def __repr__(self):
        return "<%s DirtySprite(in %d groups)>" % \
            (self.__class__.__name__, len(self.groups()))
class AbstractGroup(object):
    """base class for containers of sprites
    AbstractGroup does everything needed to behave as a normal group. You can
    easily subclass a new group class from this or the other groups below if
    you want to add more features.
    Any AbstractGroup-derived sprite groups act like sequences and support
    iteration, len, and so on.
    """

    # dummy val to identify sprite groups, and avoid infinite recursion
    _spritegroup = True

    def __init__(self):
        # spritedict maps sprite -> last drawn rect (0 before the first
        # draw); lostsprites collects rects of removed sprites that still
        # need erasing by clear().
        self.spritedict = {}
        self.lostsprites = []

    def sprites(self):
        """get a list of sprites in the group
        Group.sprite(): return list
        Returns an object that can be looped over with a 'for' loop. (For now,
        it is always a list, but this could change in a future version of
        pygame.) Alternatively, you can get the same information by iterating
        directly over the sprite group, e.g. 'for sprite in group'.
        """
        return list(self.spritedict)

    def add_internal(self, sprite):
        # Bookkeeping half of add(); does not notify the sprite.
        self.spritedict[sprite] = 0

    def remove_internal(self, sprite):
        # Remember the sprite's last drawn rect so clear() can erase it.
        r = self.spritedict[sprite]
        if r:
            self.lostsprites.append(r)
        del self.spritedict[sprite]

    def has_internal(self, sprite):
        return sprite in self.spritedict

    def copy(self):
        """copy a group with all the same sprites
        Group.copy(): return Group
        Returns a copy of the group that is an instance of the same class
        and has the same sprites in it.
        """
        return self.__class__(self.sprites())

    def __iter__(self):
        return iter(self.sprites())

    def __contains__(self, sprite):
        return self.has(sprite)

    def add(self, *sprites):
        """add sprite(s) to group
        Group.add(sprite, list, group, ...): return None
        Adds a sprite or sequence of sprites to a group.
        """
        for sprite in sprites:
            # It's possible that some sprite is also an iterator.
            # If this is the case, we should add the sprite itself,
            # and not the iterator object.
            if isinstance(sprite, Sprite):
                if not self.has_internal(sprite):
                    self.add_internal(sprite)
                    sprite.add_internal(self)
            else:
                try:
                    # See if sprite is an iterator, like a list or sprite
                    # group.
                    self.add(*sprite)
                except (TypeError, AttributeError):
                    # Not iterable. This is probably a sprite that is not an
                    # instance of the Sprite class or is not an instance of a
                    # subclass of the Sprite class. Alternately, it could be an
                    # old-style sprite group.
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if not self.has_internal(spr):
                                self.add_internal(spr)
                                spr.add_internal(self)
                    elif not self.has_internal(sprite):
                        self.add_internal(sprite)
                        sprite.add_internal(self)

    def remove(self, *sprites):
        """remove sprite(s) from group
        Group.remove(sprite, list, or group, ...): return None
        Removes a sprite or sequence of sprites from a group.
        """
        # This function behaves essentially the same as Group.add. It first
        # tries to handle each argument as an instance of the Sprite class. If
        # that failes, then it tries to handle the argument as an iterable
        # object. If that failes, then it tries to handle the argument as an
        # old-style sprite group. Lastly, if that fails, it assumes that the
        # normal Sprite methods should be used.
        for sprite in sprites:
            if isinstance(sprite, Sprite):
                if self.has_internal(sprite):
                    self.remove_internal(sprite)
                    sprite.remove_internal(self)
            else:
                try:
                    self.remove(*sprite)
                except (TypeError, AttributeError):
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if self.has_internal(spr):
                                self.remove_internal(spr)
                                spr.remove_internal(self)
                    elif self.has_internal(sprite):
                        self.remove_internal(sprite)
                        sprite.remove_internal(self)

    def has(self, *sprites):
        """ask if group has a sprite or sprites
        Group.has(sprite or group, ...): return bool
        Returns True if the given sprite or sprites are contained in the
        group. Alternatively, you can get the same information using the
        'in' operator, e.g. 'sprite in group', 'subgroup in group'.
        """
        # All arguments must be contained for a True result; any miss
        # returns False immediately.
        return_value = False

        for sprite in sprites:
            if isinstance(sprite, Sprite):
                # Check for Sprite instance's membership in this group
                if self.has_internal(sprite):
                    return_value = True
                else:
                    return False
            else:
                try:
                    if self.has(*sprite):
                        return_value = True
                    else:
                        return False
                except (TypeError, AttributeError):
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if self.has_internal(spr):
                                return_value = True
                            else:
                                return False
                    else:
                        if self.has_internal(sprite):
                            return_value = True
                        else:
                            return False

        return return_value

    def update(self, *args):
        """call the update method of every member sprite
        Group.update(*args): return None
        Calls the update method of every member sprite. All arguments that
        were passed to this method are passed to the Sprite update function.
        """
        for s in self.sprites():
            s.update(*args)

    def draw(self, surface):
        """draw all sprites onto the surface
        Group.draw(surface): return None
        Draws all of the member sprites onto the given surface.
        """
        #from javascript import console
        sprites = self.sprites()
        surface_blit = surface.blit
        for spr in sprites:
            #console.log(spr.image, spr.rect)
            #console.log(spr.image._canvas.width, spr.image._canvas.height)
            self.spritedict[spr] = surface_blit(spr.image, spr.rect)
        self.lostsprites = []

    def clear(self, surface, bgd):
        """erase the previous position of all sprites
        Group.clear(surface, bgd): return None
        Clears the area under every drawn sprite in the group. The bgd
        argument should be Surface which is the same dimensions as the
        screen surface. The bgd could also be a function which accepts
        the given surface and the area to be cleared as arguments.
        """
        if callable(bgd):
            # Callback form: let the caller repaint each dirty area.
            for r in self.lostsprites:
                bgd(surface, r)
            for r in self.spritedict.values():
                if r:
                    bgd(surface, r)
        else:
            # Surface form: blit the matching background region back.
            surface_blit = surface.blit
            for r in self.lostsprites:
                surface_blit(bgd, r, r)
            for r in self.spritedict.values():
                if r:
                    surface_blit(bgd, r, r)

    def empty(self):
        """remove all sprites
        Group.empty(): return None
        Removes all the sprites from the group.
        """
        for s in self.sprites():
            self.remove_internal(s)
            s.remove_internal(self)

    def __nonzero__(self):
        return truth(self.sprites())

    def __len__(self):
        """return number of sprites in group
        Group.len(group): return int
        Returns the number of sprites contained in the group.
        """
        return len(self.sprites())

    def __repr__(self):
        return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
    """container class for many Sprites
    pygame.sprite.Group(*sprites): return Group
    A simple container for Sprite objects.  This class can be subclassed to
    create containers with more specific behaviors.  The constructor takes
    any number of Sprite arguments to add to the Group.  The group supports
    the following standard Python operations:
        in      test if a Sprite is contained
        len     the number of Sprites contained
        bool    test if any Sprites are contained
        iter    iterate through all the Sprites
    The Sprites in the Group are not ordered, so drawing and iteration occur
    in no particular order.
    """

    def __init__(self, *sprites):
        AbstractGroup.__init__(self)
        self.add(*sprites)

# Backwards-compatible aliases for older pygame APIs.
RenderPlain = Group
RenderClear = Group
class RenderUpdates(Group):
    """Group class that tracks dirty updates
    pygame.sprite.RenderUpdates(*sprites): return RenderUpdates
    This class is derived from pygame.sprite.Group().  It has an enhanced
    draw method that tracks the changed areas of the screen.
    """

    def draw(self, surface):
        """Draw all sprites and return the list of changed screen areas."""
        spritedict = self.spritedict
        surface_blit = surface.blit
        # Start the dirty list with rects left behind by removed sprites.
        dirty = self.lostsprites
        self.lostsprites = []
        dirty_append = dirty.append
        for sprite in self.sprites():
            old_rect = spritedict[sprite]
            new_rect = surface_blit(sprite.image, sprite.rect)
            if old_rect:
                if new_rect.colliderect(old_rect):
                    # Overlapping old/new positions collapse into one area.
                    dirty_append(new_rect.union(old_rect))
                else:
                    dirty_append(new_rect)
                    dirty_append(old_rect)
            else:
                dirty_append(new_rect)
            spritedict[sprite] = new_rect
        return dirty
class OrderedUpdates(RenderUpdates):
    """RenderUpdates class that draws Sprites in order of addition
    pygame.sprite.OrderedUpdates(*spites): return OrderedUpdates
    This class derives from pygame.sprite.RenderUpdates().  It maintains the
    order in which the Sprites were added for rendering, which makes adding
    and removing Sprites a little slower than regular Groups.
    """

    def __init__(self, *sprites):
        self._spritelist = []
        RenderUpdates.__init__(self, *sprites)

    def sprites(self):
        """Return the sprites in the order they were added."""
        return list(self._spritelist)

    def add_internal(self, sprite):
        RenderUpdates.add_internal(self, sprite)
        self._spritelist.append(sprite)

    def remove_internal(self, sprite):
        RenderUpdates.remove_internal(self, sprite)
        self._spritelist.remove(sprite)
class LayeredUpdates(AbstractGroup):
"""LayeredUpdates Group handles layers, which are drawn like OrderedUpdates
pygame.sprite.LayeredUpdates(*spites, **kwargs): return LayeredUpdates
This group is fully compatible with pygame.sprite.Sprite.
New in pygame 1.8.0
"""
_init_rect = Rect(0, 0, 0, 0)
def __init__(self, *sprites, **kwargs):
"""initialize an instance of LayeredUpdates with the given attributes
You can set the default layer through kwargs using 'default_layer'
and an integer for the layer. The default layer is 0.
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwarg contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwarg has a 'layer', then the default layer is
used to add the sprites.
"""
self._spritelayers = {}
self._spritelist = []
AbstractGroup.__init__(self)
self._default_layer = kwargs.get('default_layer', 0)
self.add(*sprites, **kwargs)
    def add_internal(self, sprite, layer=None):
        """Do not use this method directly.
        It is used by the group to add a sprite internally, inserting it
        after all existing sprites of the same (or lower) layer.
        """
        self.spritedict[sprite] = self._init_rect

        # Resolve the effective layer: explicit argument wins, then the
        # sprite's own _layer, then the group default.
        if layer is None:
            try:
                layer = sprite._layer
            except AttributeError:
                layer = sprite._layer = self._default_layer
        elif hasattr(sprite, '_layer'):
            sprite._layer = layer

        sprites = self._spritelist # speedup
        sprites_layers = self._spritelayers
        sprites_layers[sprite] = layer

        # add the sprite at the right position
        # bisect algorithmus
        leng = len(sprites)
        low = mid = 0
        high = leng - 1
        while low <= high:
            mid = low + (high - low) // 2
            if sprites_layers[sprites[mid]] <= layer:
                low = mid + 1
            else:
                high = mid - 1
        # linear search to find final position
        while mid < leng and sprites_layers[sprites[mid]] <= layer:
            mid += 1
        sprites.insert(mid, sprite)
def add(self, *sprites, **kwargs):
"""add a sprite or sequence of sprites to a group
LayeredUpdates.add(*sprites, **kwargs): return None
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwarg contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwarg has a 'layer', then the default layer is
used to add the sprites.
"""
if not sprites:
return
if 'layer' in kwargs:
layer = kwargs['layer']
else:
layer = None
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite, **kwargs)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr, layer)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
def remove_internal(self, sprite):
"""Do not use this method directly.
The group uses it to add a sprite.
"""
self._spritelist.remove(sprite)
# these dirty rects are suboptimal for one frame
r = self.spritedict[sprite]
if r is not self._init_rect:
self.lostsprites.append(r) # dirty rect
if hasattr(sprite, 'rect'):
self.lostsprites.append(sprite.rect) # dirty rect
del self.spritedict[sprite]
del self._spritelayers[sprite]
def sprites(self):
"""return a ordered list of sprites (first back, last top).
LayeredUpdates.sprites(): return sprites
"""
return list(self._spritelist)
def draw(self, surface):
"""draw all sprites in the right order onto the passed surface
LayeredUpdates.draw(surface): return Rect_list
"""
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
init_rect = self._init_rect
for spr in self.sprites():
rec = spritedict[spr]
newrect = surface_blit(spr.image, spr.rect)
if rec is init_rect:
dirty_append(newrect)
else:
if newrect.colliderect(rec):
dirty_append(newrect.union(rec))
else:
dirty_append(newrect)
dirty_append(rec)
spritedict[spr] = newrect
return dirty
def get_sprites_at(self, pos):
"""return a list with all sprites at that position
LayeredUpdates.get_sprites_at(pos): return colliding_sprites
Bottom sprites are listed first; the top ones are listed last.
"""
_sprites = self._spritelist
rect = Rect(pos, (0, 0))
colliding_idx = rect.collidelistall(_sprites)
colliding = [_sprites[i] for i in colliding_idx]
return colliding
def get_sprite(self, idx):
"""return the sprite at the index idx from the groups sprites
LayeredUpdates.get_sprite(idx): return sprite
Raises IndexOutOfBounds if the idx is not within range.
"""
return self._spritelist[idx]
def remove_sprites_of_layer(self, layer_nr):
"""remove all sprites from a layer and return them as a list
LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites
"""
sprites = self.get_sprites_from_layer(layer_nr)
self.remove(*sprites)
return sprites
#---# layer methods
def layers(self):
"""return a list of unique defined layers defined.
LayeredUpdates.layers(): return layers
"""
return sorted(set(self._spritelayers.values()))
def change_layer(self, sprite, new_layer):
"""change the layer of the sprite
LayeredUpdates.change_layer(sprite, new_layer): return None
The sprite must have been added to the renderer already. This is not
checked.
"""
sprites = self._spritelist # speedup
sprites_layers = self._spritelayers # speedup
sprites.remove(sprite)
sprites_layers.pop(sprite)
# add the sprite at the right position
# bisect algorithmus
leng = len(sprites)
low = mid = 0
high = leng - 1
while low <= high:
mid = low + (high - low) // 2
if sprites_layers[sprites[mid]] <= new_layer:
low = mid + 1
else:
high = mid - 1
# linear search to find final position
while mid < leng and sprites_layers[sprites[mid]] <= new_layer:
mid += 1
sprites.insert(mid, sprite)
if hasattr(sprite, 'layer'):
sprite.layer = new_layer
# add layer info
sprites_layers[sprite] = new_layer
def get_layer_of_sprite(self, sprite):
"""return the layer that sprite is currently in
If the sprite is not found, then it will return the default layer.
"""
return self._spritelayers.get(sprite, self._default_layer)
def get_top_layer(self):
"""return the top layer
LayeredUpdates.get_top_layer(): return layer
"""
return self._spritelayers[self._spritelist[-1]]
def get_bottom_layer(self):
"""return the bottom layer
LayeredUpdates.get_bottom_layer(): return layer
"""
return self._spritelayers[self._spritelist[0]]
def move_to_front(self, sprite):
"""bring the sprite to front layer
LayeredUpdates.move_to_front(sprite): return None
Brings the sprite to front by changing the sprite layer to the top-most
layer. The sprite is added at the end of the list of sprites in that
top-most layer.
"""
self.change_layer(sprite, self.get_top_layer())
def move_to_back(self, sprite):
"""move the sprite to the bottom layer
LayeredUpdates.move_to_back(sprite): return None
Moves the sprite to the bottom layer by moving it to a new layer below
the current bottom layer.
"""
self.change_layer(sprite, self.get_bottom_layer() - 1)
def get_top_sprite(self):
"""return the topmost sprite
LayeredUpdates.get_top_sprite(): return Sprite
"""
return self._spritelist[-1]
def get_sprites_from_layer(self, layer):
"""return all sprites from a layer ordered as they where added
LayeredUpdates.get_sprites_from_layer(layer): return sprites
Returns all sprites from a layer. The sprites are ordered in the
sequence that they where added. (The sprites are not removed from the
layer.
"""
sprites = []
sprites_append = sprites.append
sprite_layers = self._spritelayers
for spr in self._spritelist:
if sprite_layers[spr] == layer:
sprites_append(spr)
elif sprite_layers[spr] > layer:# break after because no other will
# follow with same layer
break
return sprites
def switch_layer(self, layer1_nr, layer2_nr):
"""switch the sprites from layer1_nr to layer2_nr
LayeredUpdates.switch_layer(layer1_nr, layer2_nr): return None
The layers number must exist. This method does not check for the
existence of the given layers.
"""
sprites1 = self.remove_sprites_of_layer(layer1_nr)
for spr in self.get_sprites_from_layer(layer2_nr):
self.change_layer(spr, layer1_nr)
self.add(layer=layer2_nr, *sprites1)
class LayeredDirty(LayeredUpdates):
    """LayeredDirty Group is for DirtySprites; subclasses LayeredUpdates

    pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty

    This group requires pygame.sprite.DirtySprite or any sprite that
    has the following attributes:
    image, rect, dirty, visible, blendmode (see doc of DirtySprite).

    It uses the dirty flag technique and is therefore faster than
    pygame.sprite.RenderUpdates if you have many static sprites. It
    also switches automatically between dirty rect updating and full
    screen drawing, so you do not have to worry which would be faster.

    As with the pygame.sprite.Group, you can specify some additional attributes
    through kwargs:
    _use_update: True/False (default is False)
    _default_layer: default layer where the sprites without a layer are
    added
    _time_threshold: threshold time for switching between dirty rect mode
    and fullscreen mode; defaults to updating at 80 frames per second,
    which is equal to 1000.0 / 80.0

    New in pygame 1.8.0
    """
    def __init__(self, *sprites, **kwargs):
        """initialize group.

        pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty

        You can specify some additional attributes through kwargs:
        _use_update: True/False (default is False)
        _default_layer: default layer where the sprites without a layer are
        added
        _time_threshold: threshold time for switching between dirty rect
        mode and fullscreen mode; defaults to updating at 80 frames per
        second, which is equal to 1000.0 / 80.0
        """
        LayeredUpdates.__init__(self, *sprites, **kwargs)
        self._clip = None
        self._use_update = False
        self._time_threshold = 1000.0 / 80.0 # 1000.0 / fps
        self._bgd = None
        # only honour kwargs that name one of the attributes set above
        for key, val in kwargs.items():
            if key in ['_use_update', '_time_threshold', '_default_layer']:
                if hasattr(self, key):
                    setattr(self, key, val)
    def add_internal(self, sprite, layer=None):
        """Do not use this method directly.

        It is used by the group to add a sprite internally.
        Raises AttributeError/TypeError when *sprite* lacks the
        DirtySprite protocol (dirty, visible, blendmode attributes).
        """
        # check if all needed attributes are set
        if not hasattr(sprite, 'dirty'):
            raise AttributeError()
        if not hasattr(sprite, 'visible'):
            raise AttributeError()
        if not hasattr(sprite, 'blendmode'):
            raise AttributeError()
        if not isinstance(sprite, DirtySprite):
            raise TypeError()
        if sprite.dirty == 0: # set it dirty if it is not
            sprite.dirty = 1
        LayeredUpdates.add_internal(self, sprite, layer)
    def draw(self, surface, bgd=None):
        """draw all sprites in the right order onto the given surface

        LayeredDirty.draw(surface, bgd=None): return Rect_list

        You can pass the background too. If a self.bgd is already set to some
        value that is not None, then the bgd argument has no effect.
        """
        # speedups
        _orig_clip = surface.get_clip()
        _clip = self._clip
        if _clip is None:
            _clip = _orig_clip
        _surf = surface
        _sprites = self._spritelist
        _old_rect = self.spritedict
        _update = self.lostsprites
        _update_append = _update.append
        _ret = None
        _surf_blit = _surf.blit
        _rect = Rect
        if bgd is not None:
            self._bgd = bgd
        _bgd = self._bgd
        init_rect = self._init_rect
        _surf.set_clip(_clip)
        # -------
        # 0. decide whether to render with update or flip
        start_time = get_ticks()
        if self._use_update: # dirty rects mode
            # 1. find dirty area on screen and put the rects into _update
            # still not happy with that part
            for spr in _sprites:
                if 0 < spr.dirty:
                    # chose the right rect
                    if spr.source_rect:
                        _union_rect = _rect(spr.rect.topleft,
                                            spr.source_rect.size)
                    else:
                        _union_rect = _rect(spr.rect)
                    # merge every overlapping rect already collected into
                    # this one so _update stays a list of disjoint rects
                    _union_rect_collidelist = _union_rect.collidelist
                    _union_rect_union_ip = _union_rect.union_ip
                    i = _union_rect_collidelist(_update)
                    while -1 < i:
                        _union_rect_union_ip(_update[i])
                        del _update[i]
                        i = _union_rect_collidelist(_update)
                    _update_append(_union_rect.clip(_clip))
                    # the sprite's previous on-screen area is dirty as well
                    if _old_rect[spr] is not init_rect:
                        _union_rect = _rect(_old_rect[spr])
                        _union_rect_collidelist = _union_rect.collidelist
                        _union_rect_union_ip = _union_rect.union_ip
                        i = _union_rect_collidelist(_update)
                        while -1 < i:
                            _union_rect_union_ip(_update[i])
                            del _update[i]
                            i = _union_rect_collidelist(_update)
                        _update_append(_union_rect.clip(_clip))
            # can it be done better? because that is an O(n**2) algorithm in
            # worst case
            # clear using background
            if _bgd is not None:
                for rec in _update:
                    _surf_blit(_bgd, rec, rec)
            # 2. draw
            for spr in _sprites:
                if 1 > spr.dirty:
                    if spr._visible:
                        # sprite not dirty; blit only the intersecting part
                        _spr_rect = spr.rect
                        if spr.source_rect is not None:
                            _spr_rect = Rect(spr.rect.topleft,
                                             spr.source_rect.size)
                        _spr_rect_clip = _spr_rect.clip
                        for idx in _spr_rect.collidelistall(_update):
                            # clip
                            clip = _spr_rect_clip(_update[idx])
                            _surf_blit(spr.image,
                                       clip,
                                       (clip[0] - _spr_rect[0],
                                        clip[1] - _spr_rect[1],
                                        clip[2],
                                        clip[3]),
                                       spr.blendmode)
                else: # dirty sprite
                    if spr._visible:
                        _old_rect[spr] = _surf_blit(spr.image,
                                                    spr.rect,
                                                    spr.source_rect,
                                                    spr.blendmode)
                    if spr.dirty == 1:
                        # dirty == 2 means "always dirty" and is left as-is
                        spr.dirty = 0
            _ret = list(_update)
        else: # flip, full screen mode
            if _bgd is not None:
                _surf_blit(_bgd, (0, 0))
            for spr in _sprites:
                if spr._visible:
                    _old_rect[spr] = _surf_blit(spr.image,
                                                spr.rect,
                                                spr.source_rect,
                                                spr.blendmode)
            _ret = [_rect(_clip)] # return only the part of the screen changed
        # timing for switching modes
        # How may a good threshold be found? It depends on the hardware.
        end_time = get_ticks()
        if end_time-start_time > self._time_threshold:
            self._use_update = False
        else:
            self._use_update = True
##        # debug
##        print "               check    with self.use_update:", self._use_update
        # emtpy dirty rects list
        _update[:] = []
        # -------
        # restore original clip
        _surf.set_clip(_orig_clip)
        return _ret
    def clear(self, surface, bgd):
        """use to set background

        Group.clear(surface, bgd): return None
        """
        self._bgd = bgd
    def repaint_rect(self, screen_rect):
        """repaint the given area

        LayeredDirty.repaint_rect(screen_rect): return None

        screen_rect is in screen coordinates.
        """
        if self._clip:
            self.lostsprites.append(screen_rect.clip(self._clip))
        else:
            self.lostsprites.append(Rect(screen_rect))
    def set_clip(self, screen_rect=None):
        """clip the area where to draw; pass None (default) to reset the clip

        LayeredDirty.set_clip(screen_rect=None): return None
        """
        if screen_rect is None:
            self._clip = pygame.display.get_surface().get_rect()
        else:
            self._clip = screen_rect
        # force a full-screen redraw on the next draw() call; the timing
        # check in draw() will re-enable dirty-rect mode if it is faster
        self._use_update = False
    def get_clip(self):
        """get the area where drawing will occur

        LayeredDirty.get_clip(): return Rect
        """
        return self._clip
    def change_layer(self, sprite, new_layer):
        """change the layer of the sprite

        LayeredUpdates.change_layer(sprite, new_layer): return None

        The sprite must have been added to the renderer already. This is not
        checked.
        """
        LayeredUpdates.change_layer(self, sprite, new_layer)
        # a moved sprite must be redrawn
        if sprite.dirty == 0:
            sprite.dirty = 1
    def set_timing_treshold(self, time_ms):
        """set the threshold in milliseconds

        set_timing_treshold(time_ms): return None

        Defaults to 1000.0 / 80.0. This means that the screen will be painted
        using the flip method rather than the update method if the update
        method is taking so long to update the screen that the frame rate falls
        below 80 frames per second.
        """
        self._time_threshold = time_ms
class GroupSingle(AbstractGroup):
    """A group container that holds a single most recent item.

    This class works just like a regular group, but it only keeps a single
    sprite in the group. Whatever sprite has been added to the group last will
    be the only sprite in the group.

    You can access its one sprite as the .sprite attribute. Assigning to this
    attribute will properly remove the old sprite and then add the new one.
    """
    def __init__(self, sprite=None):
        AbstractGroup.__init__(self)
        # name-mangled so subclasses cannot clobber the slot by accident
        self.__sprite = None
        if sprite is not None:
            self.add(sprite)
    def copy(self):
        """Return a new GroupSingle holding the same sprite."""
        return GroupSingle(self.__sprite)
    def sprites(self):
        """Return the held sprite in a one-element list, or an empty list."""
        if self.__sprite is not None:
            return [self.__sprite]
        else:
            return []
    def add_internal(self, sprite):
        """Do not use this method directly.

        Detaches any previously held sprite from this group, then stores
        *sprite* as the single member.
        """
        if self.__sprite is not None:
            self.__sprite.remove_internal(self)
            self.remove_internal(self.__sprite)
        self.__sprite = sprite
    def __nonzero__(self):
        # Python 2 truth protocol: the group is truthy while it holds a sprite
        return self.__sprite is not None
    def _get_sprite(self):
        # getter backing the ``sprite`` property
        return self.__sprite
    def _set_sprite(self, sprite):
        # setter backing the ``sprite`` property: add_internal() takes care
        # of detaching the previously held sprite first
        self.add_internal(sprite)
        sprite.add_internal(self)
        return sprite
    sprite = property(_get_sprite,
                      _set_sprite,
                      None,
                      "The sprite contained in this group")
    def remove_internal(self, sprite):
        """Do not use this method directly.

        Clears the held sprite (when it is *sprite*) and the base-group
        bookkeeping for it.
        """
        if sprite is self.__sprite:
            self.__sprite = None
        if sprite in self.spritedict:
            AbstractGroup.remove_internal(self, sprite)
    def has_internal(self, sprite):
        """True if *sprite* is the sprite held by this group."""
        return self.__sprite is sprite
    # Optimizations...
    def __contains__(self, sprite):
        return self.__sprite is sprite
# Some different collision detection functions that could be used.
def collide_rect(left, right):
    """collision detection between two sprites, using rects.

    pygame.sprite.collide_rect(left, right): return bool

    Tests for collision between two sprites. Uses the pygame.Rect colliderect
    function to calculate the collision. It is intended to be passed as a
    collided callback function to the *collide functions. Sprites must have
    "rect" attributes.

    New in pygame 1.8.0
    """
    lrect = left.rect
    rrect = right.rect
    return lrect.colliderect(rrect)
class collide_rect_ratio:
    """A callable class that checks for collisions using scaled rects

    The class checks for collisions between two sprites using a scaled version
    of the sprites' rects. Is created with a ratio; the instance is then
    intended to be passed as a collided callback function to the *collide
    functions.

    New in pygame 1.8.1
    """

    def __init__(self, ratio):
        """create a new collide_rect_ratio callable

        Ratio is expected to be a floating point value used to scale
        the underlying sprite rect before checking for collisions.
        """
        self.ratio = ratio

    def __call__(self, left, right):
        """detect collision between two sprites using scaled rects

        pygame.sprite.collide_rect_ratio(ratio)(left, right): return bool

        Tests for collision between two sprites. Uses the pygame.Rect
        colliderect function to calculate the collision after scaling the rects
        by the stored ratio. Sprites must have "rect" attributes.
        """
        ratio = self.ratio

        def _scaled(rect):
            # grow (or shrink) the rect about its center so that its new
            # size is ``size * ratio``
            width = rect.width
            height = rect.height
            return rect.inflate(width * ratio - width, height * ratio - height)

        return _scaled(left.rect).colliderect(_scaled(right.rect))
def collide_circle(left, right):
    """detect collision between two sprites using circles

    pygame.sprite.collide_circle(left, right): return bool

    Tests for collision between two sprites by testing whether two circles
    centered on the sprites overlap. If the sprites have a "radius" attribute,
    then that radius is used to create the circle; otherwise, a circle is
    created that is big enough to completely enclose the sprite's rect as
    given by the "rect" attribute. This function is intended to be passed as
    a collided callback function to the *collide functions. Sprites must have a
    "rect" and an optional "radius" attribute.

    New in pygame 1.8.0
    """
    def _radius_of(sprite):
        try:
            return sprite.radius
        except AttributeError:
            rect = sprite.rect
            # approximate the enclosing circle by half of the rect's
            # diagonal; may give false positives for long thin rects
            radius = 0.5 * ((rect.width ** 2 + rect.height ** 2) ** 0.5)
            # store the radius on the sprite for next time
            sprite.radius = radius
            return radius

    xdist = left.rect.centerx - right.rect.centerx
    ydist = left.rect.centery - right.rect.centery
    reach = _radius_of(left) + _radius_of(right)
    return xdist ** 2 + ydist ** 2 <= reach ** 2
class collide_circle_ratio(object):
    """detect collision between two sprites using scaled circles

    This callable class checks for collisions between two sprites using a
    scaled version of a sprite's radius. It is created with a ratio as the
    argument to the constructor. The instance is then intended to be passed as
    a collided callback function to the *collide functions.

    New in pygame 1.8.1
    """

    def __init__(self, ratio):
        """creates a new collide_circle_ratio callable instance

        The given ratio is expected to be a floating point value used to scale
        the underlying sprite radius before checking for collisions.

        When the ratio is ratio=1.0, then it behaves exactly like the
        collide_circle method.
        """
        self.ratio = ratio

    def __call__(self, left, right):
        """detect collision between two sprites using scaled circles

        pygame.sprite.collide_circle_ratio(ratio)(left, right): return bool

        Tests for collision between two sprites by testing whether two circles
        centered on the sprites overlap after scaling the circle's radius by
        the stored ratio. If the sprites have a "radius" attribute, that is
        used to create the circle; otherwise, a circle is created that is big
        enough to completely enclose the sprite's rect as given by the "rect"
        attribute. Intended to be passed as a collided callback function to the
        *collide functions. Sprites must have a "rect" and an optional "radius"
        attribute.
        """
        ratio = self.ratio
        xdistance = left.rect.centerx - right.rect.centerx
        ydistance = left.rect.centery - right.rect.centery
        distancesquared = xdistance ** 2 + ydistance ** 2
        if hasattr(left, "radius"):
            leftradius = left.radius
        else:
            leftrect = left.rect
            leftradius = 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
            # store the *unscaled* radius on the sprite for next time.
            # Caching the scaled value (as this code previously did) made
            # repeated calls apply the ratio more than once, giving
            # inconsistent results between the first and later calls.
            setattr(left, 'radius', leftradius)
        leftradius *= ratio
        if hasattr(right, "radius"):
            rightradius = right.radius
        else:
            rightrect = right.rect
            rightradius = 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
            # store the unscaled radius on the sprite for next time
            setattr(right, 'radius', rightradius)
        rightradius *= ratio
        return distancesquared <= (leftradius + rightradius) ** 2
def collide_mask(left, right):
    """collision detection between two sprites, using masks.

    pygame.sprite.collide_mask(SpriteLeft, SpriteRight): bool

    Tests for collision between two sprites by testing if their bitmasks
    overlap. If the sprites have a "mask" attribute, that is used as the mask;
    otherwise, a mask is created from the sprite image. Intended to be passed
    as a collided callback function to the *collide functions. Sprites must
    have a "rect" and an optional "mask" attribute.

    New in pygame 1.8.0
    """
    offset = (right.rect[0] - left.rect[0], right.rect[1] - left.rect[1])
    try:
        lmask = left.mask
    except AttributeError:
        lmask = from_surface(left.image)
    try:
        rmask = right.mask
    except AttributeError:
        rmask = from_surface(right.image)
    return lmask.overlap(rmask, offset)
def spritecollide(sprite, group, dokill, collided=None):
    """find Sprites in a Group that intersect another Sprite

    pygame.sprite.spritecollide(sprite, group, dokill, collided=None):
        return Sprite_list

    Return a list containing all Sprites in a Group that intersect with another
    Sprite. Intersection is determined by comparing the Sprite.rect attribute
    of each Sprite.

    The dokill argument is a bool. If set to True, all Sprites that collide
    will be removed from the Group.

    The collided argument is a callback function used to calculate if two
    sprites are colliding. it should take two sprites as values, and return a
    bool value indicating if they are colliding. If collided is not passed, all
    sprites must have a "rect" value, which is a rectangle of the sprite area,
    which will be used to calculate the collision.
    """
    if collided:
        def hit(other):
            return collided(sprite, other)
    else:
        # default: plain rect intersection (bind the method once for speed)
        rect_collide = sprite.rect.colliderect
        def hit(other):
            return rect_collide(other.rect)

    if dokill:
        # snapshot via sprites() because kill() mutates the group
        crashed = []
        for member in group.sprites():
            if hit(member):
                member.kill()
                crashed.append(member)
        return crashed
    return [member for member in group if hit(member)]
def groupcollide(groupa, groupb, dokilla, dokillb, collided=None):
    """detect collision between a group and another group

    pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb):
        return dict

    Given two groups, this will find the intersections between all sprites in
    each group. It returns a dictionary of all sprites in the first group that
    collide. The value for each item in the dictionary is a list of the sprites
    in the second group it collides with. The two dokill arguments control if
    the sprites from either group will be automatically removed from all
    groups. Collided is a callback function used to calculate if two sprites
    are colliding. it should take two sprites as values, and return a bool
    value indicating if they are colliding. If collided is not passed, all
    sprites must have a "rect" value, which is a rectangle of the sprite area
    that will be used to calculate the collision.
    """
    crashed = {}
    # snapshot groupa when we may mutate it by killing its sprites
    members = groupa.sprites() if dokilla else groupa
    for member in members:
        hits = spritecollide(member, groupb, dokillb, collided)
        if hits:
            crashed[member] = hits
            if dokilla:
                member.kill()
    return crashed
def spritecollideany(sprite, group, collided=None):
    """finds any sprites in a group that collide with the given sprite

    pygame.sprite.spritecollideany(sprite, group): return sprite

    Given a sprite and a group of sprites, this will return return any single
    sprite that collides with with the given sprite. If there are no
    collisions, then this returns None.

    If you don't need all the features of the spritecollide function, this
    function will be a bit quicker.

    Collided is a callback function used to calculate if two sprites are
    colliding. It should take two sprites as values and return a bool value
    indicating if they are colliding. If collided is not passed, then all
    sprites must have a "rect" value, which is a rectangle of the sprite area,
    which will be used to calculate the collision.
    """
    if collided:
        for member in group:
            if collided(sprite, member):
                return member
        return None
    # Special case old behaviour for speed.
    rect_collide = sprite.rect.colliderect
    for member in group:
        if rect_collide(member.rect):
            return member
    return None
| gpl-3.0 |
r39132/airflow | airflow/contrib/kubernetes/volume.py | 4 | 1343 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class Volume:
    """Defines Kubernetes Volume"""

    def __init__(self, name, configs):
        """Adds a Kubernetes Volume to the pod, allowing it to access
        features like ConfigMaps and Persistent Volumes.

        :param name: the name of the volume mount
        :type name: str
        :param configs: dictionary of any features needed for volume.
            Deliberately kept vague since there are multiple volume types
            with changing configs.
        :type configs: dict
        """
        self.name, self.configs = name, configs
| apache-2.0 |
surgebiswas/poker | PokerBots_2017/Johnny/setuptools/command/alias.py | 130 | 2408 | from distutils.errors import DistutilsOptionError
from six.moves import map
from setuptools.command.setopt import edit_config, option_base, config_file
def shquote(arg):
    """Quote an argument for later parsing by shlex.split()

    Returns repr(arg) when the argument contains quote/escape/comment
    characters or any whitespace that shlex would re-tokenize; otherwise
    the argument is returned unchanged.
    """
    needs_quoting = any(ch in arg for ch in ('"', "'", "\\", "#"))
    if needs_quoting or arg.split() != [arg]:
        return repr(arg)
    return arg
class alias(option_base):
    """Define a shortcut that invokes one or more commands"""

    description = "define a shortcut to invoke one or more commands"
    command_consumes_arguments = True

    user_options = [
        ('remove', 'r', 'remove (unset) the alias'),
    ] + option_base.user_options

    boolean_options = option_base.boolean_options + ['remove']

    def initialize_options(self):
        """Reset option state before command-line parsing."""
        option_base.initialize_options(self)
        self.args = None
        self.remove = None

    def finalize_options(self):
        """Validate parsed options: --remove takes exactly one alias name."""
        option_base.finalize_options(self)
        if self.remove and len(self.args) != 1:
            raise DistutilsOptionError(
                "Must specify exactly one argument (the alias name) when "
                "using --remove"
            )

    def run(self):
        """List, show, remove, or define an alias depending on arguments."""
        aliases = self.distribution.get_option_dict('aliases')

        if not self.args:
            # no arguments: list every known alias and stop
            print("Command Aliases")
            print("---------------")
            for name in aliases:
                print("setup.py alias", format_alias(name, aliases))
            return

        if len(self.args) == 1:
            name, = self.args
            if self.remove:
                # a None command removes the alias from the config below
                command = None
            elif name in aliases:
                print("setup.py alias", format_alias(name, aliases))
                return
            else:
                print("No alias definition found for %r" % name)
                return
        else:
            # first argument is the alias name, the rest form the command
            name = self.args[0]
            command = ' '.join(map(shquote, self.args[1:]))

        edit_config(self.filename, {'aliases': {name: command}}, self.dry_run)
def format_alias(name, aliases):
    """Render one alias as a command-line string.

    The result is prefixed with the config-scope flag (--global-config,
    --user-config, empty for local, or --filename=...) identifying the
    config file the alias was read from.
    """
    source, command = aliases[name]
    if source == config_file('global'):
        prefix = '--global-config '
    elif source == config_file('user'):
        prefix = '--user-config '
    elif source == config_file('local'):
        prefix = ''
    else:
        prefix = '--filename=%r' % source
    return prefix + name + ' ' + command
| mit |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/HardContact_NonLinHardSoftShear/Normal_Load/Sigma_n_1/compare_HDF5_ALL.py | 424 | 3382 | #!/usr/bin/python
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
# NOTE(review): assumes this script is run from somewhere under a
# 'test_cases' directory; everything before that component is taken as
# the repository root -- confirm against the test-suite layout.
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# the real essi hdf5 results
# argv[1] is the newly generated HDF5 file, argv[2] the reference one
h5_result_new = sys.argv[1]
h5_result_ori = sys.argv[2]
# compare displacement datasets between reference and new results
disp_pass_or_fail=h5diff_disp(h5_result_ori,h5_result_new)
# Gauss-point output may be absent from the HDF5 file; a missing key is
# treated as "nothing to compare" and the flag stays 1 (pass)
Gauss_pass_or_fail = 1
try:
    Gauss_pass_or_fail=h5diff_Gauss_output(h5_result_ori,h5_result_new)
except KeyError:
    pass
# same best-effort comparison for the element output datasets
Element_Output_pass_or_fail = 1
try:
    Element_Output_pass_or_fail=h5diff_Element_output(h5_result_ori,h5_result_new)
except KeyError:
    pass
# report overall result (Python 2 print statements); all three
# comparisons must have passed for the case to be declared OK
if disp_pass_or_fail and Gauss_pass_or_fail and Element_Output_pass_or_fail:
    print headOK(), "All hdf5 results are the same."
    print headOKCASE(),"-----------Done this case!-----------------"
else:
    if disp_pass_or_fail==0:
        print headFailed(),"-----------Displacement has mismatches!-----------------"
    if Gauss_pass_or_fail==0:
        print headFailed(),"-----------StressStrain has mismatches!-----------------"
    if Element_Output_pass_or_fail==0:
        print headFailed(),"-----------Element output has mismatches!-----------------"
# # The allowable tolerance between the ori_vals and new_vals values.
# tolerance=1e-5
# machine_epsilon=1e-16
# ori_vals=[]
# new_vals=[]
# ori_vals.append(find_max_disp(h5_result_ori,0))
# new_vals.append(find_max_disp(h5_result_new,0))
# # if multiple steps, compare the max_disp of random steps
# Nstep = find_disp_Nstep(h5_result_ori)
# if Nstep>5 :
# for i in xrange(1,4):
# test_step=random.randint(1,Nstep-1)
# ori_vals.append(find_max_disp(h5_result_ori,test_step))
# new_vals.append(find_max_disp(h5_result_new,test_step))
# # calculate the errors
# errors=[]
# for index, x in enumerate(ori_vals):
# if(abs(x))>machine_epsilon:
# errors.append(abs((new_vals[index]-x)/x))
# else:
# errors.append(machine_epsilon)
# # compare and form the flags
# flags=[]
# for item in errors:
# if abs(item)<tolerance:
# flags.append('pass')
# else:
# flags.append('failed')
# # print the results
# case_flag=1
# print headrun() , "-----------Testing results-----------------"
# print headstep() ,'{0} {1} {2} {3}'.format('back_value ','new_value ','error ','flag')
# for index, x in enumerate(errors):
# if(abs(x)<tolerance):
# print headOK() ,'{0:e} {1:e} {2:0.2f} {3}'.format(ori_vals[index],new_vals[index], x, flags[index] )
# else:
# case_flag=0
# print headFailed() ,'{0:e} {1:e} {2:0.2f} {3}'.format(ori_vals[index],new_vals[index], x, flags[index] )
# if(case_flag==1):
# print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# automatically find the script directory.
# sys.path.append("/home/yuan/Dropbox/3essi_self_verification/test_suite/scripts" )
# script_dir=sys.argv[1]
# print headstart() , "Running test cases..."
# print headlocation(), os.path.dirname(os.path.abspath(__file__))
# file_in=open("ori_vals_values.txt","r")
# Input the 1st line, which is the ori_vals value.
# ori_vals= float(file_in.readline())
# Input the 2nd line, which is the HDF5 output filename.
# new_vals=find_max_disp(file_in.readline());
# file_in.close() | cc0-1.0 |
bbondy/brianbondy.gae | libs/werkzeug/testsuite/contrib/cache.py | 1 | 5814 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.cache
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the cache system
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import unittest
import tempfile
import shutil
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib import cache
# redis is optional: additionally probe the server with a real SET so the
# Redis test case is skipped (redis = None) when no server is reachable,
# not only when the client library is missing.
try:
    import redis
    try:
        from redis.exceptions import ConnectionError as RedisConnectionError
        cache.RedisCache(key_prefix='werkzeug-test-case:')._client.set('test','connection')
    except RedisConnectionError:
        redis = None
except ImportError:
    redis = None
# Probe the available memcached client libraries in order of preference:
# pylibmc, then the App Engine API, then python-memcached.
try:
    import pylibmc as memcache
except ImportError:
    try:
        from google.appengine.api import memcache
    except ImportError:
        try:
            import memcache
        except ImportError:
            memcache = None
class CacheTestCase(WerkzeugTestCase):
    """Generic behavioural tests shared by every cache backend.

    Subclasses set ``make_cache`` to a factory returning a fresh cache
    instance; all ``test_generic_*`` methods then run against that backend.
    """

    # Factory producing the cache under test; set by concrete subclasses.
    make_cache = None

    def test_generic_get_dict(self):
        """get_dict() returns all requested keys in a single mapping."""
        c = self.make_cache()
        assert c.set('a', 'a')
        assert c.set('b', 'b')
        d = c.get_dict('a', 'b')
        assert 'a' in d
        assert 'a' == d['a']
        assert 'b' in d
        assert 'b' == d['b']

    def test_generic_set_many(self):
        """set_many() accepts both mappings and iterables of pairs."""
        c = self.make_cache()
        assert c.set_many({0: 0, 1: 1, 2: 4})
        assert c.get(2) == 4
        assert c.set_many((i, i*i) for i in range(3))
        assert c.get(2) == 4

    def test_generic_set_get(self):
        """Values round-trip unchanged through set()/get()."""
        c = self.make_cache()
        for i in range(3):
            assert c.set(str(i), i * i)
        for i in range(3):
            result = c.get(str(i))
            assert result == i * i, result

    def test_generic_get_set(self):
        """Non-scalar values (lists) round-trip through set()/get()."""
        c = self.make_cache()
        assert c.set('foo', ['bar'])
        assert c.get('foo') == ['bar']

    def test_generic_get_many(self):
        """get_many() returns values in the order the keys were requested."""
        c = self.make_cache()
        assert c.set('foo', ['bar'])
        assert c.set('spam', 'eggs')
        self.assert_equal(list(c.get_many('foo', 'spam')), [['bar'], 'eggs'])

    def test_generic_set_many_mixed_values(self):
        """set_many() stores mixed scalar and container values.

        BUG FIX: this used to be a second ``test_generic_set_many``
        definition, which silently shadowed the first one so the first
        test never ran; renamed so both now execute.
        """
        c = self.make_cache()
        assert c.set_many({'foo': 'bar', 'spam': ['eggs']})
        assert c.get('foo') == 'bar'
        assert c.get('spam') == ['eggs']

    def test_generic_expire(self):
        """A key set with a 1-second timeout is gone after 2 seconds."""
        c = self.make_cache()
        assert c.set('foo', 'bar', 1)
        time.sleep(2)
        assert c.get('foo') is None

    def test_generic_add(self):
        """add() sets a missing key but refuses to overwrite an existing one."""
        c = self.make_cache()
        # sanity check that add() works like set()
        assert c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        assert not c.add('foo', 'qux')
        assert c.get('foo') == 'bar'

    def test_generic_delete(self):
        """delete() removes a key so a later get() returns None."""
        c = self.make_cache()
        assert c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        assert c.delete('foo')
        assert c.get('foo') is None

    def test_generic_delete_many(self):
        """delete_many() removes every listed key."""
        c = self.make_cache()
        assert c.add('foo', 'bar')
        assert c.add('spam', 'eggs')
        assert c.delete_many('foo', 'spam')
        assert c.get('foo') is None
        assert c.get('spam') is None

    def test_generic_inc_dec(self):
        """inc()/dec() adjust a stored integer and return the new value."""
        c = self.make_cache()
        assert c.set('foo', 1)
        assert c.inc('foo') == c.get('foo') == 2
        assert c.dec('foo') == c.get('foo') == 1
        assert c.delete('foo')

    def test_generic_true_false(self):
        """Boolean values round-trip without being coerced."""
        c = self.make_cache()
        assert c.set('foo', True)
        assert c.get('foo') == True
        assert c.set('bar', False)
        assert c.get('bar') == False
class SimpleCacheTestCase(CacheTestCase):
    """Runs the generic cache tests against the in-memory SimpleCache."""
    make_cache = cache.SimpleCache
class FileSystemCacheTestCase(CacheTestCase):
    """Runs the generic cache tests against FileSystemCache plus FS-specific ones."""

    # Lazily-created temp directory holding the cache files for one test.
    tmp_dir = None

    def make_cache(self, **kwargs):
        """Create a FileSystemCache rooted in a per-test temp directory."""
        if self.tmp_dir is None:
            self.tmp_dir = tempfile.mkdtemp()
        return cache.FileSystemCache(cache_dir=self.tmp_dir, **kwargs)

    def teardown(self):
        # Remove the temp directory so test runs do not leak files.
        if self.tmp_dir is not None:
            shutil.rmtree(self.tmp_dir)

    def test_filesystemcache_prune(self):
        """Writing past the threshold prunes the on-disk entry count back down."""
        THRESHOLD = 13
        c = self.make_cache(threshold=THRESHOLD)
        for i in range(2 * THRESHOLD):
            assert c.set(str(i), i)
        cache_files = os.listdir(self.tmp_dir)
        assert len(cache_files) <= THRESHOLD

    def test_filesystemcache_clear(self):
        """clear() deletes every cache file from the directory."""
        c = self.make_cache()
        assert c.set('foo', 'bar')
        cache_files = os.listdir(self.tmp_dir)
        assert len(cache_files) == 1
        assert c.clear()
        cache_files = os.listdir(self.tmp_dir)
        assert len(cache_files) == 0
class RedisCacheTestCase(CacheTestCase):
    """Runs the generic cache tests against a live Redis server.

    Only added to the suite when the module-level probe found a server.
    """

    def make_cache(self):
        return cache.RedisCache(key_prefix='werkzeug-test-case:')

    def teardown(self):
        # Flush all test keys so runs do not interfere with each other.
        self.make_cache().clear()

    def test_compat(self):
        """Values written via the raw redis client are readable through the cache."""
        c = self.make_cache()
        assert c._client.set(c.key_prefix + 'foo', 'Awesome')
        # A raw string comes back as bytes ...
        self.assert_equal(c.get('foo'), b'Awesome')
        assert c._client.set(c.key_prefix + 'foo', '42')
        # ... while a numeric string is coerced to an int.
        self.assert_equal(c.get('foo'), 42)
class MemcachedCacheTestCase(CacheTestCase):
    """Runs the generic cache tests against a memcached server.

    Only added to the suite when a memcached client library was importable.
    """

    def make_cache(self):
        return cache.MemcachedCache(key_prefix='werkzeug-test-case:')

    def teardown(self):
        # Flush all test keys so runs do not interfere with each other.
        self.make_cache().clear()

    def test_compat(self):
        """Values written via the raw memcached client are readable through the cache."""
        c = self.make_cache()
        assert c._client.set(c.key_prefix + b'foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')
def suite():
    """Assemble the test suite; backend cases are included only when their
    client library (and, for redis, a reachable server) is available."""
    cases = [SimpleCacheTestCase, FileSystemCacheTestCase]
    if redis is not None:
        cases.append(RedisCacheTestCase)
    if memcache is not None:
        cases.append(MemcachedCacheTestCase)
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
| mit |
js0701/chromium-crosswalk | tools/telemetry/telemetry/internal/platform/linux_based_platform_backend_unittest.py | 9 | 4228 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import unittest
from telemetry.core import util
from telemetry.internal.platform import linux_based_platform_backend
import mock
class TestBackend(linux_based_platform_backend.LinuxBasedPlatformBackend):
    """Backend double that serves canned file contents instead of reading /proc."""
    # pylint: disable=abstract-method
    def __init__(self):
        super(TestBackend, self).__init__()
        # Maps a virtual filename (e.g. '/proc/meminfo') to its canned contents.
        self._mock_files = {}
    def SetMockFile(self, filename, output):
        """Register canned contents to be returned for `filename`."""
        self._mock_files[filename] = output
    def GetFileContents(self, filename):
        # Overrides the real implementation; raises KeyError for unregistered files.
        return self._mock_files[filename]
    def GetClockTicks(self):
        # Fixed clock-ticks-per-second value so CPU-time math is deterministic.
        return 41
class LinuxBasedPlatformBackendTest(unittest.TestCase):
    """Exercises the /proc parsing logic using canned fixture files."""

    def SetMockFileInBackend(self, backend, real_file, mock_file):
        """Load fixture `real_file` from the unittest data dir and register it
        in `backend` under the virtual path `mock_file`."""
        with open(os.path.join(util.GetUnittestDataDir(), real_file)) as f:
            backend.SetMockFile(mock_file, f.read())

    def testGetSystemCommitCharge(self):
        if not linux_based_platform_backend.resource:
            logging.warning('Test not supported')
            return
        backend = TestBackend()
        self.SetMockFileInBackend(backend, 'proc_meminfo', '/proc/meminfo')
        result = backend.GetSystemCommitCharge()
        # 25252140 == MemTotal - MemFree - Buffers - Cached (in kB)
        self.assertEquals(result, 25252140)

    def testGetSystemTotalPhysicalMemory(self):
        if not linux_based_platform_backend.resource:
            logging.warning('Test not supported')
            return
        backend = TestBackend()
        self.SetMockFileInBackend(backend, 'proc_meminfo', '/proc/meminfo')
        result = backend.GetSystemTotalPhysicalMemory()
        # 67479191552 == MemTotal * 1024
        self.assertEquals(result, 67479191552)

    def testGetCpuStatsBasic(self):
        if not linux_based_platform_backend.resource:
            logging.warning('Test not supported')
            return
        backend = TestBackend()
        self.SetMockFileInBackend(backend, 'stat', '/proc/1/stat')
        result = backend.GetCpuStats(1)
        self.assertEquals(result, {'CpuProcessTime': 22.0})

    def testGetCpuTimestampBasic(self):
        if not linux_based_platform_backend.resource:
            logging.warning('Test not supported')
            return
        # Simulated `grep jiffies /proc/timer_list` output; only the first
        # 'jiffies:' line should be used.
        jiffies_grep_string = """
    jiffies
    jiffies a1111
    .last_jiffies   : 4307239958
    .next_jiffies   : 4307239968
    jiffies: 10505463300
    jiffies: 10505463333
"""
        with mock.patch.object(
                linux_based_platform_backend.LinuxBasedPlatformBackend,
                'RunCommand', return_value=jiffies_grep_string) as mock_method:
            backend = linux_based_platform_backend.LinuxBasedPlatformBackend()
            result = backend.GetCpuTimestamp()
        self.assertEquals(result, {'TotalTime': 105054633.0})
        mock_method.assert_call_once_with(
            ['grep', '-m', '1', 'jiffies:', '/proc/timer_list'])

    def testGetMemoryStatsBasic(self):
        if not linux_based_platform_backend.resource:
            logging.warning('Test not supported')
            return
        backend = TestBackend()
        self.SetMockFileInBackend(backend, 'stat', '/proc/1/stat')
        self.SetMockFileInBackend(backend, 'status', '/proc/1/status')
        self.SetMockFileInBackend(backend, 'smaps', '/proc/1/smaps')
        result = backend.GetMemoryStats(1)
        self.assertEquals(result, {'PrivateDirty': 5324800,
                                   'VM': 1025978368,
                                   'VMPeak': 1050099712,
                                   'WorkingSetSize': 84000768,
                                   'WorkingSetSizePeak': 144547840})

    def testGetMemoryStatsNoHWM(self):
        # Same as the basic test but the status fixture lacks the high-water
        # marks, so the peaks fall back to the current values.
        if not linux_based_platform_backend.resource:
            logging.warning('Test not supported')
            return
        backend = TestBackend()
        self.SetMockFileInBackend(backend, 'stat', '/proc/1/stat')
        self.SetMockFileInBackend(backend, 'status_nohwm', '/proc/1/status')
        self.SetMockFileInBackend(backend, 'smaps', '/proc/1/smaps')
        result = backend.GetMemoryStats(1)
        self.assertEquals(result, {'PrivateDirty': 5324800,
                                   'VM': 1025978368,
                                   'VMPeak': 1025978368,
                                   'WorkingSetSize': 84000768,
                                   'WorkingSetSizePeak': 84000768})
| bsd-3-clause |
Dino0631/RedRain-Bot | cogs/lib/youtube_dl/extractor/shared.py | 41 | 2869 | from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
urlencode_postdata,
)
class SharedBaseIE(InfoExtractor):
    """Shared extraction logic for shared.sx / vivo.sx style file hosts.

    Subclasses provide _FILE_NOT_FOUND (site-specific missing-file marker)
    and _extract_video_url().
    """
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage, urlh = self._download_webpage_handle(url, video_id)

        if self._FILE_NOT_FOUND in webpage:
            raise ExtractorError(
                'Video %s does not exist' % video_id, expected=True)

        video_url = self._extract_video_url(webpage, video_id, url)

        # Title and size are served base64-encoded in <meta> tags on these sites.
        title = base64.b64decode(self._html_search_meta(
            'full:title', webpage, 'title').encode('utf-8')).decode('utf-8')
        filesize = int_or_none(self._html_search_meta(
            'full:size', webpage, 'file size', fatal=False))

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'filesize': filesize,
            'title': title,
        }
class SharedIE(SharedBaseIE):
    """Extractor for shared.sx; the video URL is behind a POST of the page's
    hidden form fields."""
    IE_DESC = 'shared.sx'
    _VALID_URL = r'https?://shared\.sx/(?P<id>[\da-z]{10})'
    _FILE_NOT_FOUND = '>File does not exist<'

    _TEST = {
        'url': 'http://shared.sx/0060718775',
        'md5': '106fefed92a8a2adb8c98e6a0652f49b',
        'info_dict': {
            'id': '0060718775',
            'ext': 'mp4',
            'title': 'Bmp4',
            'filesize': 1720110,
        },
    }

    def _extract_video_url(self, webpage, video_id, url):
        # Re-submit the page's hidden form to obtain the page that embeds
        # the actual video URL in a data-url attribute.
        download_form = self._hidden_inputs(webpage)

        video_page = self._download_webpage(
            url, video_id, 'Downloading video page',
            data=urlencode_postdata(download_form),
            headers={
                'Content-Type': 'application/x-www-form-urlencoded',
                'Referer': url,
            })

        video_url = self._html_search_regex(
            r'data-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
            video_page, 'video URL', group='url')

        return video_url
class VivoIE(SharedBaseIE):
    """Extractor for vivo.sx; the stream URL is base64-encoded inside an
    InitializeStream(...) call in the page source."""
    IE_DESC = 'vivo.sx'
    _VALID_URL = r'https?://vivo\.sx/(?P<id>[\da-z]{10})'
    _FILE_NOT_FOUND = '>The file you have requested does not exists or has been removed'

    _TEST = {
        'url': 'http://vivo.sx/d7ddda0e78',
        'md5': '15b3af41be0b4fe01f4df075c2678b2c',
        'info_dict': {
            'id': 'd7ddda0e78',
            'ext': 'mp4',
            'title': 'Chicken',
            'filesize': 528031,
        },
    }

    def _extract_video_url(self, webpage, video_id, *args):
        return self._parse_json(
            self._search_regex(
                r'InitializeStream\s*\(\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
                webpage, 'stream', group='url'),
            video_id,
            # The JSON payload is base64-encoded; decode before parsing.
            transform_source=lambda x: base64.b64decode(
                x.encode('ascii')).decode('utf-8'))[0]
| gpl-3.0 |
jimgong92/allezViens | connect.py | 1 | 8842 | from models import *
from run import db
import sys
import math
import hashlib
import time
from communication import sendPickNotificationEmail
'''DATABASE INSERTION/UPDATE'''
def addDriver(id, alias, oLat, oLon, dLat, dLon, date):
    """Create and persist a Driver row with a fresh edit URL; returns it."""
    newDriver = Driver(id, alias, oLat, oLon, dLat, dLon, date, makeURL(id))
    db.session.add(newDriver)
    save()
    return newDriver
def addPassenger(id, alias, oLat, oLon, dLat, dLon, date):
    """Create and persist a Passenger row with a fresh edit URL; returns it."""
    newPassenger = Passenger(id, alias, oLat, oLon, dLat, dLon, date, makeURL(id))
    db.session.add(newPassenger)
    save()
    return newPassenger
def pickDriver(driverID, passengerID, add):
    """Add (add=True) or remove (add=False) a driver from a passenger's
    picks, then persist the change."""
    passenger = getPassenger(passengerID)
    driver = getDriver(driverID)
    action = passenger.pick if add else passenger.unpick
    action(driver)
    save()
def pickPassenger(passengerID, driverID, add):
    """Add (add=True) or remove (add=False) a passenger from a driver's
    picks, then persist the change."""
    driver = getDriver(driverID)
    passenger = getPassenger(passengerID)
    action = driver.pick if add else driver.unpick
    action(passenger)
    save()
def validateDriver(driverID):
    """Mark the given driver as validated and persist."""
    getDriver(driverID).validateDriver()
    save()
def validatePassenger(passengerID):
    """Mark the given passenger as validated and persist."""
    getPassenger(passengerID).validatePassenger()
    save()
def updatePassenger(passengerDict):
    """Update a passenger's route from a request dict; True on success."""
    return update(getPassenger(passengerDict['email']), passengerDict)
def updateDriver(driverDict):
    """Update a driver's route from a request dict; True on success."""
    return update(getDriver(driverDict['email']), driverDict)
def update(model, dictionary):
    """Copy route fields from `dictionary` onto `model` and persist.

    Returns False when `model` is '' (the lookup-failed sentinel used by
    getDriver/getPassenger), True otherwise.
    """
    if model == '':
        return False
    for attr in ('oLat', 'oLon', 'dLat', 'dLon', 'date', 'alias'):
        setattr(model, attr, dictionary[attr])
    db.session.add(model)
    save()
    return True
'''DATABASE GET'''
def getDriver(driverID):
    """Retrieve a Driver by email address.

    Returns the Driver instance (or None when no row matches), or the
    sentinel '' when the query itself fails — callers compare against ''.
    """
    try:
        result = Driver.query.filter_by(email=driverID).first()
    except Exception:
        # BUG FIX: narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt.
        result = ''
    return result
def getPassenger(passengerID):
    """Retrieve a Passenger by email address.

    Returns the Passenger instance (or None when no row matches), or the
    sentinel '' when the query itself fails — callers compare against ''.
    """
    try:
        result = Passenger.query.filter_by(email=passengerID).first()
    except Exception:
        # BUG FIX: narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt.
        result = ''
    return result
def findMatchableDrivers(oLat, oLon, dLat, dLon, date):
    """Return drivers travelling on `date` whose route bounding box contains
    both the passenger's origin and destination.

    PARAMS: passenger origin/destination coordinates and travel date.
    """
    matches = []
    for driver in Driver.query.filter(Driver.date == date).all():
        latLo, latHi = min(driver.oLat, driver.dLat), max(driver.oLat, driver.dLat)
        lonLo, lonHi = min(driver.oLon, driver.dLon), max(driver.oLon, driver.dLon)
        latsInside = latLo <= oLat <= latHi and latLo <= dLat <= latHi
        lonsInside = lonLo <= oLon <= lonHi and lonLo <= dLon <= lonHi
        if latsInside and lonsInside:
            matches.append(driver)
    return formatResults(matches)
def findMatchablePassengers(oLat, oLon, dLat, dLon, date):
    """Return passengers travelling on `date` whose origin and destination
    both fall inside the driver's bounding box, padded by ~5 miles.

    PARAMS: driver origin/destination coordinates and travel date.
    """
    latLo, latHi = min(oLat, dLat), max(oLat, dLat)
    lonLo, lonHi = min(oLon, dLon), max(oLon, dLon)
    # Pad the box ~5 miles toward its NW and SE corners.
    latHi, lonLo = makeBuffer(latHi, lonLo, 5, "NW")
    latLo, lonHi = makeBuffer(latLo, lonHi, 5, "SE")
    passengers = Passenger.query.filter(
        Passenger.date == date,
        Passenger.oLat >= latLo, Passenger.oLat <= latHi,
        Passenger.dLat >= latLo, Passenger.dLat <= latHi,
        Passenger.oLon >= lonLo, Passenger.oLon <= lonHi,
        Passenger.dLon >= lonLo, Passenger.dLon <= lonHi).all()
    return formatResults(passengers)
def findDriverPicks(driverID):
    """Return the list of picks made by the given driver."""
    driver = getDriver(driverID)
    return driver.picks
def findPassengerPicks(passengerID):
    """Return the list of picks made by the given passenger."""
    passenger = getPassenger(passengerID)
    return passenger.picks
def getInfoByUrl(url):
    """Resolve an edit URL to its owner.

    Returns ('D', info) for a driver, ('P', info) for a passenger, or
    ('NA', False) when the URL matches nobody; `info` includes pick data.
    """
    drivers = Driver.query.filter_by(editURL=url).all()
    if drivers:
        driver = drivers[0]
        return 'D', objectifyWithPickInfo(driver, findDriverPicks(driver.email))
    passengers = Passenger.query.filter_by(editURL=url).all()
    if passengers:
        passenger = passengers[0]
        return 'P', objectifyWithPickInfo(passenger, findPassengerPicks(passenger.email))
    return 'NA', False
def getDriverInfo(email):
    """Return JSON-friendly info (including picks) for a driver by email."""
    driver = getDriver(email)
    return objectifyWithPickInfo(driver, findDriverPicks(driver.email))
def getPassengerInfo(email):
    """Return JSON-friendly info (including picks) for a passenger by email."""
    passenger = getPassenger(email)
    return objectifyWithPickInfo(passenger, findPassengerPicks(passenger.email))
def urlExists(url, validate):
    """Return True when `url` belongs to an existing user.

    When `validate` is truthy, additionally mark that user as validated.
    """
    urlType, info = getInfoByUrl(url)
    if urlType == 'NA':
        return False
    if validate:
        if urlType == 'P':
            validatePassenger(info['email'])
        else:
            validateDriver(info['email'])
    return True
def sendMessage(to, sender, message, fromType):
    """Email a pick notification to `to`, linking their edit URL.

    `fromType` starting with 'D' means the sender is a driver (so the
    recipient is a passenger), anything else the reverse.  `message` is
    currently unused but kept for interface compatibility.
    Returns True on success, False on any failure (best-effort).
    """
    sent = True
    try:
        if fromType[0].upper() == 'D':
            url = getPassenger(to).editURL
        else:
            url = getDriver(to).editURL
        sendPickNotificationEmail(to, sender, url)
    except Exception:
        # BUG FIX: narrowed from a bare `except:`; also dropped the
        # `finally: return` which suppressed even KeyboardInterrupt.
        sent = False
    return sent
'''DATABASE DELETION'''
def deleteDriver(id):
    """Remove a driver and their route from the database."""
    db.session.delete(getDriver(id))
    save()
    return ''
def deletePassenger(id):
    """Remove a passenger and their route from the database."""
    db.session.delete(getPassenger(id))
    save()
    return ''
'''HELPER FUNCTIONS'''
def save():
    """Commit the current db session, swallowing (but reporting) failures.

    Best-effort by design: callers do not handle commit errors.
    """
    try:
        db.session.commit()
    except Exception:
        # BUG FIX: narrowed from a bare `except:`; also removed leftover
        # debug prints that dumped every session object on each save.
        print('Error committing db session: %s' % (sys.exc_info()[0],))
def formatResults(modelArray):
    """Convert a list of user models into JSON-friendly dicts.

    BUG FIX: removed a leftover debug print that ran on every iteration;
    replaced the manual index loop with a comprehension.
    """
    return [objectify(model) for model in modelArray]
def objectify(model):
    """Pull a user model's route data into a JSON-friendly dict.

    Coordinates are coerced to floats so they serialize as numbers.
    """
    return {
        "email": model.email,
        "alias": model.alias,
        "origin": [float(model.oLat), float(model.oLon)],
        "destination": [float(model.dLat), float(model.dLon)],
        "date": model.date,
    }
def objectifyWithPickInfo(model, picks):
    """Extend objectify() output with the user's pick statuses."""
    result = objectify(model)
    result["picks"] = parseUserPicks(model, picks)
    return result
def parseUserPicks(user, picks):
    """Label each of the user's picks: CONFIRMED when the pick is mutual
    (the picked user picked `user` back), otherwise PENDING."""
    def status_of(pick):
        return "CONFIRMED" if user in pick.picks else "PENDING"
    return [{"id": pick.email, "status": status_of(pick)} for pick in picks]
def makeBuffer(lat, lon, miles, direction):
    """Return the point `miles` away from (lat, lon) toward the NW or SE,
    used to pad a route's search bounding box.

    PARAMS: lat/lon in degrees, buffer distance in miles, direction "NW"
    or "SE".  Returns (newLat, newLon) in degrees.
    """
    # Mean earth radius in miles; precision is irrelevant for a search buffer.
    earthRadiusMiles = 3959
    bearings = {"NW": math.radians(315), "SE": math.radians(135)}
    bearing = bearings[direction]
    lat = math.radians(lat)
    lon = math.radians(lon)
    # Angular distance travelled along the great circle (cast in case
    # `miles` arrives as an int under Python 2's integer division).
    angularDirection = float(miles) / float(earthRadiusMiles)
    # Standard spherical destination-point formulas.
    # BUG FIX: the cos(lat)*sin(d)*cos(bearing) term must be *inside*
    # asin(); it was previously added to asin()'s result, skewing the
    # buffered latitude.
    newLat = math.asin(
        math.sin(lat) * math.cos(angularDirection)
        + math.cos(lat) * math.sin(angularDirection) * math.cos(bearing))
    newLon = lon + math.atan2(
        math.sin(bearing) * math.sin(angularDirection) * math.cos(lat),
        math.cos(angularDirection) - math.sin(lat) * math.sin(newLat))
    return math.degrees(newLat), math.degrees(newLon)
def makeURL(id):
    """Generate a unique 8-character URL slug for a trip route.

    Hashes the id plus a minutes/seconds timestamp, re-appending a fresh
    timestamp to the growing seed until the slug is unused.
    """
    seed = id
    while True:
        seed += time.strftime("%M%S")
        candidate = hashlib.md5(seed).hexdigest()[0:8]
        if not urlExists(candidate, False):
            return candidate
| mit |
aldebaran/qibuild | python/qibuild/test/projects/usefoopymodule/test.py | 1 | 1167 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
"""
This is an equivalent of a C++ program trying to load a
Python module using libqi, but written in Python.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
def main():
    """ Main Entry Point """
    # libqi reads this env var to find extra SDK prefixes; mirror that here.
    from_env = os.environ.get("QI_ADDITIONAL_SDK_PREFIXES")
    if not from_env:
        sys.exit("QI_ADDITIONAL_SDK_PREFIXES not set")
    prefixes = from_env.split(os.path.pathsep)
    found = False
    for prefix in prefixes:
        # Module manifests live at <prefix>/share/qi/module/<name>.mod
        candidate = os.path.join(prefix, "share", "qi", "module", "foo.mod")
        if os.path.exists(candidate):
            found = True
            with open(candidate, "r") as fp:
                contents = fp.read()
            # A python-language module manifest must contain exactly "python\n".
            if contents != "python\n":
                sys.exit("Expected python\\n, got: " + contents)
    if not found:
        sys.exit("foo.mod not found")
    # Finally check that the module described by foo.mod is importable.
    import foo


if __name__ == "__main__":
    main()
pan1cz/locust | locust/test/test_locust_class.py | 4 | 19425 | import six
from locust import InterruptTaskSet, ResponseError
from locust.core import HttpLocust, Locust, TaskSet, events, task
from locust.exception import (CatchResponseError, LocustError, RescheduleTask,
RescheduleTaskImmediately)
from .testcases import LocustTestCase, WebserverTestCase
class TestTaskSet(LocustTestCase):
    """Tests TaskSet scheduling, weighting, nesting and interrupt semantics."""

    def setUp(self):
        super(TestTaskSet, self).setUp()

        class User(Locust):
            host = "127.0.0.1"
        # A minimal Locust instance every TaskSet under test is attached to.
        self.locust = User()

    def test_task_ratio(self):
        """Weights in the tasks dict expand into that many task entries."""
        t1 = lambda l: None
        t2 = lambda l: None

        class MyTasks(TaskSet):
            tasks = {t1:5, t2:2}

        l = MyTasks(self.locust)

        t1_count = len([t for t in l.tasks if t == t1])
        t2_count = len([t for t in l.tasks if t == t2])

        self.assertEqual(t1_count, 5)
        self.assertEqual(t2_count, 2)

    def test_task_decorator_ratio(self):
        """@task(N) weights combine with weights from the tasks dict."""
        t1 = lambda l: None
        t2 = lambda l: None

        class MyTasks(TaskSet):
            tasks = {t1:5, t2:2}
            host = ""

            @task(3)
            def t3(self):
                pass

            @task(13)
            def t4(self):
                pass

        l = MyTasks(self.locust)

        t1_count = len([t for t in l.tasks if t == t1])
        t2_count = len([t for t in l.tasks if t == t2])
        t3_count = len([t for t in l.tasks if t.__name__ == MyTasks.t3.__name__])
        t4_count = len([t for t in l.tasks if t.__name__ == MyTasks.t4.__name__])

        self.assertEqual(t1_count, 5)
        self.assertEqual(t2_count, 2)
        self.assertEqual(t3_count, 3)
        self.assertEqual(t4_count, 13)

    def test_on_start(self):
        """on_start runs before the first task is executed."""
        class MyTasks(TaskSet):
            t1_executed = False
            t2_executed = False

            def on_start(self):
                self.t1()

            def t1(self):
                self.t1_executed = True

            @task
            def t2(self):
                self.t2_executed = True
                raise InterruptTaskSet(reschedule=False)

        l = MyTasks(self.locust)
        self.assertRaises(RescheduleTask, lambda: l.run())
        self.assertTrue(l.t1_executed)
        self.assertTrue(l.t2_executed)

    def test_schedule_task(self):
        """schedule_task queues a task, optionally with positional args."""
        self.t1_executed = False
        self.t2_arg = None

        def t1(l):
            self.t1_executed = True

        def t2(l, arg):
            self.t2_arg = arg

        class MyTasks(TaskSet):
            tasks = [t1, t2]

        taskset = MyTasks(self.locust)
        taskset.schedule_task(t1)
        taskset.execute_next_task()
        self.assertTrue(self.t1_executed)

        taskset.schedule_task(t2, args=["argument to t2"])
        taskset.execute_next_task()
        self.assertEqual("argument to t2", self.t2_arg)

    def test_schedule_task_with_kwargs(self):
        """schedule_task passes through both positional and keyword args."""
        class MyTasks(TaskSet):
            @task
            def t1(self):
                self.t1_executed = True
            @task
            def t2(self, *args, **kwargs):
                self.t2_args = args
                self.t2_kwargs = kwargs
        loc = MyTasks(self.locust)
        loc.schedule_task(loc.t2, [42], {"test_kw":"hello"})
        loc.execute_next_task()
        self.assertEqual((42, ), loc.t2_args)
        self.assertEqual({"test_kw":"hello"}, loc.t2_kwargs)

        loc.schedule_task(loc.t2, args=[10, 4], kwargs={"arg1":1, "arg2":2})
        loc.execute_next_task()
        self.assertEqual((10, 4), loc.t2_args)
        self.assertEqual({"arg1":1, "arg2":2}, loc.t2_kwargs)

    def test_schedule_task_bound_method(self):
        """Bound methods can be scheduled and can schedule further tasks."""
        class MyTasks(TaskSet):
            host = ""

            @task()
            def t1(self):
                self.t1_executed = True
                self.schedule_task(self.t2)
            def t2(self):
                self.t2_executed = True

        taskset = MyTasks(self.locust)
        taskset.schedule_task(taskset.get_next_task())
        taskset.execute_next_task()
        self.assertTrue(taskset.t1_executed)
        taskset.execute_next_task()
        self.assertTrue(taskset.t2_executed)

    def test_taskset_inheritance(self):
        """A subclass's decorated tasks are merged with inherited tasks."""
        def t1(l):
            pass
        class MyBaseTaskSet(TaskSet):
            tasks = [t1]
            host = ""
        class MySubTaskSet(MyBaseTaskSet):
            @task
            def t2(self):
                pass

        l = MySubTaskSet(self.locust)
        self.assertEqual(2, len(l.tasks))
        self.assertEqual([t1, six.get_unbound_function(MySubTaskSet.t2)], l.tasks)

    def test_task_decorator_with_or_without_argument(self):
        """@task, @task() and @task(N) all register the task correctly."""
        class MyTaskSet(TaskSet):
            @task
            def t1(self):
                pass
        taskset = MyTaskSet(self.locust)
        self.assertEqual(len(taskset.tasks), 1)

        class MyTaskSet2(TaskSet):
            @task()
            def t1(self):
                pass
        taskset = MyTaskSet2(self.locust)
        self.assertEqual(len(taskset.tasks), 1)

        class MyTaskSet3(TaskSet):
            @task(3)
            def t1(self):
                pass
        taskset = MyTaskSet3(self.locust)
        self.assertEqual(len(taskset.tasks), 3)

    def test_sub_taskset(self):
        """A nested TaskSet listed in `tasks` runs and can interrupt back out."""
        class MySubTaskSet(TaskSet):
            min_wait = 1
            max_wait = 1
            @task()
            def a_task(self):
                self.locust.sub_locust_task_executed = True
                self.interrupt()

        class MyTaskSet(TaskSet):
            tasks = [MySubTaskSet]

        self.sub_locust_task_executed = False
        loc = MyTaskSet(self.locust)
        loc.schedule_task(loc.get_next_task())
        self.assertRaises(RescheduleTaskImmediately, lambda: loc.execute_next_task())
        self.assertTrue(self.locust.sub_locust_task_executed)

    def test_sub_taskset_tasks_decorator(self):
        """A nested TaskSet registered via @task behaves like one in `tasks`."""
        class MyTaskSet(TaskSet):
            @task
            class MySubTaskSet(TaskSet):
                min_wait = 1
                max_wait = 1
                @task()
                def a_task(self):
                    self.locust.sub_locust_task_executed = True
                    self.interrupt()

        self.sub_locust_task_executed = False
        loc = MyTaskSet(self.locust)
        loc.schedule_task(loc.get_next_task())
        self.assertRaises(RescheduleTaskImmediately, lambda: loc.execute_next_task())
        self.assertTrue(self.locust.sub_locust_task_executed)

    def test_sub_taskset_arguments(self):
        """args/kwargs given to schedule_task reach the nested TaskSet."""
        class MySubTaskSet(TaskSet):
            min_wait = 1
            max_wait = 1
            @task()
            def a_task(self):
                self.locust.sub_taskset_args = self.args
                self.locust.sub_taskset_kwargs = self.kwargs
                self.interrupt()
        class MyTaskSet(TaskSet):
            sub_locust_args = None
            sub_locust_kwargs = None
            tasks = [MySubTaskSet]

        self.locust.sub_taskset_args = None
        self.locust.sub_taskset_kwargs = None

        loc = MyTaskSet(self.locust)
        loc.schedule_task(MySubTaskSet, args=[1,2,3], kwargs={"hello":"world"})
        self.assertRaises(RescheduleTaskImmediately, lambda: loc.execute_next_task())
        self.assertEqual((1,2,3), self.locust.sub_taskset_args)
        self.assertEqual({"hello":"world"}, self.locust.sub_taskset_kwargs)

    def test_interrupt_taskset_in_main_taskset(self):
        """Interrupting the top-level TaskSet raises LocustError naming both classes."""
        class MyTaskSet(TaskSet):
            @task
            def interrupted_task(self):
                raise InterruptTaskSet(reschedule=False)
        class MyLocust(Locust):
            host = "http://127.0.0.1"
            task_set = MyTaskSet

        class MyTaskSet2(TaskSet):
            @task
            def interrupted_task(self):
                self.interrupt()
        class MyLocust2(Locust):
            host = "http://127.0.0.1"
            task_set = MyTaskSet2

        l = MyLocust()
        l2 = MyLocust2()
        self.assertRaises(LocustError, lambda: l.run())
        self.assertRaises(LocustError, lambda: l2.run())

        try:
            l.run()
        except LocustError as e:
            self.assertTrue("MyLocust" in e.args[0], "MyLocust should have been referred to in the exception message")
            self.assertTrue("MyTaskSet" in e.args[0], "MyTaskSet should have been referred to in the exception message")
        except:
            raise

        try:
            l2.run()
        except LocustError as e:
            self.assertTrue("MyLocust2" in e.args[0], "MyLocust2 should have been referred to in the exception message")
            self.assertTrue("MyTaskSet2" in e.args[0], "MyTaskSet2 should have been referred to in the exception message")
        except:
            raise

    def test_on_start_interrupt(self):
        """interrupt() from on_start propagates the right reschedule exception."""
        class SubTaskSet(TaskSet):
            def on_start(self):
                if self.kwargs["reschedule"]:
                    self.interrupt(reschedule=True)
                else:
                    self.interrupt(reschedule=False)

        class MyLocust(Locust):
            host = ""
            task_set = SubTaskSet

        l = MyLocust()
        task_set = SubTaskSet(l)
        self.assertRaises(RescheduleTaskImmediately, lambda: task_set.run(reschedule=True))
        self.assertRaises(RescheduleTask, lambda: task_set.run(reschedule=False))

    def test_parent_attribute(self):
        """Nested TaskSets expose their enclosing TaskSet via .parent."""
        from locust.exception import StopLocust
        parents = {}

        class SubTaskSet(TaskSet):
            def on_start(self):
                parents["sub"] = self.parent

            @task
            class SubSubTaskSet(TaskSet):
                def on_start(self):
                    parents["subsub"] = self.parent

                @task
                def stop(self):
                    raise StopLocust()
        class RootTaskSet(TaskSet):
            tasks = [SubTaskSet]

        class MyLocust(Locust):
            host = ""
            task_set = RootTaskSet

        l = MyLocust()
        l.run()
        self.assertTrue(isinstance(parents["sub"], RootTaskSet))
        self.assertTrue(isinstance(parents["subsub"], SubTaskSet))
class TestWebLocustClass(WebserverTestCase):
    """Tests HttpLocust's HTTP client against the local test webserver."""

    def test_get_request(self):
        self.response = ""
        def t1(l):
            self.response = l.client.get("/ultra_fast")
        class MyLocust(HttpLocust):
            tasks = [t1]
            host = "http://127.0.0.1:%i" % self.port

        my_locust = MyLocust()
        t1(my_locust)
        self.assertEqual(self.response.text, "This is an ultra fast response")

    def test_client_request_headers(self):
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port

        locust = MyLocust()
        self.assertEqual("hello", locust.client.get("/request_header_test", headers={"X-Header-Test":"hello"}).text)

    def test_client_get(self):
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port

        locust = MyLocust()
        self.assertEqual("GET", locust.client.get("/request_method").text)

    def test_client_get_absolute_url(self):
        # Absolute URLs bypass the configured host prefix.
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port

        locust = MyLocust()
        self.assertEqual("GET", locust.client.get("http://127.0.0.1:%i/request_method" % self.port).text)

    def test_client_post(self):
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port

        locust = MyLocust()
        self.assertEqual("POST", locust.client.post("/request_method", {"arg":"hello world"}).text)
        self.assertEqual("hello world", locust.client.post("/post", {"arg":"hello world"}).text)

    def test_client_put(self):
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port

        locust = MyLocust()
        self.assertEqual("PUT", locust.client.put("/request_method", {"arg":"hello world"}).text)
        self.assertEqual("hello world", locust.client.put("/put", {"arg":"hello world"}).text)

    def test_client_delete(self):
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port

        locust = MyLocust()
        self.assertEqual("DELETE", locust.client.delete("/request_method").text)
        self.assertEqual(200, locust.client.delete("/request_method").status_code)

    def test_client_head(self):
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port

        locust = MyLocust()
        self.assertEqual(200, locust.client.head("/request_method").status_code)

    def test_client_basic_auth(self):
        # Credentials embedded in the host URL are used for HTTP basic auth.
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port

        class MyAuthorizedLocust(HttpLocust):
            host = "http://locust:menace@127.0.0.1:%i" % self.port

        class MyUnauthorizedLocust(HttpLocust):
            host = "http://locust:wrong@127.0.0.1:%i" % self.port

        locust = MyLocust()
        unauthorized = MyUnauthorizedLocust()
        authorized = MyAuthorizedLocust()
        response = authorized.client.get("/basic_auth")
        self.assertEqual(200, response.status_code)
        self.assertEqual("Authorized", response.text)
        self.assertEqual(401, locust.client.get("/basic_auth").status_code)
        self.assertEqual(401, unauthorized.client.get("/basic_auth").status_code)

    def test_log_request_name_argument(self):
        # The `name=` argument groups the request under a custom stats entry.
        from locust.stats import global_stats
        self.response = ""

        class MyLocust(HttpLocust):
            tasks = []
            host = "http://127.0.0.1:%i" % self.port

            @task()
            def t1(l):
                self.response = l.client.get("/ultra_fast", name="new name!")

        my_locust = MyLocust()
        my_locust.t1()

        self.assertEqual(1, global_stats.get("new name!", "GET").num_requests)
        self.assertEqual(0, global_stats.get("/ultra_fast", "GET").num_requests)

    def test_locust_client_error(self):
        # A plain Locust (not HttpLocust) has no client; using it must raise.
        class MyTaskSet(TaskSet):
            @task
            def t1(self):
                self.client.get("/")
                self.interrupt()

        class MyLocust(Locust):
            host = "http://127.0.0.1:%i" % self.port
            task_set = MyTaskSet

        my_locust = MyLocust()
        self.assertRaises(LocustError, lambda: my_locust.client.get("/"))
        my_taskset = MyTaskSet(my_locust)
        self.assertRaises(LocustError, lambda: my_taskset.client.get("/"))

    def test_redirect_url_original_path_as_name(self):
        # Stats are recorded under the originally requested path, not the
        # redirect target.
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port

        l = MyLocust()
        l.client.get("/redirect")

        from locust.stats import global_stats
        self.assertEqual(1, len(global_stats.entries))
        self.assertEqual(1, global_stats.get("/redirect", "GET").num_requests)
        self.assertEqual(0, global_stats.get("/ultra_fast", "GET").num_requests)
class TestCatchResponse(WebserverTestCase):
    """Tests for the ``catch_response=True`` request mode.

    setUp wires counters onto the request_success/request_failure events so
    each test can assert exactly how many of each were fired.
    """
    def setUp(self):
        super(TestCatchResponse, self).setUp()
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port
        self.locust = MyLocust()

        self.num_failures = 0
        self.num_success = 0
        def on_failure(request_type, name, response_time, exception):
            self.num_failures += 1
            self.last_failure_exception = exception
        def on_success(**kwargs):
            self.num_success += 1
        events.request_failure += on_failure
        events.request_success += on_success

    def test_catch_response(self):
        self.assertEqual(500, self.locust.client.get("/fail").status_code)
        self.assertEqual(1, self.num_failures)
        self.assertEqual(0, self.num_success)

        # Leaving the context manager without raising reports success.
        with self.locust.client.get("/ultra_fast", catch_response=True) as response: pass
        self.assertEqual(1, self.num_failures)
        self.assertEqual(1, self.num_success)

        # Raising ResponseError inside the block reports a failure.
        with self.locust.client.get("/ultra_fast", catch_response=True) as response:
            raise ResponseError("Not working")
        self.assertEqual(2, self.num_failures)
        self.assertEqual(1, self.num_success)

    def test_catch_response_http_fail(self):
        # An HTTP error status still counts as a failure when not overridden.
        with self.locust.client.get("/fail", catch_response=True) as response: pass
        self.assertEqual(1, self.num_failures)
        self.assertEqual(0, self.num_success)

    def test_catch_response_http_manual_fail(self):
        # A 200 response can be manually marked as a failure.
        with self.locust.client.get("/ultra_fast", catch_response=True) as response:
            response.failure("Haha!")
        self.assertEqual(1, self.num_failures)
        self.assertEqual(0, self.num_success)
        self.assertTrue(
            isinstance(self.last_failure_exception, CatchResponseError),
            "Failure event handler should have been passed a CatchResponseError instance"
        )

    def test_catch_response_http_manual_success(self):
        # An HTTP 500 can be manually marked as a success.
        with self.locust.client.get("/fail", catch_response=True) as response:
            response.success()
        self.assertEqual(0, self.num_failures)
        self.assertEqual(1, self.num_success)

    def test_catch_response_allow_404(self):
        with self.locust.client.get("/does/not/exist", catch_response=True) as response:
            self.assertEqual(404, response.status_code)
            if response.status_code == 404:
                response.success()
        self.assertEqual(0, self.num_failures)
        self.assertEqual(1, self.num_success)

    def test_interrupt_taskset_with_catch_response(self):
        # InterruptTaskSet raised inside the block must propagate without
        # firing either a success or a failure event.
        class MyTaskSet(TaskSet):
            @task
            def interrupted_task(self):
                with self.client.get("/ultra_fast", catch_response=True) as r:
                    raise InterruptTaskSet()
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port
            task_set = MyTaskSet

        l = MyLocust()
        ts = MyTaskSet(l)
        self.assertRaises(InterruptTaskSet, lambda: ts.interrupted_task())
        self.assertEqual(0, self.num_failures)
        self.assertEqual(0, self.num_success)

    def test_catch_response_connection_error_success(self):
        # Port 1 is unreachable; the dummy response has status_code 0 and no
        # content, but can still be manually marked as a success.
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:1"
        l = MyLocust()
        with l.client.get("/", catch_response=True) as r:
            self.assertEqual(r.status_code, 0)
            self.assertEqual(None, r.content)
            r.success()
        self.assertEqual(1, self.num_success)
        self.assertEqual(0, self.num_failures)

    def test_catch_response_connection_error_fail(self):
        # BUG FIX: this test previously duplicated the *_success test above
        # (it called r.success() and asserted a success), so the manual-fail
        # path for connection errors was never exercised.
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:1"
        l = MyLocust()
        with l.client.get("/", catch_response=True) as r:
            self.assertEqual(r.status_code, 0)
            self.assertEqual(None, r.content)
            r.failure("Manual fail")
        self.assertEqual(0, self.num_success)
        self.assertEqual(1, self.num_failures)
| mit |
neumerance/deploy | horizon/browsers/breadcrumb.py | 7 | 1850 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from horizon.utils import html
class Breadcrumb(html.HTMLElement):
    """Breadcrumb trail rendered from a slash-delimited subfolder path."""
    def __init__(self, request, template, root,
                 subfolder_path, url, attr=None):
        super(Breadcrumb, self).__init__()
        self.template = template
        self.request = request
        self.root = root
        self.subfolder_path = subfolder_path
        self.url = url
        self._subfolders = []

    def get_subfolders(self):
        """Return [(folder_name, path), ...] from outermost to innermost.

        The result is computed lazily and cached in ``self._subfolders``.
        """
        if self.subfolder_path and not self._subfolders:
            remaining, sep, name = self.subfolder_path.strip('/') \
                                       .rpartition('/')
            crumbs = []
            # Walk from the innermost folder outwards, then reverse.
            while name:
                crumbs.append((name, "%s%s%s/" % (remaining, sep, name)))
                remaining, sep, name = remaining.rpartition('/')
            crumbs.reverse()
            self._subfolders = crumbs
        return self._subfolders

    def render(self):
        """ Renders the table using the template from the table options. """
        breadcrumb_template = template.loader.get_template(self.template)
        context = template.RequestContext(self.request,
                                          {"breadcrumb": self})
        return breadcrumb_template.render(context)
| apache-2.0 |
highweb-project/highweb-webcl-html5spec | chrome/common/extensions/docs/server2/permissions_data_source.py | 37 | 3654 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from itertools import ifilter
from operator import itemgetter
from data_source import DataSource
from extensions_paths import PRIVATE_TEMPLATES
from future import Future
from platform_util import GetPlatforms
def _ListifyPermissions(permissions):
'''Filter out any permissions that do not have a description or with a name
that ends with Private then sort permissions features by name into a list.
'''
def filter_permissions(perm):
return 'description' in perm and not perm['name'].endswith('Private')
return sorted(
ifilter(filter_permissions, permissions.itervalues()),
key=itemgetter('name'))
def _AddDependencyDescriptions(permissions, api_features):
'''Use |api_features| to determine the dependencies APIs have on permissions.
Add descriptions to |permissions| based on those dependencies.
'''
for name, permission in permissions.iteritems():
# Don't overwrite the description created by expanding a partial template.
if 'partial' in permission:
continue
has_deps = False
if name in api_features:
for dependency in api_features[name].get('dependencies', ()):
if dependency.startswith('permission:'):
has_deps = True
if has_deps:
permission['partial'] = 'permissions/generic_description.html'
class PermissionsDataSource(DataSource):
  '''Load and format permissions features to be used by templates.
  '''

  def __init__(self, server_instance, request):
    # Bundle of per-platform feature data (APIs, permissions).
    self._platform_bundle = server_instance.platform_bundle
    # Object store used to cache the fully-built permissions data.
    self._object_store = server_instance.object_store_creator.Create(
        PermissionsDataSource)
    self._template_cache = server_instance.compiled_fs_factory.ForTemplates(
        server_instance.host_file_system_provider.GetMaster())

  def _CreatePermissionsDataForPlatform(self, platform):
    '''Return a Future resolving to the sorted permissions list for one
    platform, with anchors set and partial templates expanded into
    descriptions.
    '''
    features_bundle = self._platform_bundle.GetFeaturesBundle(platform)
    # Kick off both fetches before resolving so they can run concurrently.
    api_features_future = features_bundle.GetAPIFeatures()
    permission_features_future = features_bundle.GetPermissionFeatures()
    def resolve():
      api_features = api_features_future.Get()
      permission_features = permission_features_future.Get()
      _AddDependencyDescriptions(permission_features, api_features)
      # Turn partial templates into descriptions, ensure anchors are set.
      for permission in permission_features.values():
        if not 'anchor' in permission:
          permission['anchor'] = permission['name']
        if 'partial' in permission:
          permission['description'] = self._template_cache.GetFromFile(
              PRIVATE_TEMPLATES + permission['partial']).Get()
          del permission['partial']
      return _ListifyPermissions(permission_features)
    return Future(callback=resolve)

  def _CreatePermissionsData(self):
    '''Return a Future resolving to a dict keyed by 'declare_<platform>'
    containing the permissions data for every platform.
    '''
    permissions_data_futures = dict(
        (platform, self._CreatePermissionsDataForPlatform(platform))
        for platform in GetPlatforms())
    def resolve():
      return dict(('declare_' + platform, future.Get())
                  for platform, future in permissions_data_futures.iteritems())
    return Future(callback=resolve)

  def _GetCachedPermissionsData(self):
    # Build and store the data only on a cache miss.
    data = self._object_store.Get('permissions_data').Get()
    if data is None:
      data = self._CreatePermissionsData().Get()
      self._object_store.Set('permissions_data', data)
    return data

  def get(self, key):
    return self._GetCachedPermissionsData().get(key)

  def Refresh(self):
    # Rebuild without consulting the cache (used by cron refresh).
    return self._CreatePermissionsData()
| bsd-3-clause |
anpingli/openshift-ansible | roles/lib_openshift/src/class/oc_pvc.py | 38 | 5477 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-instance-attributes
class OCPVC(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''
    kind = 'pvc'

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for OCPVC.

        config is a PersistentVolumeClaimConfig carrying name, namespace,
        kubeconfig and the desired claim spec.
        '''
        super(OCPVC, self).__init__(config.namespace, config.kubeconfig)
        self.config = config
        self.namespace = config.namespace
        # Lazily-populated PersistentVolumeClaim; see the pvc property.
        self._pvc = None

    @property
    def pvc(self):
        ''' property function pvc - fetches the claim on first access '''
        if not self._pvc:
            self.get()
        return self._pvc

    @pvc.setter
    def pvc(self, data):
        ''' setter function for yedit var '''
        self._pvc = data

    def bound(self):
        '''return whether the pvc is bound'''
        if self.pvc.get_volume_name():
            return True

        return False

    def exists(self):
        ''' return whether a pvc exists '''
        if self.pvc:
            return True

        return False

    def get(self):
        '''return pvc information '''
        result = self._get(self.kind, self.config.name)
        if result['returncode'] == 0:
            self.pvc = PersistentVolumeClaim(content=result['results'][0])
        elif '\"%s\" not found' % self.config.name in result['stderr']:
            # A missing claim is not an error for our purposes; report
            # success with an empty result so callers can create it.
            result['returncode'] = 0
            result['results'] = [{}]

        return result

    def delete(self):
        '''delete the object'''
        return self._delete(self.kind, self.config.name)

    def create(self):
        '''create the object'''
        return self._create_from_content(self.config.name, self.config.data)

    def update(self):
        '''update the object'''
        # need to update the tls information and the service name
        return self._replace_content(self.kind, self.config.name, self.config.data)

    def needs_update(self):
        ''' verify an update is needed '''
        # Bound claims are immutable from our point of view.
        if self.pvc.get_volume_name() or self.pvc.is_bound():
            return False

        skip = []
        return not Utils.check_def_equal(self.config.data, self.pvc.yaml_dict, skip_keys=skip, debug=True)

    # pylint: disable=too-many-branches,too-many-return-statements
    @staticmethod
    def run_ansible(params, check_mode):
        '''run the idempotent ansible code

        params is the ansible module parameter dict; check_mode suppresses
        any mutating calls and reports what would have happened.
        Returns an ansible-style result dict with changed/failed/state.
        '''
        pconfig = PersistentVolumeClaimConfig(params['name'],
                                              params['namespace'],
                                              params['kubeconfig'],
                                              params['access_modes'],
                                              params['volume_capacity'],
                                              params['selector'],
                                              params['storage_class_name'],
                                             )
        oc_pvc = OCPVC(pconfig, verbose=params['debug'])

        state = params['state']

        api_rval = oc_pvc.get()
        if api_rval['returncode'] != 0:
            return {'failed': True, 'msg': api_rval}

        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'results': api_rval['results'], 'state': state}

        ########
        # Delete
        ########
        if state == 'absent':
            if oc_pvc.exists():

                if check_mode:
                    return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'}

                api_rval = oc_pvc.delete()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            # Already absent: nothing to do.
            return {'changed': False, 'state': state}

        if state == 'present':
            ########
            # Create
            ########
            if not oc_pvc.exists():

                if check_mode:
                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}

                # Create it here
                api_rval = oc_pvc.create()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_pvc.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            ########
            # Update
            ########
            # A bound claim cannot be updated in place; report unchanged.
            if oc_pvc.pvc.is_bound() or oc_pvc.pvc.get_volume_name():
                api_rval['msg'] = '##### - This volume is currently bound.  Will not update - ####'
                return {'changed': False, 'results': api_rval, 'state': state}

            if oc_pvc.needs_update():
                api_rval = oc_pvc.update()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_pvc.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            # Present and already up to date.
            return {'changed': False, 'results': api_rval, 'state': state}

        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
| apache-2.0 |
MikeDacre/fyrd | fyrd/job.py | 1 | 56470 | # -*- coding: utf-8 -*-
"""
Class and methods to handle Job submission.
This module only defines a single object: the Job class.
"""
import os as _os
import sys as _sys
from uuid import uuid4 as _uuid
from time import sleep as _sleep
from datetime import datetime as _dt
from traceback import print_tb as _tb
# Try to use dill, revert to pickle if not found
import dill as _pickle
from six import reraise as _reraise
from six import text_type as _txt
from six import string_types as _str
from six import integer_types as _int
###############################################################################
# Our functions #
###############################################################################
from . import run as _run
from . import conf as _conf
from . import queue as _queue
from . import logme as _logme
from . import script_runners as _scrpts
from . import batch_systems as _batch
from . import ClusterError as _ClusterError
from .submission_scripts import Function as _Function
_options = _batch.options
__all__ = ['Job']
###############################################################################
# The Job Class #
###############################################################################
class Job(object):
"""Information about a single job on the cluster.
Holds information about submit time, number of cores, the job script,
and more.
Below are the core attributes and methods required to use this class,
note that this is an incomplete list.
Attributes
----------
id : str
The ID number for the job, only set once the job has been submitted
name : str
The name of the job
command : str or callable
The function or shell script that will be submitted
args : list
A list of arguments to the shell script or function in command
kwargs : dict
A dictionary of keyword arguments to the function (not shell script) in
command
state : str
A slurm-style one word description of the state of the job, one of:
- Not_Submitted
- queued
- running
- completed
- failed
submitted : bool
written : bool
done : bool
running : bool
dependencies : list
A list of dependencies associated with this job
out : str
The output of the function or a copy of stdout for a script
stdout : str
Any output to STDOUT
stderr : str
Any output to STDERR
exitcode : int
The exitcode of the running processes (the script runner if the Job is
a function).
submit_time : datetime
A datetime object for the time of submission
start : datetime
A datetime object for time execution started on the remote node.
end : datetime
A datetime object for time execution ended on the remote node.
runtime : timedelta
A timedelta object containing runtime.
files : list
A list of script files associated with this job
nodes : list
A list of nodes associated with this job
modules : list
A list of modules associated with this job
clean_files : bool
If True, auto-delete script and function files on job completion
clean_outputs : bool
If True, auto-delete script outputs and error files on job completion
kwds : dict
Keyword arguments to the batch system (e.g. mem, cores, walltime), this
is initialized by taking every additional keyword argument to the Job.
e.g. Job('echo hi', profile=large, walltime='00:20:00', mem='2GB') will
result in kwds containing {walltime: '00:20:00', mem: '2GB'}. There is
**no need to alter this manually**.
submit_args : list
List of parsed submit arguments that will be passed at runtime to the
submit function. **Generated within the Job object**, no need to set
manually, use the `kwds` attribute instead.
Methods
-------
initialize()
Use attributes to prep job for running
gen_scripts()
Create script files (but do not write them)
write(overwrite=True)
Write scripts to files
submit(wait_on_max_queue=True)
Submit the job if it is ready and the queue is sufficiently open.
resubmit(wait_on_max_queue=True)
Clean all internal states with `scrub()` and then resubmit
kill(confirm=True)
Immediately kill the currently running job
clean(delete_outputs=True, get_outputs=True)
Delete any files created by this object
scrub(confirm=True)
Clean everything and reset to an unrun state.
update(fetch_info=True)
Update our status from the queue
wait()
Block until the job is done
get()
Block until the job is done and then return the output (stdout if job
is a script), by default saves all outputs to self (i.e. .out, .stdout,
.stderr) and deletes all intermediate files before returning. If `save`
argument is `False`, does not delete the output files by default.
Notes
-----
Printing or reproducing the class will display detailed job information.
Both `wait()` and `get()` will update the queue every few seconds
(defined by the queue_update item in the config) and add queue information
to the job as they go.
If the job disappears from the queue with no information, it will be listed
as 'completed'.
All jobs have a .submission attribute, which is a Script object containing
the submission script for the job and the file name, plus a 'written' bool
that checks if the file exists.
In addition, some batch systems (e.g. SLURM) have an .exec_script
attribute, which is a Script object containing the shell command to run.
This difference is due to the fact that some SLURM systems execute multiple
lines of the submission file at the same time.
Finally, if the job command is a function, this object will also contain a
`.function` attribute, which contains the script to run the function.
"""
# Queue-assigned job ID (set on submission).
id = None
# Job name (set in __init__ via _update_name).
name = None
# File-name suffix for generated scripts/outputs.
suffix = None
# Lifecycle flags.
submitted = False
written = False
found = False
disappeared = False
submit_time = None
state = None
# 'function' or 'script', decided in gen_scripts.
kind = None

# Arguments
kwds = None
kwargs = None
submit_args = None

# Runtime
nodes = None
cores = None
modules = None

# Files
outfile = None
errfile = None

# Scripts
submission = None
exec_script = None
function = None
imports = None

# Dependencies
dependencies = None

# Pickled output file for functions
poutfile = None

# Holds queue information in torque and slurm
queue_info = None

# Output tracking (the _got_* flags record which outputs were fetched).
_got_out = False
_got_stdout = False
_got_stderr = False
_got_exitcode = False
_found_files = False
_out = None
_stdout = None
_stderr = None
_exitcode = None

# Time tracking
_got_times = False
start = None
end = None

# Track update status (guards against re-entrant queue updates).
_updating = False

# Track preparations
initialized = False
scripts_ready = False
_kwargs = None

# Auto Cleaning (defaults read from the fyrd config file).
clean_files = _conf.get_option('jobs', 'clean_files')
clean_outputs = _conf.get_option('jobs', 'clean_outputs')
def __init__(self, command, args=None, kwargs=None, name=None, qtype=None,
             profile=None, queue=None, **kwds):
    """Initialization function arguments.

    Parameters
    ----------
    command : function/str
        The command or function to execute.
    args : tuple/dict, optional
        Optional arguments to add to command, particularly useful for
        functions.
    kwargs : dict, optional
        Optional keyword arguments to pass to the command, only used for
        functions.
    name : str, optional
        Optional name of the job. If not defined, guessed. If a job of the
        same name is already queued, an integer job number (not the queue
        number) will be added, ie. <name>.1
    qtype : str, optional
        Override the default queue type
    profile : str, optional
        The name of a profile saved in the conf
    queue : fyrd.queue.Queue, optional
        An already initiated Queue class to use.
    kwds
        *All other keywords are parsed into cluster keywords by the options
        system.* For available keywords see `fyrd.option_help()`

    Raises
    ------
    TypeError
        If `queue` is given but is not a fyrd.queue.Queue instance.
    """
    ########################
    #  Sanitize arguments  #
    ########################
    _logme.log('Args pre-check: {}'.format(kwds), 'debug')
    kwds = _options.check_arguments(kwds)
    _logme.log('Args post-check: {}'.format(kwds), 'debug')

    # Create a unique short UUID for this job
    self.uuid = str(_uuid()).split('-')[0]

    # Path handling: get_job_paths strips path keywords from kwds and
    # returns the run/out/script directories.
    [
        kwds, self.runpath, self.outpath, self.scriptpath
    ] = _conf.get_job_paths(kwds)

    # Save command
    self.command = command
    self.args = args
    self.kwargs = kwargs
    self.profile = profile

    # Get environment: detect the batch system once per process.
    if not _batch.MODE:
        _batch.get_cluster_environment()
    if not qtype:
        qtype = _batch.MODE
    if queue:
        if not isinstance(queue, _queue.Queue):
            raise TypeError(
                'queue must be fyrd.queue.Queue is {0}'.format(type(queue))
            )
        self.queue = queue
    else:
        self.queue = _queue.default_queue(qtype)
    self.batch = _batch.get_batch_system(qtype)
    self.qtype = qtype
    self.state = 'Not_Submitted'

    # Save keywords for posterity and parsing
    self.kwds = kwds

    self.name = self._update_name(name)
##########################################################################
# Public Methods #
##########################################################################
################
# Properties #
################
@property
def files(self):
    """Build a list of files associated with this class."""
    job_files = [self.submission]
    extras = {'script': self.exec_script, 'function': self.function}
    if self.kind in extras:
        job_files.append(extras[self.kind])
    return job_files
@property
def runtime(self):
    """Return the runtime as a timedelta, or None if not yet complete.

    Fetches start/end times from the queue on first access.
    """
    if not self.done:
        # BUG FIX: a missing comma previously concatenated 'warn' onto the
        # message string, so no log level was actually passed.
        _logme.log('Cannot get runtime as not yet complete.', 'warn')
        return None
    if not self.start:
        self.get_times()
    return self.end-self.start
@property
def done(self):
    """Check if completed or not.

    Updates the Job and Queue.

    Returns
    -------
    done : bool
    """
    # Check first without refreshing to avoid an unnecessary queue update.
    if self.state in _batch.DONE_STATES:
        return True
    if not self._updating:
        self.update()
    return self.state in _batch.DONE_STATES
@property
def running(self):
    """Check if running or not.

    Updates the Job and Queue.

    Returns
    -------
    running : bool
    """
    # Check first without refreshing to avoid an unnecessary queue update.
    if self.state in _batch.ACTIVE_STATES:
        return True
    if not self._updating:
        self.update()
    return self.state in _batch.ACTIVE_STATES
@property
def outfiles(self):
    """A list of all outfiles associated with this Job."""
    # The pickled output file only exists for function jobs.
    if self.poutfile:
        return [self.outfile, self.errfile, self.poutfile]
    return [self.outfile, self.errfile]
@property
def incomplete_outfiles(self):
    """A list of all outfiles that haven't already been fetched."""
    candidates = [
        (self.outfile, self._got_stdout),
        (self.errfile, self._got_stderr),
        (self.poutfile, self._got_out),
    ]
    # Keep only files that exist (are set) and were not yet fetched.
    return [fl for fl, fetched in candidates if fl and not fetched]
@property
def exitcode(self):
    """int: Exit code of the job, fetched via get_exitcode()."""
    return self.get_exitcode()

@property
def code(self):
    """int: Alias for exitcode."""
    return self.get_exitcode()

@property
def out(self):
    """Function return value (or stdout for scripts), via get_output()."""
    return self.get_output()

@property
def stdout(self):
    """str: Contents of the job's STDOUT, via get_stdout()."""
    return self.get_stdout()

@property
def stderr(self):
    """str: Contents of the job's STDERR, via get_stderr()."""
    return self.get_stderr()

@property
def err(self):
    """str: Alias for stderr."""
    return self.get_stderr()
###############################
# Core Job Handling Methods #
###############################
def initialize(self):
    """Make self runnable using set attributes.

    Merges the requested profile (backstopped by the DEFAULT profile) into
    the job keywords, resolves modules, nodes/cores, output file paths and
    dependencies, then stores the parsed keywords in ``self._kwargs``.

    Returns
    -------
    self : Job

    Raises
    ------
    fyrd.ClusterError
        If the requested profile does not exist, or a dependency is not a
        string, number, or Job.
    """
    kwds = self.kwds
    # Override autoclean state (set in config file)
    if 'clean_files' in kwds:
        self.clean_files = kwds.pop('clean_files')
    if 'clean_outputs' in kwds:
        self.clean_outputs = kwds.pop('clean_outputs')
    # Set suffix
    self.suffix = kwds.pop('suffix') if 'suffix' in kwds \
                  else _conf.get_option('jobs', 'suffix')
    # Merge in profile, this includes all args from the DEFAULT profile
    # as well, ensuring that those are always set at a minumum.
    profile = self.profile if self.profile else 'DEFAULT'
    prof = _conf.get_profile(profile)
    if not prof:
        raise _ClusterError('No profile found for {}'.format(profile))
    for k, v in prof.args.items():
        if k not in kwds:
            kwds[k] = v

    # Use the default profile as a backup if any arguments missing.
    # BUG FIX: copy the dict before updating it -- previously
    # _conf.DEFAULT_PROFILES['DEFAULT'] was updated in place, mutating
    # shared module-level configuration for every subsequent Job.
    default_args = dict(_conf.DEFAULT_PROFILES['DEFAULT'])
    default_args.update(_conf.get_profile('DEFAULT').args)
    for opt, arg in default_args.items():
        if opt not in kwds:
            _logme.log('{} not in kwds, adding from default: {}:{}'
                       .format(opt, opt, arg), 'debug')
            kwds[opt] = arg

    # Set modules
    self.modules = kwds.pop('modules') if 'modules' in kwds else None
    if self.modules:
        self.modules = _run.opt_split(self.modules, (',', ';'))

    # Make sure args are a tuple
    if self.args:
        self.args = tuple(_run.listify(self.args))

    # In case cores are passed as None
    if 'nodes' not in kwds:
        kwds['nodes'] = default_args['nodes']
    if 'cores' not in kwds:
        kwds['cores'] = default_args['cores']
    self.nodes = kwds['nodes']
    self.cores = kwds['cores']

    # Set output files; a bare file name is placed into self.outpath.
    if 'outfile' in kwds:
        pth, fle = _os.path.split(kwds['outfile'])
        if not pth:
            pth = self.outpath
        kwds['outfile'] = _os.path.join(pth, fle)
    else:
        kwds['outfile'] = _os.path.join(
            self.outpath, '.'.join([self.name, self.suffix, 'out']))
    if 'errfile' in kwds:
        pth, fle = _os.path.split(kwds['errfile'])
        if not pth:
            pth = self.outpath
        kwds['errfile'] = _os.path.join(pth, fle)
    else:
        kwds['errfile'] = _os.path.join(
            self.outpath, '.'.join([self.name, self.suffix, 'err']))
    self.outfile = kwds['outfile']
    self.errfile = kwds['errfile']

    # Check and set dependencies
    if 'depends' in kwds:
        dependencies = _run.listify(kwds.pop('depends'))
        self.dependencies = []
        errmsg = 'Dependencies must be number, numeric string or Job'
        for dependency in dependencies:
            if not isinstance(dependency, (_str, _txt, Job)):
                raise _ClusterError(errmsg)
            self.dependencies.append(dependency)

    # Save parsed keywords as _kwargs
    self._kwargs = kwds

    self.initialized = True

    return self
def gen_scripts(self):
    """Create the script objects from the set parameters.

    Builds the submission (and, for some batch systems, exec) Script
    objects, plus a Function script when the command is callable. Does
    not write anything to disk; see write().

    Returns
    -------
    self : Job
    """
    if not self.initialized:
        self.initialize()
    ######################################
    #  Command and Function Preparation  #
    ######################################
    command = self.command
    args = self.args
    kwargs = self.kwargs  # Not self._kwargs
    name = self._update_name()
    kwds = self._kwargs

    # Get imports
    imports = kwds.pop('imports') if 'imports' in kwds else None

    # Get syspaths
    syspaths = kwds.pop('syspaths') if 'syspaths' in kwds else None

    # Split out sys.paths from imports and set imports in self
    if imports:
        self.imports = []
        syspaths = syspaths if syspaths else []
        for i in imports:
            if i.startswith('sys.path.append')\
                    or i.startswith('sys.path.insert'):
                syspaths.append(i)
            else:
                self.imports.append(i)

    # Function specific initialization
    if callable(command):
        self.kind = 'function'
        script_file = _os.path.join(
            self.scriptpath, '{}_func.{}.py'.format(name, self.suffix)
        )
        self.poutfile = self.outfile + '.func.pickle'
        self.function = _Function(
            file_name=script_file, function=command, args=args,
            kwargs=kwargs, imports=self.imports, syspaths=syspaths,
            outfile=self.poutfile
        )
        # Collapse the _command into a python call to the function script
        executable = '#!/usr/bin/env python{}'.format(
            _sys.version_info.major) if _conf.get_option(
                'jobs', 'generic_python') else _sys.executable
        command = '{} {}'.format(executable, self.function.file_name)
        args = None
    else:
        self.kind = 'script'
        self.poutfile = None
        # Collapse args into command
        # NOTE(review): no space is inserted between command and the first
        # arg here ("cmdarg1 arg2") -- verify callers pass a trailing
        # space or that this is intended.
        command = command + ' '.join(args) if args else command

    #####################
    #  Script Creation  #
    #####################

    # Build execution wrapper with modules
    modstr = ''
    if self.modules:
        for module in self.modules:
            modstr += 'module load {}\n'.format(module)

    # Add all of the keyword arguments at once
    opt_string, submit_args = _options.options_to_string(kwds, self.qtype)
    precmd = opt_string + '\n\n' + modstr
    self.submit_args = submit_args

    # Create queue-dependent scripts
    self.submission, self.exec_script = self.batch.gen_scripts(
        self, command, args, precmd, modstr
    )
    self.scripts_ready = True
    return self
def write(self, overwrite=True):
    """Write all scripts.

    Parameters
    ----------
    overwrite : bool, optional
        Overwrite existing files, defaults to True.

    Returns
    -------
    self : Job
    """
    if not self.scripts_ready:
        self.gen_scripts()
    _logme.log('Writing files, overwrite={}'.format(overwrite), 'debug')
    # The submission script always exists; the others only for some
    # batch systems / function jobs.
    self.submission.write(overwrite)
    for optional_script in (self.exec_script, self.function):
        if optional_script:
            optional_script.write(overwrite)
    self.written = True
    return self
def submit(self, wait_on_max_queue=True, additional_keywords=None,
           max_jobs=None):
    """Submit this job.

    To disable max_queue_len, set it to 0. None will allow override by
    the default settings in the config file, and any positive integer will
    be interpretted to be the maximum queue length.

    Parameters
    ----------
    wait_on_max_queue : bool, optional
        Block until queue limit is below the maximum before submitting.
    additional_keywords : dict, optional
        Pass this dictionary to the batch system submission function,
        not necessary.
    max_jobs : int, optional
        Override the maximum number of jobs to wait for

    Returns
    -------
    self : Job
    """
    if self.submitted:
        _logme.log('Not submitting, already submitted.', 'warn')
        return self

    if not self.written:
        self.write()

    # Check dependencies: Job dependencies must already have queue IDs.
    dependencies = []
    if self.dependencies:
        for depend in self.dependencies:
            if isinstance(depend, Job):
                if not depend.id:
                    _logme.log(
                        'Cannot submit job as dependency {} '
                        .format(depend) + 'has not been submitted',
                        'error'
                    )
                    return self
                dependencies.append(str(depend.id))
            else:
                dependencies.append(str(depend))

    # Wait on the queue if necessary
    if wait_on_max_queue:
        if not self._updating:
            self.update()
        self.queue.wait_to_submit(max_jobs)

    # Only include queued or running dependencies
    self.queue._update()  # Force update
    depends = []
    for depend in dependencies:
        dep_check = self.queue.check_dependencies(depend)
        if dep_check == 'absent':
            _logme.log(
                'Cannot submit job as dependency {} '
                .format(depend) + 'is not in the queue',
                'error'
            )
            return self
        elif dep_check == 'good':
            # Completed dependencies need not be passed to the scheduler.
            _logme.log(
                'Dependency {} is complete, skipping'
                .format(depend), 'debug'
            )
        elif dep_check == 'bad':
            _logme.log(
                'Cannot submit job as dependency {} '
                .format(depend) + 'has failed',
                'error'
            )
            return self
        elif dep_check == 'active':
            # NOTE(review): 'completeing' looks like a typo for
            # 'completing' -- verify which spelling the queue module
            # actually reports before changing either side.
            if self.queue.jobs[depend].state == 'completeing':
                continue
            _logme.log('Dependency {} is {}, adding to deps'
                       .format(depend, self.queue.jobs[depend].state),
                       'debug')
            depends.append(depend)
        else:
            # This shouldn't happen ever
            raise _ClusterError('fyrd.queue.Queue.check_dependencies() ' +
                                'returned an unrecognized value {0}'
                                .format(dep_check))

    self.id = self.batch.submit(
        self.submission.file_name,
        dependencies=depends,
        job=self, args=self.submit_args,
        kwds=additional_keywords
    )
    self.submitted = True
    self.submit_time = _dt.now()
    self.state = 'submitted'

    # NOTE(review): self.submitted was set to True unconditionally three
    # lines above, so this branch can never fire as written.
    if not self.submitted:
        raise _ClusterError('Submission appears to have failed, this '
                            "shouldn't happen")

    return self
def resubmit(self, wait_on_max_queue=True, cancel_running=None):
    """Attempt to auto resubmit, deletes prior files.

    Parameters
    ----------
    wait_on_max_queue : bool, optional
        Block until queue limit is below the maximum before submitting.
    cancel_running : bool or None, optional
        If the job is currently running, cancel it before resubmitting.
        If None (default), will ask the user.

    To disable max_queue_len, set it to 0. None will allow override by
    the default settings in the config file, and any positive integer will
    be interpretted to be the maximum queue length.

    Returns
    -------
    self : Job
    """
    if self.running:
        if cancel_running is None:
            cancel_running = _run.get_yesno(
                'Job currently running, cancel before resubmitting?', 'y'
            )
        if cancel_running:
            self.kill(confirm=False)
    # Wipe all state, scripts, and outputs back to pristine.
    self.scrub(confirm=False)
    # Rerun
    self.initialize()
    self.gen_scripts()
    self.write()
    return self.submit(wait_on_max_queue)
def kill(self, confirm=True):
    """Kill the running job.

    Does nothing (with a warning) if the job is not submitted or has
    already completed.

    Parameters
    ----------
    confirm : bool, optional
        Prompt the user before terminating; if the user declines, the
        job is left running. Defaults to True.

    Returns
    -------
    self : Job
    """
    if not self.submitted:
        _logme.log('Job not submitted, cannot kill', 'warn')
        return self
    if self.done:
        _logme.log('Job completed, cannot kill', 'warn')
        return self
    if confirm:
        if not _run.get_yesno(
            'This will terminate the running job, continue?', 'n'
        ):
            return self
    self.batch.kill(self.id)
    return self
def clean(self, delete_outputs=None, get_outputs=True):
    """Delete all scripts created by this module, if they were written.

    Parameters
    ----------
    delete_outputs : bool, optional
        also delete all output and err files, but get their contents first.
        If not a bool, falls back to self.clean_outputs.
    get_outputs : bool, optional
        if delete_outputs, save outputs before deleting.

    Returns
    -------
    self : Job
    """
    _logme.log('Cleaning outputs, delete_outputs={}'
               .format(delete_outputs), 'debug')
    # None (the default) means "use the configured auto-clean setting".
    if not isinstance(delete_outputs, bool):
        delete_outputs = self.clean_outputs
    assert isinstance(delete_outputs, bool)
    for jobfile in [self.submission, self.exec_script, self.function]:
        if jobfile:
            jobfile.clean()
    if delete_outputs:
        _logme.log('Deleting output files.', 'debug')
        if get_outputs:
            # Cache contents on self before removing the files.
            self.fetch_outputs(delete_files=True)
        for f in self.outfiles:
            if _os.path.isfile(f):
                _logme.log('Deleteing {}'.format(f), 'debug')
                _os.remove(f)
    return self
def scrub(self, confirm=True):
"""Clean everything and reset to an unrun state.
Parameters
----------
confirm : bool, optional
Get user input before proceeding
Returns
-------
self : Job
"""
msg = ("This will delete all outputs stored in this job, as well "
"as all output files, job files, and scripts. Are you sure "
"you want to do this?")
if confirm:
_run.get_yesno(msg, default='n')
# Clean old set
self.clean(delete_outputs=True)
# Reset runtime attributes
self.initialized = False
self.scripts_ready = False
self.written = False
self.submitted = False
self.id = None
self.found = False
self.queue_info = None
self.state = 'Not_Submitted'
self._got_out = False
self._got_stdout = False
self._got_stderr = False
self._got_exitcode = False
self._out = None
self._stdout = None
self._stderr = None
self._exitcode = None
self._got_times = False
self._updating = False
self._found_files = False
self.start = None
self.end = None
return self.update()
######################
# Queue Management #
######################
def update(self, fetch_info=True):
"""Update status from the queue.
Parameters
----------
fetch_info : bool, optional
Fetch basic job info if complete.
Returns
-------
self : Job
"""
if not self._updating:
self._update(fetch_info)
else:
_logme.log('Already updating, aborting.', 'debug')
return self
def update_queue_info(self):
"""Set (and return) queue_info from the queue even if done."""
_logme.log('Updating queue_info', 'debug')
queue_info1 = self.queue[self.id]
self.queue.update()
queue_info2 = self.queue[self.id]
if queue_info2:
self.queue_info = queue_info2
elif queue_info1:
self.queue_info = queue_info1
elif self.queue_info is None and self.submitted:
_logme.log('Cannot find self in the queue and queue_info is empty',
'warn')
return self.queue_info
#################################
# Output Handling and Waiting #
#################################
    def wait(self):
        """Block until job completes.

        Returns
        -------
        success : bool or str
            True if exitcode == 0, False if not, 'disappeared' if job lost from
            queue.
        """
        # Auto-submit if configured to do so, otherwise waiting is impossible.
        if not self.submitted:
            if _conf.get_option('jobs', 'auto_submit'):
                _logme.log('Auto-submitting as not submitted yet', 'debug')
                self.submit()
            else:
                _logme.log('Cannot wait for result as job has not been ' +
                           'submitted', 'warn')
                return False
        self.update(fetch_info=False)
        if not self.done:
            _logme.log('Waiting for self {}'.format(self.name), 'debug')
            # return_disp=True makes queue.wait() distinguish a job that
            # vanished from the queue ('disappeared') from plain failure.
            status = self.queue.wait(self.id, return_disp=True)
            if status == 'disappeared':
                self.state = status
            elif status is not True:
                return False
            else:
                if not self._updating:
                    self.update()
                if self.get_exitcode(update=False) != 0:
                    _logme.log('Job failed with exitcode {}'
                               .format(self.exitcode), 'debug')
                    return False
        # Even a "completed" job is only trusted once its output files exist;
        # a disappeared job with files is treated as a success.
        if self._wait_for_files(caution_message=False):
            if not self._updating:
                self.update()
            if self.state == 'disappeared':
                _logme.log('Job files found for disappered job, assuming '
                           'success', 'info')
                return 'disappeared'
            return True
        else:
            if self.state == 'disappeared':
                _logme.log('Disappeared job has no output files, assuming '
                           'failure', 'error')
            return False
    def get(self, save=True, cleanup=None, delete_outfiles=None,
            del_no_save=None, raise_on_error=True):
        """Block until job completed and return output of script/function.

        By default saves all outputs to this class and deletes all intermediate
        files.

        Parameters
        ----------
        save : bool, optional
            Save all outputs to the class also (advised)
        cleanup : bool, optional
            Clean all intermediate files after job completes.
        delete_outfiles : bool, optional
            Clean output files after job completes.
        del_no_save : bool, optional
            Delete output files even if `save` is `False`
        raise_on_error : bool, optional
            If the returned output is an Exception, raise it.

        Returns
        -------
        str
            Function output if Function, else STDOUT
        """
        _logme.log(('Getting outputs, cleanup={}, autoclean={}, '
                    'delete_outfiles={}').format(
                        cleanup, self.clean_files, delete_outfiles
                    ), 'debug')
        # Wait for queue
        status = self.wait()
        if status is not True:
            # wait() failed or the job vanished; try a best-effort fetch of
            # whatever output files exist before giving up.
            if status == 'disappeared':
                msg = 'Job disappeared from queue'
                _logme.log(msg + ', attempting to get '
                           'outputs', 'debug')
            else:
                msg = 'Wait failed'
                _logme.log(msg + ', attempting to get outputs anyway',
                           'debug')
            try:
                self.fetch_outputs(save=save, delete_files=False,
                                   get_stats=False)
            except IOError:
                _logme.log(msg + ' and files could not be found, job must '
                           'have failed', 'error')
                if raise_on_error:
                    raise
                return
            if status != 'disappeared':
                return
        else:
            # Get output
            _logme.log('Wait complete, fetching outputs', 'debug')
            self.fetch_outputs(save=save, delete_files=False)
        out = self.out if save else self.get_output(save=save, update=False)
        # A function job that raised is stored as an (exc_type, exc, tb)
        # tuple; re-raise or print the traceback depending on raise_on_error.
        if isinstance(out, tuple) and issubclass(out[0], Exception):
            if raise_on_error:
                _reraise(*out)
            else:
                _logme.log('Job failed with exception {}'.format(out))
                print(_tb(out[2]))
                return out
        # Cleanup
        if cleanup is None:
            cleanup = self.clean_files
        else:
            assert isinstance(cleanup, bool)
        if delete_outfiles is None:
            delete_outfiles = self.clean_outputs
        if save is False:
            # Without save the outputs would be lost entirely unless the
            # caller explicitly opted in via del_no_save.
            delete_outfiles = del_no_save if del_no_save is not None else False
        if cleanup:
            self.clean(delete_outputs=delete_outfiles)
        return out
    def get_output(self, save=True, delete_file=None, update=True,
                   raise_on_error=True):
        """Get output of function or script.

        This is the same as stdout for a script, or the function output for
        a function.

        By default, output file is kept unless delete_file is True or
        self.clean_files is True.

        Parameters
        ----------
        save : bool, optional
            Save the output to self.out, default True. Would be a good idea to
            set to False if the output is huge.
        delete_file : bool, optional
            Delete the output file when getting
        update : bool, optional
            Update job info from queue first.
        raise_on_error : bool, optional
            If the returned output is an Exception, raise it.

        Returns
        -------
        output : anything
            The output of the script or function. Always a string if script.
        """
        _logme.log(('Getting output, save={}, clean_files={}, '
                    'delete_file={}').format(
                        save, self.clean_files, delete_file
                    ), 'debug')
        if delete_file is None:
            delete_file = self.clean_outputs
        # Script jobs have no pickled output; their "output" is STDOUT.
        if self.kind == 'script':
            return self.get_stdout(save=save, delete_file=delete_file,
                                   update=update)
        if self.done and self._got_out:
            _logme.log('Getting output from _out', 'debug')
            return self._out
        if update and not self._updating and not self.done:
            self.update()
        if self.done:
            if update:
                self._wait_for_files()
        else:
            _logme.log('Cannot get pickled output before job completes',
                       'warn')
            return None
        _logme.log('Getting output from {}'.format(self.poutfile), 'debug')
        if _os.path.isfile(self.poutfile):
            with open(self.poutfile, 'rb') as fin:
                # Function output is pickled by the remote wrapper script.
                out = _pickle.load(fin)
            if delete_file is True or self.clean_files is True:
                _logme.log('Deleting {}'.format(self.poutfile),
                           'debug')
                _os.remove(self.poutfile)
            if save:
                self._out = out
                self._got_out = True
            # A remote exception is unpickled as an exc-info tuple.
            if _run.is_exc(out):
                _logme.log('{} failed with exception {}'.format(self, out[1]),
                           'error')
                if raise_on_error:
                    _reraise(*out)
            return out
        else:
            _logme.log('No file at {} even though job has completed!'
                       .format(self.poutfile), 'critical')
            raise IOError('File not found: {}'.format(self.poutfile))
    def get_stdout(self, save=True, delete_file=None, update=True):
        """Get stdout of function or script, same for both.

        By default, output file is kept unless delete_file is True or
        self.clean_files is True.

        Also sets self.start and self.end from the contents of STDOUT if
        possible.

        Parameters
        ----------
        save : bool, optional
            Save the output to self.stdout, default True. Would be a good idea
            to set to False if the output is huge.
        delete_file : bool, optional
            Delete the stdout file when getting
        update : bool, optional
            Update job info from queue first.

        Returns
        -------
        str
            The contents of STDOUT, with runtime info and trailing newline
            removed.
        """
        if delete_file is None:
            delete_file = self.clean_outputs
        _logme.log(('Getting stdout, save={}, clean_files={}, '
                    'delete_file={}').format(
                        save, self.clean_files, delete_file
                    ), 'debug')
        if self.done and self._got_stdout:
            _logme.log('Getting stdout from _stdout', 'debug')
            return self._stdout
        if update and not self._updating and not self.done:
            self.update()
        if self.done:
            if update:
                self._wait_for_files()
        else:
            _logme.log('Job not done, attempting to get current STDOUT ' +
                       'anyway', 'info')
        _logme.log('Getting stdout from {}'.format(self._kwargs['outfile']),
                   'debug')
        if _os.path.isfile(self._kwargs['outfile']):
            with open(self._kwargs['outfile']) as fin:
                stdout = fin.read()
            if stdout:
                # The wrapper script brackets the real output with timing
                # lines and a 'Done'/'Code: N' footer; only a complete
                # footer is stripped, otherwise return the raw contents.
                stdouts = stdout.strip().split('\n')
                if len(stdouts) < 3 or stdouts[-3] != 'Done':
                    _logme.log('STDOUT incomplete, returning as is', 'info')
                    return stdout
                if self.done:
                    self.get_times(update=False, stdout=stdout)
                    self.get_exitcode(update=False, stdout=stdout)
                # Drop the leading timestamp/header and trailing footer.
                stdout = '\n'.join(stdouts[2:-3]) + '\n'
            if delete_file is True or self.clean_files is True:
                _logme.log('Deleting {}'.format(self._kwargs['outfile']),
                           'debug')
                _os.remove(self._kwargs['outfile'])
            if save:
                self._stdout = stdout
                if self.done:
                    self._got_stdout = True
            return stdout
        else:
            _logme.log('No file at {}, cannot get stdout'
                       .format(self._kwargs['outfile']), 'warn')
            return None
    def get_stderr(self, save=True, delete_file=None, update=True):
        """Get stderr of function or script, same for both.

        By default, output file is kept unless delete_file is True or
        self.clean_files is True.

        Parameters
        ----------
        save : bool, optional
            Save the output to self.stdout, default True. Would be a good idea
            to set to False if the output is huge.
        delete_file : bool, optional
            Delete the stdout file when getting
        update : bool, optional
            Update job info from queue first.

        Returns
        -------
        str
            The contents of STDERR, with trailing newline removed.
        """
        if delete_file is None:
            delete_file = self.clean_outputs
        _logme.log(('Getting stderr, save={}, clean_files={}, '
                    'delete_file={}').format(
                        save, self.clean_files, delete_file
                    ), 'debug')
        # Return the cached copy if we already fetched it after completion.
        if self.done and self._got_stderr:
            _logme.log('Getting stderr from _stderr', 'debug')
            return self._stderr
        if update and not self._updating and not self.done:
            self.update()
        if self.done:
            if update:
                self._wait_for_files()
        else:
            _logme.log('Job not done, attempting to get current STDERR ' +
                       'anyway', 'info')
        _logme.log('Getting stderr from {}'.format(self._kwargs['errfile']),
                   'debug')
        if _os.path.isfile(self._kwargs['errfile']):
            with open(self._kwargs['errfile']) as fin:
                stderr = fin.read()
            if delete_file is True or self.clean_files is True:
                _logme.log('Deleting {}'.format(self._kwargs['errfile']),
                           'debug')
                _os.remove(self._kwargs['errfile'])
            if save:
                self._stderr = stderr
                # Only mark as cached once the job is done; before that the
                # file may still be growing.
                if self.done:
                    self._got_stderr = True
            return stderr
        else:
            _logme.log('No file at {}, cannot get stderr'
                       .format(self._kwargs['errfile']), 'warn')
            return None
def get_times(self, update=True, stdout=None):
"""Get stdout of function or script, same for both.
Sets self.start and self.end from the contents of STDOUT if
possible.
Parameters
----------
update : bool, optional
Update job info from queue first.
stdout : str, optional
Pass existing stdout for use
Returns
-------
start : datetime.datetime
end : datetime.datetime
"""
_logme.log('Getting times', 'debug')
if self.done and self._got_times:
_logme.log('Getting times from self.start, self.end', 'debug')
return self.start, self.end
if update and not self._updating and not self.done:
self.update()
if self.done:
if update:
self._wait_for_files()
else:
_logme.log('Cannot get times until job is complete.', 'warn')
return None, None
_logme.log('Getting times from {}'.format(self._kwargs['outfile']),
'debug')
if not stdout:
if _os.path.isfile(self._kwargs['outfile']):
with open(self._kwargs['outfile']) as fin:
stdout = fin.read()
else:
_logme.log('No file at {}, cannot get times'
.format(self._kwargs['outfile']), 'warn')
return None
stdouts = stdout.strip().split('\n')
if len(stdouts) < 3 or stdouts[-3] != 'Done':
_logme.log('STDOUT incomplete, cannot get times', 'warn')
return None
# Get times
timefmt = '%y-%m-%d-%H:%M:%S'
try:
self.start = _dt.strptime(stdouts[0], timefmt)
self.end = _dt.strptime(stdouts[-1], timefmt)
except ValueError as err:
_logme.log('Time parsing failed with value error; ' +
'{}. '.format(err) + 'This may be because you ' +
'are using the script running that does not ' +
'include time tracking', 'debug')
self._got_times = True
return self.start, self.end
    def get_exitcode(self, update=True, stdout=None):
        """Try to get the exitcode.

        Parameters
        ----------
        update : bool, optional
            Update job info from queue first.
        stdout : str, optional
            Pass existing stdout for use

        Returns
        -------
        exitcode : int or None
        """
        _logme.log('Getting exitcode', 'debug')
        if self.done and self._got_exitcode:
            _logme.log('Getting exitcode from _exitcode', 'debug')
            return self._exitcode
        if update and not self._updating and not self.done:
            self.update()
        if not self.done:
            _logme.log('Job is not complete, no exit code yet', 'info')
            return None
        # A disappeared job cannot be interrogated; assume success (0) since
        # wait() only classifies it as disappeared when outputs exist.
        if self.state == 'disappeared':
            _logme.log('Cannot get exitcode for disappeared job', 'debug')
            return 0
        code = None
        # First choice: the 'Code: N' footer the wrapper writes to STDOUT.
        if not stdout and _os.path.isfile(self._kwargs['outfile']):
            with open(self._kwargs['outfile']) as fin:
                stdout = fin.read()
        if stdout:
            stdouts = stdout.strip().split('\n')
            if len(stdouts) > 3 and stdouts[-3] == 'Done':
                if stdouts[-2].startswith('Code: '):
                    code = int(stdouts[-2].split(':')[-1].strip())
        # Fallback: ask the scheduler queue for the exit code.
        if code is None:
            _logme.log('Getting exitcode from queue', 'debug')
            if not self.queue_info:
                self.queue_info = self.queue[self.id]
            if hasattr(self.queue_info, 'exitcode'):
                code = self.queue_info.exitcode
        if code is None:
            _logme.log('Failed to get exitcode for job', 'warn')
            return None
        self._exitcode = code
        self._got_exitcode = True
        if code != 0:
            self.state = 'failed'
            _logme.log('Job {} failed with exitcode {}'
                       .format(self.name, code), 'error')
        return code
def fetch_outputs(self, save=True, delete_files=None, get_stats=True):
"""Save all outputs in their current state. No return value.
This method does not wait for job completion, but merely gets the
outputs. To wait for job completion, use `get()` instead.
Parameters
----------
save : bool, optional
Save all outputs to the class also (advised)
delete_files : bool, optional
Delete the output files when getting, only used if save is True
get_stats : bool, optional
Try to get exitcode.
"""
_logme.log('Saving outputs to self, delete_files={}'
.format(delete_files), 'debug')
if not self._updating:
self.update()
if delete_files is None:
delete_files = self.clean_outputs
if not self._got_exitcode and get_stats:
self.get_exitcode(update=False)
if not self._got_times:
self.get_times(update=False)
if save:
self.get_output(save=True, delete_file=delete_files, update=False)
self.get_stdout(save=True, delete_file=delete_files, update=False)
self.get_stderr(save=True, delete_file=delete_files, update=False)
##############################
# Minor management methods #
##############################
def get_keywords(self):
"""Return a list of the keyword arguments used to make the job."""
return self.kwds
def set_keywords(self, kwds, replace=False):
"""Set the job keywords, just updates self.kwds.
Parameters
----------
kwds : dict
Set of valid arguments.
replace : bool, optional
Overwrite the keword arguments instead of updating.
"""
kwds = _options.check_arguments(kwds)
if replace:
self.kwds = kwds
else:
for key, value in kwds.items():
self.kwds[key] = value
###############
# Internals #
###############
    def _update(self, fetch_info=True):
        """Update status from the queue.

        Parameters
        ----------
        fetch_info : bool, optional
            Fetch basic job info if complete.
        """
        _logme.log('Updating job.', 'debug')
        # Re-entrancy flag: prevents methods called below (wait, get_times,
        # ...) from recursing back into update().
        self._updating = True
        if self.done or not self.submitted:
            self._updating = False
            return
        self.queue.update()
        if self.submitted and self.id:
            queue_info = self.queue[self.id]
            if queue_info:
                assert self.id == queue_info.id
                self.found = True
                self.queue_info = queue_info
                self.state = self.queue_info.state
            elif self.found:
                # Job was in the queue before but is gone now; wait() will
                # decide whether its outputs indicate success.
                _logme.log('Job appears to have disappeared, waiting for '
                           'reappearance, this may take a while', 'warn')
                status = self.wait()
                if status == 'disappeared':
                    _logme.log('Job disappeared, but the output files are '
                               'present assuming completion', 'info')
                    self.state = 'completed'
                    self.disappeared = True
                elif not status:
                    _logme.log('Job appears to have failed and disappeared',
                               'error')
            # If job not found after 30 seconds, assume trouble, check for
            # completion
            elif self.submitted and (_dt.now()-self.submit_time).seconds > 360:
                if self._wait_for_files(btme=4, caution_message=False):
                    self.state = 'completed'
                    self.disappeared = True
                    _logme.log('Job never appeared in the queue, but '
                               'outfiles still exist, assuming completion.',
                               'warn')
                else:
                    self.state = 'failed'
                    self.disappeared = True
                    s = (_dt.now()-self.submit_time).seconds
                    _logme.log('Job not in queue after {} seconds '.format(s) +
                               'of searching and no outputs found, assuming '
                               'failure.', 'error')
            # Shorter grace period: > 30s gets one quick file check but is
            # not yet declared failed.
            elif self.submitted and (_dt.now()-self.submit_time).seconds > 30:
                if self._wait_for_files(btme=1, caution_message=False):
                    self.state = 'completed'
                    self.disappeared = True
                    _logme.log('Job never appeared in the queue, but '
                               'outfiles still exist, assuming completion.',
                               'warn')
        if self.done and fetch_info:
            if self._wait_for_files(btme=1, caution_message=False):
                if not self._got_exitcode:
                    self.get_exitcode(update=False)
                if not self._got_times:
                    self.get_times(update=False)
        self._updating = False
    def _wait_for_files(self, btme=None, caution_message=False):
        """Block until files appear up to 'file_block_time' in config file.

        Aborts after 2 seconds if job exit code is not 0.

        Parameters
        ----------
        btme : int, optional
            Number of seconds to try for before giving up, default set in
            config file.
        caution_message : bool, optional
            Display a message if this is taking a while.

        Returns
        -------
        bool
            True if files found
        """
        if self._found_files:
            _logme.log('Already found files, not waiting again', 'debug')
            return True
        wait_time = 0.1  # seconds
        # An explicitly passed timeout is an internal retry, so time-outs are
        # only a debug event; the config-default timeout warrants a warning.
        if btme:
            lvl = 'debug'
        else:
            lvl = 'warn'
            btme = _conf.get_option('jobs', 'file_block_time', 30)
        start = _dt.now()
        dsp = False
        _logme.log('Checking for output files', 'debug')
        while True:
            runtime = (_dt.now() - start).seconds
            if caution_message and runtime > 1:
                _logme.log('Job complete.', 'info')
                _logme.log('Waiting for output files to appear.', 'info')
                caution_message = False
            if not dsp and runtime > 20:
                _logme.log('Still waiting for output files to appear',
                           'info')
                dsp = True
            count = 0
            outfiles = self.incomplete_outfiles
            tlen = len(outfiles)
            if not outfiles:
                _logme.log('No incomplete outfiles, assuming all found in ' +
                           '{} seconds'.format(runtime), 'debug')
                break
            for i in outfiles:
                if _os.path.isfile(i):
                    count += 1
            if count == tlen:
                _logme.log('All output files found in {} seconds'
                           .format(runtime), 'debug')
                break
            _sleep(wait_time)
            if runtime > btme:
                _logme.log('Job files have not appeared for ' +
                           '>{} seconds'.format(btme), lvl)
                return False
            if not self._updating:
                self.update()
            # Bail early once we know the job failed; its files never will
            # appear.
            if runtime > 2 and self.get_exitcode(update=False) != 0:
                _logme.log('Job failed with exit code {}.'
                           .format(self.exitcode) + ' Cannot find files.',
                           'error')
                return False
        self._found_files = True
        return True
    def _update_name(self, name=None):
        """Make sure the job name is unique.

        Sets
        ----
        self.name

        Parameters
        ----------
        name : str, optional
            A name override, if no provided self.name used

        Returns
        -------
        name : str
        """
        # Set name
        name = name if name else self.name
        if not name:
            if callable(self.command):
                # Derive a name from the function's repr, e.g.
                # '<function foo at 0x...>' or '<bound method Cls.foo ...>'.
                # NOTE(review): parts[2:3] yields at most one element, so the
                # join is effectively parts[2] — presumably 'Cls.foo'; confirm.
                strcmd = str(self.command).strip('<>')
                parts = strcmd.split(' ')
                if parts[0] == 'bound':
                    name = '_'.join(parts[2:3])
                else:
                    parts.remove('function')
                    try:
                        parts.remove('built-in')
                    except ValueError:
                        pass
                    name = parts[0]
            else:
                # Shell command: use the basename of the executable.
                name = self.command.split(' ')[0].split('/')[-1]
        # Make sure name not in queue
        # Append this job's uuid unless it is already the name's suffix.
        if '.' not in name or not name.split('.')[-1] == self.uuid:
            name = '{}.{}'.format(name, self.uuid)
        self.name = name
        return name
    def __repr__(self):
        """Return simple job information.

        NOTE: has side effects — initializes the job and refreshes its queue
        state before formatting.
        """
        if not self.initialized:
            self.initialize()
        self.update()
        outstr = "Job:{name}<{mode}:{qtype}".format(
            name=self.name, mode=self.kind, qtype=self.qtype)
        if self.submitted:
            outstr += ':{}'.format(self.id)
        outstr += "(command:{cmnd})".format(cmnd=self.command)
        # Trailing status token: queue state, WRITTEN, or NOT_SUBMITTED.
        if self.submitted or self.done:
            outstr += self.state.upper()
        elif self.written:
            outstr += "WRITTEN"
        else:
            outstr += "NOT_SUBMITTED"
        outstr += ">"
        return outstr
def __str__(self):
"""Print job name and ID + status."""
if not self._updating:
self.update()
return "Job: {name} ID: {id}, state: {state}".format(
name=self.name, id=self.id, state=self.state)
def __int__(self):
"""Return integer of ID."""
if self.id:
if str(self.id.isdigit()):
return int(id)
_logme.log('No ID yet.', 'error')
return 0
| mit |
cloudviz/agentless-system-crawler | crawler/plugins/applications/apache/apache_container_crawler.py | 3 | 1888 | from utils.namespace import run_as_another_namespace
import logging
import json
import utils.misc
import dockercontainer
from icrawl_plugin import IContainerCrawler
from plugins.applications.apache import apache_crawler
from requests.exceptions import ConnectionError
logger = logging.getLogger('crawlutils')
class ApacheContainerCrawler(IContainerCrawler):
    """Crawl apache mod_status metrics from inside a docker container."""

    feature_type = 'application'
    feature_key = 'apache'
    default_port = 80

    def get_feature(self):
        """Return the feature key this crawler reports ('apache')."""
        return self.feature_key

    def crawl(self, container_id=None, **kwargs):
        """Collect apache metrics from the given container.

        Returns a single-element list of (key, metrics, type) tuples, or
        None if the container does not expose the default apache port.
        Raises ConnectionError if apache cannot be reached.
        """
        c = dockercontainer.DockerContainer(container_id)
        # Prefer ports published via kubernetes annotations; fall back to
        # the ports exposed by the container itself.
        if "annotation.io.kubernetes.container.ports" in\
                c.inspect['Config']['Labels']:
            ports = c.inspect['Config']['Labels'][
                'annotation.io.kubernetes.container.ports']
            ports = json.loads(ports)
        else:
            ports = c.get_container_ports()
        port = None
        for each_port in ports:
            # Entries are either dicts with 'containerPort' or raw numbers.
            if "containerPort" in each_port:
                tmp_port = int(each_port['containerPort'])
            else:
                tmp_port = int(each_port)
            if tmp_port == self.default_port:
                port = tmp_port
        if not port:
            return
        state = c.inspect['State']
        pid = str(state['Pid'])
        # Resolve the container's IPs from inside its network namespace.
        ips = run_as_another_namespace(
            pid, ['net'], utils.misc.get_host_ip4_addresses)
        ip = None
        for each_ip in ips:
            if each_ip != "127.0.0.1":
                ip = each_ip
                break
        if ip is None:
            # Bug fix: previously `ip` was left unbound (NameError) when no
            # non-loopback address was found.
            logger.info("no non-loopback IP found for apache container")
            raise ConnectionError("no non-loopback IP for apache container")
        try:
            metrics = apache_crawler.retrieve_metrics(ip, port)
            return [(self.feature_key, metrics, self.feature_type)]
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; also format the port into the
            # error message instead of passing it as a stray argument.
            logger.info("apache does not listen on port:%d", port)
            raise ConnectionError("apache does not listen on port:%d" % port)
| apache-2.0 |
alx-eu/django | django/conf/locale/sk/formats.py | 108 | 1114 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j. F Y G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
malept/youtube-dl | youtube_dl/extractor/drbonanza.py | 27 | 5082 | from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
)
class DRBonanzaIE(InfoExtractor):
    """Extractor for dr.dk Bonanza archive pages (video and audio assets)."""

    _VALID_URL = r'https?://(?:www\.)?dr\.dk/bonanza/(?:[^/]+/)+(?:[^/])+?(?:assetId=(?P<id>\d+))?(?:[#&]|$)'
    _TESTS = [{
        'url': 'http://www.dr.dk/bonanza/serie/portraetter/Talkshowet.htm?assetId=65517',
        'info_dict': {
            'id': '65517',
            'ext': 'mp4',
            'title': 'Talkshowet - Leonard Cohen',
            'description': 'md5:8f34194fb30cd8c8a30ad8b27b70c0ca',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
            'timestamp': 1295537932,
            'upload_date': '20110120',
            'duration': 3664,
        },
        'params': {
            'skip_download': True,  # requires rtmp
        },
    }, {
        'url': 'http://www.dr.dk/bonanza/radio/serie/sport/fodbold.htm?assetId=59410',
        'md5': '6dfe039417e76795fb783c52da3de11d',
        'info_dict': {
            'id': '59410',
            'ext': 'mp3',
            'title': 'EM fodbold 1992 Danmark - Tyskland finale Transmission',
            'description': 'md5:501e5a195749480552e214fbbed16c4e',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
            'timestamp': 1223274900,
            'upload_date': '20081006',
            'duration': 7369,
        },
    }]

    def _real_extract(self, url):
        """Extract asset metadata and format list from a Bonanza page."""
        url_id = self._match_id(url)
        webpage = self._download_webpage(url, url_id)
        # The page embeds asset metadata as inline JSON; pick the JSON blob
        # for the requested assetId, or the first playlist entry otherwise.
        if url_id:
            info = json.loads(self._html_search_regex(r'({.*?%s.*})' % url_id, webpage, 'json'))
        else:
            # Just fetch the first video on that page
            info = json.loads(self._html_search_regex(r'bonanzaFunctions.newPlaylist\(({.*})\)', webpage, 'json'))
        asset_id = str(info['AssetId'])
        title = info['Title'].rstrip(' \'\"-,.:;!?')
        duration = int_or_none(info.get('Duration'), scale=1000)
        # First published online. "FirstPublished" contains the date for original airing.
        timestamp = parse_iso8601(
            re.sub(r'\.\d+$', '', info['Created']))

        def parse_filename_info(url):
            """Infer width/height/bitrate/ext from the media file name."""
            match = re.search(r'/\d+_(?P<width>\d+)x(?P<height>\d+)x(?P<bitrate>\d+)K\.(?P<ext>\w+)$', url)
            if match:
                return {
                    'width': int(match.group('width')),
                    'height': int(match.group('height')),
                    'vbr': int(match.group('bitrate')),
                    'ext': match.group('ext')
                }
            match = re.search(r'/\d+_(?P<bitrate>\d+)K\.(?P<ext>\w+)$', url)
            if match:
                return {
                    'vbr': int(match.group('bitrate')),
                    'ext': match.group(2)  # group 2 is the named 'ext' group
                }
            return {}

        video_types = ['VideoHigh', 'VideoMid', 'VideoLow']
        # Lower (more negative) preference = lower quality.
        preferencemap = {
            'VideoHigh': -1,
            'VideoMid': -2,
            'VideoLow': -3,
            'Audio': -4,
        }
        formats = []
        # NOTE(review): `thumbnail` is only bound when a 'Thumb' entry is
        # present in info['Files'] — presumably always the case; verify.
        for file in info['Files']:
            if info['Type'] == 'Video':
                if file['Type'] in video_types:
                    format = parse_filename_info(file['Location'])
                    format.update({
                        'url': file['Location'],
                        'format_id': file['Type'].replace('Video', ''),
                        'preference': preferencemap.get(file['Type'], -10),
                    })
                    if format['url'].startswith('rtmp'):
                        rtmp_url = format['url']
                        format['rtmp_live'] = True # --resume does not work
                        if '/bonanza/' in rtmp_url:
                            format['play_path'] = rtmp_url.split('/bonanza/')[1]
                    formats.append(format)
                elif file['Type'] == 'Thumb':
                    thumbnail = file['Location']
            elif info['Type'] == 'Audio':
                if file['Type'] == 'Audio':
                    format = parse_filename_info(file['Location'])
                    format.update({
                        'url': file['Location'],
                        'format_id': file['Type'],
                        'vcodec': 'none',
                    })
                    formats.append(format)
                elif file['Type'] == 'Thumb':
                    thumbnail = file['Location']
        description = '%s\n%s\n%s\n' % (
            info['Description'], info['Actors'], info['Colophon'])
        self._sort_formats(formats)
        # Build a URL-safe slug from the title plus the asset id.
        display_id = re.sub(r'[^\w\d-]', '', re.sub(r' ', '-', title.lower())) + '-' + asset_id
        display_id = re.sub(r'-+', '-', display_id)
        return {
            'id': asset_id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
        }
| unlicense |
elisamussumeci/InfoDenguePredict | infodenguepredict/models/VAR2.py | 2 | 1286 | """
Vector Autogregression using statsmodels
http://statsmodels.sourceforge.net/devel/vector_ar.html
"""
import numpy as np
import pandas as pd
from statsmodels.tsa.api import *
from statsmodels.tsa.vector_ar.var_model import VAR
from datetime import datetime
import matplotlib.pyplot as plt
from infodenguepredict.data.infodengue import get_alerta_table, build_multicity_dataset
def build_model(data):
    """Return an unfitted VAR model over *data*.

    Coerces the DataFrame index to a DatetimeIndex in place before
    constructing the model.
    """
    data.index = pd.DatetimeIndex(data.index)
    return VAR(data)
if __name__ == "__main__":
    # Fit a VAR model on dengue case counts and plot a short forecast.
    prediction_window = 5 # weeks
    # 'local' fits a single city; anything else fits multiple RJ cities.
    scenario = 'global'
    if scenario == 'local':
        data = get_alerta_table(3303500) # Nova Iguaçu: 3303500
        data = data[['casos', 'nivel']]
    else:
        data = build_multicity_dataset('RJ')
        # Keep only raw case-count columns (drop the 'casos_est' estimates);
        # the trailing [:5] keeps the first five rows only.
        data = data[[col for col in data.columns if col.startswith('casos') and not col.startswith('casos_est')]][:5]
    print(data.info())
    # data.casos_est.plot(title="Series")
    model = build_model(data)
    fit = model.fit(maxlags=11, ic='aic') # 4 lags
    print(fit.summary())
    fit.plot()
    fit.plot_acorr()
    plt.figure()
    # Forecast from the last `k_ar` observations (the fitted lag order).
    lag_order = fit.k_ar
    forecast = fit.forecast(data.values[-lag_order:], prediction_window)
    print(forecast)
    fit.plot_forecast(prediction_window)
    plt.show()
| gpl-3.0 |
apixandru/intellij-community | python/lib/Lib/pwd.py | 93 | 2552 | """
This module provides access to the Unix password database.
Password database entries are reported as 7-tuples containing the
following items from the password database (see `<pwd.h>'), in order:
pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell. The
uid and gid items are integers, all others are strings. An exception
is raised if the entry asked for cannot be found.
"""
__all__ = ['getpwuid', 'getpwnam', 'getpwall']
from os import _name, _posix_impl
from org.python.core.Py import newString
if _name == 'nt':
raise ImportError, 'pwd module not supported on Windows'
class struct_passwd(tuple):

    """
    pwd.struct_passwd: Results from getpw*() routines.

    This object may be accessed either as a tuple of
      (pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell)
    or via the object attributes as named in the above tuple.
    """

    # Attribute names, in tuple-position order.
    attrs = ['pw_name', 'pw_passwd', 'pw_uid', 'pw_gid', 'pw_gecos',
             'pw_dir', 'pw_shell']

    def __new__(cls, pwd):
        # Convert the Java-side entry into plain Python values.
        fields = (newString(pwd.loginName), newString(pwd.password),
                  int(pwd.UID), int(pwd.GID), newString(pwd.GECOS),
                  newString(pwd.home), newString(pwd.shell))
        return tuple.__new__(cls, fields)

    def __getattr__(self, attr):
        # Map pw_* attribute names onto tuple positions.
        if attr in self.attrs:
            return self[self.attrs.index(attr)]
        raise AttributeError
def getpwuid(uid):
    """
    getpwuid(uid) -> (pw_name,pw_passwd,pw_uid,
                      pw_gid,pw_gecos,pw_dir,pw_shell)
    Return the password database entry for the given numeric user ID.
    See pwd.__doc__ for more on password database entries.
    """
    entry = _posix_impl.getpwuid(uid)
    if entry:
        return struct_passwd(entry)
    # No such uid in the password database.
    raise KeyError(uid)
def getpwnam(name):
    """
    getpwnam(name) -> (pw_name,pw_passwd,pw_uid,
                       pw_gid,pw_gecos,pw_dir,pw_shell)
    Return the password database entry for the given user name.
    See pwd.__doc__ for more on password database entries.
    """
    entry = _posix_impl.getpwnam(name)
    if entry:
        return struct_passwd(entry)
    # No such user name in the password database.
    raise KeyError(name)
def getpwall():
    """
    getpwall() -> list_of_entries
    Return a list of all available password database entries,
    in arbitrary order.
    See pwd.__doc__ for more on password database entries.
    """
    entries = []
    # getpwent() returns a falsy value once the database is exhausted.
    entry = _posix_impl.getpwent()
    while entry:
        entries.append(struct_passwd(entry))
        entry = _posix_impl.getpwent()
    return entries
| apache-2.0 |
Christewart/bitcoin | test/functional/wallet_import_rescan.py | 5 | 8943 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
# Which import RPC is exercised: a single-key RPC or importmulti.
Call = enum.Enum("Call", "single multiaddress multiscript")
# What kind of key material is imported: address, public key, private key.
Data = enum.Enum("Data", "address pub priv")
# Rescan behavior: none, full rescan, or a timestamp past the block time.
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
    """Helper for importing one key and verifying scanned transactions.

    The namedtuple fields identify one cell of the test matrix:
    call   -- which import RPC family is used (Call enum)
    data   -- what is imported: address, pubkey, or privkey (Data enum)
    rescan -- how rescanning is requested (Rescan enum)
    prune  -- whether the importing node has pruning enabled

    Additional attributes (address, key, node, expect_disabled,
    initial_amount/initial_txid, expected_balance/expected_txs,
    sent_amount/sent_txid) are attached dynamically by
    ImportRescanTest.run_test before the methods below are called.
    """
    def try_rpc(self, func, *args, **kwargs):
        # Rescan-on-import is rejected on pruned nodes: when this variant
        # expects that, the call must fail with RPC error -4 instead of
        # executing; otherwise forward the call and return its result.
        if self.expect_disabled:
            assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
        else:
            return func(*args, **kwargs)
    def do_import(self, timestamp):
        """Call one key import RPC."""
        rescan = self.rescan == Rescan.yes
        if self.call == Call.single:
            # The single-key RPCs take an explicit rescan flag and are
            # expected to return None on success.
            if self.data == Data.address:
                response = self.try_rpc(self.node.importaddress, address=self.address["address"], rescan=rescan)
            elif self.data == Data.pub:
                response = self.try_rpc(self.node.importpubkey, pubkey=self.address["pubkey"], rescan=rescan)
            elif self.data == Data.priv:
                response = self.try_rpc(self.node.importprivkey, privkey=self.key, rescan=rescan)
            assert_equal(response, None)
        elif self.call in (Call.multiaddress, Call.multiscript):
            # importmulti drives rescanning via the key timestamp: pushing
            # the timestamp just past the rescan window (late_timestamp)
            # makes a requested rescan miss the initial transactions.
            response = self.node.importmulti([{
                "scriptPubKey": {
                    "address": self.address["address"]
                } if self.call == Call.multiaddress else self.address["scriptPubKey"],
                "timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
                "pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
                "keys": [self.key] if self.data == Data.priv else [],
                "watchonly": self.data != Data.priv
            }], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
            assert_equal(response, [{"success": True}])
    def check(self, txid=None, amount=None, confirmations=None):
        """Verify that listreceivedbyaddress returns expected values."""
        addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address'])
        if self.expected_txs:
            assert_equal(len(addresses[0]["txids"]), self.expected_txs)
        if txid is not None:
            # Exactly one returned entry must list the given txid.
            address, = [ad for ad in addresses if txid in ad["txids"]]
            assert_equal(address["address"], self.address["address"])
            assert_equal(address["amount"], self.expected_balance)
            assert_equal(address["confirmations"], confirmations)
            # Verify the transaction is correctly marked watchonly depending on
            # whether the transaction pays to an imported public key or
            # imported private key. The test setup ensures that transaction
            # inputs will not be from watchonly keys (important because
            # involvesWatchonly will be true if either the transaction output
            # or inputs are watchonly).
            if self.data != Data.priv:
                assert_equal(address["involvesWatchonly"], True)
            else:
                assert_equal("involvesWatchonly" not in address, True)
# List of Variants for each way a key or address could be imported.
# Full cross product: 3 call types x 3 data types x 3 rescan modes x
# pruned/unpruned = 54 variants.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(BitcoinTestFramework):
    """Drive every import variant against a matrix of pruned/rescan nodes."""
    def set_test_params(self):
        # Node 0 mines/sends, node 1 generates keys, the rest import.
        self.num_nodes = 2 + len(IMPORT_NODES)
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def setup_network(self):
        extra_args = [["-addresstype=legacy"] for _ in range(self.num_nodes)]
        for i, import_node in enumerate(IMPORT_NODES, 2):
            if import_node.prune:
                extra_args[i] += ["-prune=1"]
        self.add_nodes(self.num_nodes, extra_args=extra_args)
        # Import keys with pruning disabled
        self.start_nodes(extra_args=[[]] * self.num_nodes)
        super().import_deterministic_coinbase_privkeys()
        self.stop_nodes()
        # Restart with the real (possibly pruned) configuration.
        self.start_nodes()
        for i in range(1, self.num_nodes):
            connect_nodes(self.nodes[i], 0)
    def import_deterministic_coinbase_privkeys(self):
        # Deliberately a no-op here: the base-class import already ran once
        # in setup_network with pruning disabled.
        pass
    def run_test(self):
        # Create one transaction on node 0 with a unique amount for
        # each possible type of wallet import RPC.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())
            variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
            variant.initial_amount = 1 - (i + 1) / 64
            variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
        # Generate a block containing the initial transactions, then another
        # block further in the future (past the rescan window).
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
        set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        # For each variation of wallet key import, invoke the import RPC and
        # check the results from getbalance and listtransactions.
        for variant in IMPORT_VARIANTS:
            # Rescan-on-import is only disabled for the single-key RPCs on
            # pruned nodes; importmulti still succeeds there.
            variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
            expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
            variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
            variant.do_import(timestamp)
            if expect_rescan:
                variant.expected_balance = variant.initial_amount
                variant.expected_txs = 1
                variant.check(variant.initial_txid, variant.initial_amount, 2)
            else:
                variant.expected_balance = 0
                variant.expected_txs = 0
                variant.check()
        # Create new transactions sending to each address.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.sent_amount = 1 - (2 * i + 1) / 128
            variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
        # Generate a block containing the new transactions.
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        sync_blocks(self.nodes)
        # Check the latest results from getbalance and listtransactions.
        # Post-import sends must be picked up regardless of earlier rescans.
        for variant in IMPORT_VARIANTS:
            if not variant.expect_disabled:
                variant.expected_balance += variant.sent_amount
                variant.expected_txs += 1
                variant.check(variant.sent_txid, variant.sent_amount, 1)
            else:
                variant.check()
# Script entry point: run the functional test via the framework harness.
if __name__ == "__main__":
    ImportRescanTest().main()
| mit |
akaariai/django | tests/get_or_create/tests.py | 44 | 13321 | from __future__ import unicode_literals
import traceback
from datetime import date
from django.db import DatabaseError, IntegrityError
from django.test import TestCase, TransactionTestCase, ignore_warnings
from django.utils.encoding import DjangoUnicodeDecodeError
from .models import (
Author, Book, DefaultPerson, ManualPrimaryKeyTest, Person, Profile,
Publisher, Tag, Thing,
)
class GetOrCreateTests(TestCase):
    """Core behavior of QuerySet.get_or_create and related managers."""
    def setUp(self):
        """Create one Person row for the "found existing" scenarios."""
        self.lennon = Person.objects.create(
            first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
        )
    def test_get_or_create_method_with_get(self):
        """An existing matching row is returned with created=False."""
        created = Person.objects.get_or_create(
            first_name="John", last_name="Lennon", defaults={
                "birthday": date(1940, 10, 9)
            }
        )[1]
        self.assertFalse(created)
        self.assertEqual(Person.objects.count(), 1)
    def test_get_or_create_method_with_create(self):
        """With no matching row, a new one is created and created=True."""
        created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults={
                'birthday': date(1943, 2, 25)
            }
        )[1]
        self.assertTrue(created)
        self.assertEqual(Person.objects.count(), 2)
    def test_get_or_create_redundant_instance(self):
        """
        If we execute the exact same statement twice, the second time,
        it won't create a Person.
        """
        Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults={
                'birthday': date(1943, 2, 25)
            }
        )
        created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults={
                'birthday': date(1943, 2, 25)
            }
        )[1]
        self.assertFalse(created)
        self.assertEqual(Person.objects.count(), 2)
    def test_get_or_create_invalid_params(self):
        """
        If you don't specify a value or default value for all required
        fields, you will get an error.
        """
        self.assertRaises(
            IntegrityError,
            Person.objects.get_or_create, first_name="Tom", last_name="Smith"
        )
    def test_get_or_create_on_related_manager(self):
        """get_or_create works through FK and M2M related managers."""
        p = Publisher.objects.create(name="Acme Publishing")
        # Create a book through the publisher.
        book, created = p.books.get_or_create(name="The Book of Ed & Fred")
        self.assertTrue(created)
        # The publisher should have one book.
        self.assertEqual(p.books.count(), 1)
        # Try get_or_create again, this time nothing should be created.
        book, created = p.books.get_or_create(name="The Book of Ed & Fred")
        self.assertFalse(created)
        # And the publisher should still have one book.
        self.assertEqual(p.books.count(), 1)
        # Add an author to the book.
        ed, created = book.authors.get_or_create(name="Ed")
        self.assertTrue(created)
        # The book should have one author.
        self.assertEqual(book.authors.count(), 1)
        # Try get_or_create again, this time nothing should be created.
        ed, created = book.authors.get_or_create(name="Ed")
        self.assertFalse(created)
        # And the book should still have one author.
        self.assertEqual(book.authors.count(), 1)
        # Add a second author to the book.
        fred, created = book.authors.get_or_create(name="Fred")
        self.assertTrue(created)
        # The book should have two authors now.
        self.assertEqual(book.authors.count(), 2)
        # Create an Author not tied to any books.
        Author.objects.create(name="Ted")
        # There should be three Authors in total. The book object should have two.
        self.assertEqual(Author.objects.count(), 3)
        self.assertEqual(book.authors.count(), 2)
        # Try creating a book through an author.
        _, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p)
        self.assertTrue(created)
        # Now Ed has two Books, Fred just one.
        self.assertEqual(ed.books.count(), 2)
        self.assertEqual(fred.books.count(), 1)
        # Use the publisher's primary key value instead of a model instance.
        _, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
        self.assertTrue(created)
        # Try get_or_create again, this time nothing should be created.
        _, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
        self.assertFalse(created)
        # The publisher should have three books.
        self.assertEqual(p.books.count(), 3)
class GetOrCreateTestsWithManualPKs(TestCase):
    """get_or_create behavior when the primary key is assigned manually."""
    def setUp(self):
        """Seed a row whose primary key was chosen explicitly."""
        self.first_pk = ManualPrimaryKeyTest.objects.create(id=1, data="Original")
    def test_create_with_duplicate_primary_key(self):
        """
        If you specify an existing primary key, but different other fields,
        then you will get an error and data will not be updated.
        """
        with self.assertRaises(IntegrityError):
            ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
        unchanged = ManualPrimaryKeyTest.objects.get(id=1)
        self.assertEqual(unchanged.data, "Original")
    def test_get_or_create_raises_IntegrityError_plus_traceback(self):
        """
        get_or_create should raise IntegrityErrors with the full traceback.
        This is tested by checking that a known method call is in the traceback.
        We cannot use assertRaises here because we need to inspect
        the actual traceback. Refs #16340.
        """
        try:
            ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
        except IntegrityError:
            self.assertIn('obj.save', traceback.format_exc())
    # MySQL emits a warning when broken data is saved
    @ignore_warnings(module='django.db.backends.mysql.base')
    def test_savepoint_rollback(self):
        """
        Regression test for #20463: the database connection should still be
        usable after a DataError or ProgrammingError in .get_or_create().
        """
        try:
            Person.objects.get_or_create(
                birthday=date(1970, 1, 1),
                defaults={'first_name': b"\xff", 'last_name': b"\xff"})
        except (DatabaseError, DjangoUnicodeDecodeError):
            # The connection must still accept writes after the failed save.
            Person.objects.create(
                first_name="Bob", last_name="Ross", birthday=date(1950, 1, 1))
        else:
            self.skipTest("This backend accepts broken utf-8.")
    def test_get_or_create_empty(self):
        """
        Regression test for #16137: get_or_create does not require kwargs.
        """
        try:
            DefaultPerson.objects.get_or_create()
        except AssertionError:
            self.fail("If all the attributes on a model have defaults, we "
                      "shouldn't need to pass any arguments.")
class GetOrCreateTransactionTests(TransactionTestCase):
    """get_or_create under deferred (end-of-transaction) integrity checks."""
    available_apps = ['get_or_create']
    def test_get_or_create_integrityerror(self):
        """
        Regression test for #15117. Requires a TransactionTestCase on
        databases that delay integrity checks until the end of transactions,
        otherwise the exception is never raised.
        """
        try:
            Profile.objects.get_or_create(person=Person(id=1))
        except IntegrityError:
            return  # expected: the unsaved FK target violates integrity
        self.skipTest("This backend does not support integrity checks.")
class GetOrCreateThroughManyToMany(TestCase):
    """get_or_create invoked through a many-to-many related manager."""
    def test_get_get_or_create(self):
        """An already-attached object is found, not recreated."""
        existing = Tag.objects.create(text='foo')
        thing = Thing.objects.create(name='a')
        thing.tags.add(existing)
        obj, created = thing.tags.get_or_create(text='foo')
        self.assertFalse(created)
        self.assertEqual(obj.pk, existing.pk)
    def test_create_get_or_create(self):
        """A missing object is created and attached to the relation."""
        thing = Thing.objects.create(name='a')
        obj, created = thing.tags.get_or_create(text='foo')
        self.assertTrue(created)
        self.assertEqual(obj.text, 'foo')
        self.assertIn(obj, thing.tags.all())
    def test_something(self):
        """A matching but unattached Tag triggers an IntegrityError."""
        Tag.objects.create(text='foo')
        thing = Thing.objects.create(name='a')
        with self.assertRaises(IntegrityError):
            thing.tags.get_or_create(text='foo')
class UpdateOrCreateTests(TestCase):
    """Core behavior of QuerySet.update_or_create and related managers."""
    def test_update(self):
        """An existing row is updated in place and created=False."""
        Person.objects.create(
            first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
        )
        p, created = Person.objects.update_or_create(
            first_name='John', last_name='Lennon', defaults={
                'birthday': date(1940, 10, 10)
            }
        )
        self.assertFalse(created)
        self.assertEqual(p.first_name, 'John')
        self.assertEqual(p.last_name, 'Lennon')
        self.assertEqual(p.birthday, date(1940, 10, 10))
    def test_create(self):
        """With no matching row, update_or_create creates one."""
        p, created = Person.objects.update_or_create(
            first_name='John', last_name='Lennon', defaults={
                'birthday': date(1940, 10, 10)
            }
        )
        self.assertTrue(created)
        self.assertEqual(p.first_name, 'John')
        self.assertEqual(p.last_name, 'Lennon')
        self.assertEqual(p.birthday, date(1940, 10, 10))
    def test_create_twice(self):
        """Repeating the identical call must not create a second row."""
        params = {
            'first_name': 'John',
            'last_name': 'Lennon',
            'birthday': date(1940, 10, 10),
        }
        Person.objects.update_or_create(**params)
        # If we execute the exact same statement, it won't create a Person.
        p, created = Person.objects.update_or_create(**params)
        self.assertFalse(created)
    def test_integrity(self):
        """
        If you don't specify a value or default value for all required
        fields, you will get an error.
        """
        self.assertRaises(IntegrityError,
            Person.objects.update_or_create, first_name="Tom", last_name="Smith")
    def test_manual_primary_key_test(self):
        """
        If you specify an existing primary key, but different other fields,
        then you will get an error and data will not be updated.
        """
        ManualPrimaryKeyTest.objects.create(id=1, data="Original")
        self.assertRaises(
            IntegrityError,
            ManualPrimaryKeyTest.objects.update_or_create, id=1, data="Different"
        )
        self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
    def test_error_contains_full_traceback(self):
        """
        update_or_create should raise IntegrityErrors with the full traceback.
        This is tested by checking that a known method call is in the traceback.
        We cannot use assertRaises here because we need to inspect
        the actual traceback. Refs #16340.
        """
        try:
            ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
        except IntegrityError:
            formatted_traceback = traceback.format_exc()
            self.assertIn('obj.save', formatted_traceback)
    def test_create_with_related_manager(self):
        """
        Should be able to use update_or_create from the related manager to
        create a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        book, created = p.books.update_or_create(name="The Book of Ed & Fred")
        self.assertTrue(created)
        self.assertEqual(p.books.count(), 1)
    def test_update_with_related_manager(self):
        """
        Should be able to use update_or_create from the related manager to
        update a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
        self.assertEqual(p.books.count(), 1)
        name = "The Book of Django"
        book, created = p.books.update_or_create(defaults={'name': name}, id=book.id)
        self.assertFalse(created)
        self.assertEqual(book.name, name)
        self.assertEqual(p.books.count(), 1)
    def test_create_with_many(self):
        """
        Should be able to use update_or_create from the m2m related manager to
        create a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        author = Author.objects.create(name="Ted")
        book, created = author.books.update_or_create(name="The Book of Ed & Fred", publisher=p)
        self.assertTrue(created)
        self.assertEqual(author.books.count(), 1)
    def test_update_with_many(self):
        """
        Should be able to use update_or_create from the m2m related manager to
        update a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        author = Author.objects.create(name="Ted")
        book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
        book.authors.add(author)
        self.assertEqual(author.books.count(), 1)
        name = "The Book of Django"
        book, created = author.books.update_or_create(defaults={'name': name}, id=book.id)
        self.assertFalse(created)
        self.assertEqual(book.name, name)
        self.assertEqual(author.books.count(), 1)
| bsd-3-clause |
drufat/vispy | examples/basics/gloo/animate_images.py | 18 | 2964 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vispy: gallery 2
"""
Example demonstrating how to display an image with a fixed aspect ratio.
"""
import numpy as np
from vispy.util.transforms import ortho
from vispy import gloo
from vispy import app
# Image to be displayed
W, H = 64, 48
I = np.random.uniform(0, 1, (W, H)).astype(np.float32)
# A simple texture quad
data = np.zeros(4, dtype=[('a_position', np.float32, 2),
('a_texcoord', np.float32, 2)])
data['a_position'] = np.array([[0, 0], [W, 0], [0, H], [W, H]])
data['a_texcoord'] = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
VERT_SHADER = """
// Uniforms
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
uniform float u_antialias;
// Attributes
attribute vec2 a_position;
attribute vec2 a_texcoord;
// Varyings
varying vec2 v_texcoord;
// Main
void main (void)
{
v_texcoord = a_texcoord;
gl_Position = u_projection * u_view * u_model * vec4(a_position,0.0,1.0);
}
"""
FRAG_SHADER = """
uniform sampler2D u_texture;
varying vec2 v_texcoord;
void main()
{
gl_FragColor = texture2D(u_texture, v_texcoord);
gl_FragColor.a = 1.0;
}
"""
class Canvas(app.Canvas):
    """Canvas that displays the module-level image I, redrawn on a timer."""
    def __init__(self):
        # Window is 5x the image resolution so the upscaling is visible.
        app.Canvas.__init__(self, keys='interactive', size=((W * 5), (H * 5)))
        self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)
        self.texture = gloo.Texture2D(I, interpolation='linear')
        self.program['u_texture'] = self.texture
        self.program.bind(gloo.VertexBuffer(data))
        self.view = np.eye(4, dtype=np.float32)
        self.model = np.eye(4, dtype=np.float32)
        self.projection = np.eye(4, dtype=np.float32)
        self.program['u_model'] = self.model
        self.program['u_view'] = self.view
        # Orthographic projection in image coordinates (replaced on resize).
        self.projection = ortho(0, W, 0, H, -1, 1)
        self.program['u_projection'] = self.projection
        gloo.set_clear_color('white')
        # Redraw continuously: each update triggers on_draw.
        self._timer = app.Timer('auto', connect=self.update, start=True)
        self.show()
    def on_resize(self, event):
        """Keep the quad's aspect ratio fixed by letterboxing on resize."""
        width, height = event.physical_size
        gloo.set_viewport(0, 0, width, height)
        self.projection = ortho(0, width, 0, height, -100, 100)
        self.program['u_projection'] = self.projection
        # Compute the new size of the quad
        r = width / float(height)
        R = W / float(H)
        if r < R:
            # Window is relatively taller than the image: pad top/bottom.
            w, h = width, width / R
            x, y = 0, int((height - h) / 2)
        else:
            # Window is relatively wider than the image: pad left/right.
            w, h = height * R, height
            x, y = int((width - w) / 2), 0
        data['a_position'] = np.array(
            [[x, y], [x + w, y], [x, y + h], [x + w, y + h]])
        self.program.bind(gloo.VertexBuffer(data))
    def on_draw(self, event):
        """Regenerate the noise image, upload it, and draw the quad."""
        gloo.clear(color=True, depth=True)
        I[...] = np.random.uniform(0, 1, (W, H)).astype(np.float32)
        self.texture.set_data(I)
        self.program.draw('triangle_strip')
# Script entry point: create the canvas and enter the event loop.
if __name__ == '__main__':
    c = Canvas()
    app.run()
| bsd-3-clause |
sigma-random/scrapy | tests/test_selector.py | 14 | 24817 | import re
import warnings
import weakref
from twisted.trial import unittest
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.http import TextResponse, HtmlResponse, XmlResponse
from scrapy.selector import Selector
from scrapy.selector.lxmlsel import XmlXPathSelector, HtmlXPathSelector, XPathSelector
class SelectorTestCase(unittest.TestCase):
    """Behavioral tests for scrapy.selector.Selector (XPath, CSS, regex).

    ``sscls`` is the selector class under test so subclasses can rerun the
    whole suite against a different implementation.
    """
    sscls = Selector
    def test_simple_selection(self):
        """Simple selector tests"""
        body = "<p><input name='a'value='1'/><input name='b'value='2'/></p>"
        response = TextResponse(url="http://example.com", body=body)
        sel = self.sscls(response)
        xl = sel.xpath('//input')
        self.assertEqual(2, len(xl))
        for x in xl:
            assert isinstance(x, self.sscls)
        self.assertEqual(sel.xpath('//input').extract(),
                         [x.extract() for x in sel.xpath('//input')])
        self.assertEqual([x.extract() for x in sel.xpath("//input[@name='a']/@name")],
                         [u'a'])
        self.assertEqual([x.extract() for x in sel.xpath("number(concat(//input[@name='a']/@value, //input[@name='b']/@value))")],
                         [u'12.0'])
        self.assertEqual(sel.xpath("concat('xpath', 'rules')").extract(),
                         [u'xpathrules'])
        self.assertEqual([x.extract() for x in sel.xpath("concat(//input[@name='a']/@value, //input[@name='b']/@value)")],
                         [u'12'])
    def test_representation_slice(self):
        body = u"<p><input name='{}' value='\xa9'/></p>".format(50 * 'b')
        response = TextResponse(url="http://example.com", body=body, encoding='utf8')
        sel = self.sscls(response)
        self.assertEqual(
            map(repr, sel.xpath('//input/@name')),
            ["<Selector xpath='//input/@name' data=u'{}'>".format(40 * 'b')]
        )
    def test_representation_unicode_query(self):
        body = u"<p><input name='{}' value='\xa9'/></p>".format(50 * 'b')
        response = TextResponse(url="http://example.com", body=body, encoding='utf8')
        sel = self.sscls(response)
        self.assertEqual(
            map(repr, sel.xpath(u'//input[@value="\xa9"]/@value')),
            ["<Selector xpath=u'//input[@value=\"\\xa9\"]/@value' data=u'\\xa9'>"]
        )
    def test_select_unicode_query(self):
        body = u"<p><input name='\xa9' value='1'/></p>"
        response = TextResponse(url="http://example.com", body=body, encoding='utf8')
        sel = self.sscls(response)
        self.assertEqual(sel.xpath(u'//input[@name="\xa9"]/@value').extract(), [u'1'])
    def test_list_elements_type(self):
        """Test Selector returning the same type in selection methods"""
        text = '<p>test<p>'
        assert isinstance(self.sscls(text=text).xpath("//p")[0], self.sscls)
        assert isinstance(self.sscls(text=text).css("p")[0], self.sscls)
    def test_boolean_result(self):
        body = "<p><input name='a'value='1'/><input name='b'value='2'/></p>"
        response = TextResponse(url="http://example.com", body=body)
        xs = self.sscls(response)
        self.assertEqual(xs.xpath("//input[@name='a']/@name='a'").extract(), [u'1'])
        self.assertEqual(xs.xpath("//input[@name='a']/@name='n'").extract(), [u'0'])
    def test_differences_parsing_xml_vs_html(self):
        """Test that XML and HTML Selector's behave differently"""
        # some text which is parsed differently by XML and HTML flavors
        text = '<div><img src="a.jpg"><p>Hello</div>'
        hs = self.sscls(text=text, type='html')
        self.assertEqual(hs.xpath("//div").extract(),
                         [u'<div><img src="a.jpg"><p>Hello</p></div>'])
        xs = self.sscls(text=text, type='xml')
        self.assertEqual(xs.xpath("//div").extract(),
                         [u'<div><img src="a.jpg"><p>Hello</p></img></div>'])
    def test_flavor_detection(self):
        text = '<div><img src="a.jpg"><p>Hello</div>'
        sel = self.sscls(XmlResponse('http://example.com', body=text))
        self.assertEqual(sel.type, 'xml')
        self.assertEqual(sel.xpath("//div").extract(),
                         [u'<div><img src="a.jpg"><p>Hello</p></img></div>'])
        sel = self.sscls(HtmlResponse('http://example.com', body=text))
        self.assertEqual(sel.type, 'html')
        self.assertEqual(sel.xpath("//div").extract(),
                         [u'<div><img src="a.jpg"><p>Hello</p></div>'])
    def test_nested_selectors(self):
        """Nested selector tests"""
        body = """<body>
                    <div class='one'>
                      <ul>
                        <li>one</li><li>two</li>
                      </ul>
                    </div>
                    <div class='two'>
                      <ul>
                        <li>four</li><li>five</li><li>six</li>
                      </ul>
                    </div>
                  </body>"""
        response = HtmlResponse(url="http://example.com", body=body)
        x = self.sscls(response)
        divtwo = x.xpath('//div[@class="two"]')
        self.assertEqual(divtwo.xpath("//li").extract(),
                         ["<li>one</li>", "<li>two</li>", "<li>four</li>", "<li>five</li>", "<li>six</li>"])
        self.assertEqual(divtwo.xpath("./ul/li").extract(),
                         ["<li>four</li>", "<li>five</li>", "<li>six</li>"])
        self.assertEqual(divtwo.xpath(".//li").extract(),
                         ["<li>four</li>", "<li>five</li>", "<li>six</li>"])
        self.assertEqual(divtwo.xpath("./li").extract(), [])
    def test_mixed_nested_selectors(self):
        body = '''<body>
                    <div id=1>not<span>me</span></div>
                    <div class="dos"><p>text</p><a href='#'>foo</a></div>
               </body>'''
        sel = self.sscls(text=body)
        self.assertEqual(sel.xpath('//div[@id="1"]').css('span::text').extract(), [u'me'])
        self.assertEqual(sel.css('#1').xpath('./span/text()').extract(), [u'me'])
    def test_dont_strip(self):
        sel = self.sscls(text='<div>fff: <a href="#">zzz</a></div>')
        self.assertEqual(sel.xpath("//text()").extract(), [u'fff: ', u'zzz'])
    def test_namespaces_simple(self):
        body = """
        <test xmlns:somens="http://scrapy.org">
           <somens:a id="foo">take this</a>
           <a id="bar">found</a>
        </test>
        """
        response = XmlResponse(url="http://example.com", body=body)
        x = self.sscls(response)
        x.register_namespace("somens", "http://scrapy.org")
        self.assertEqual(x.xpath("//somens:a/text()").extract(),
                         [u'take this'])
    def test_namespaces_multiple(self):
        body = """<?xml version="1.0" encoding="UTF-8"?>
<BrowseNode xmlns="http://webservices.amazon.com/AWSECommerceService/2005-10-05"
            xmlns:b="http://somens.com"
            xmlns:p="http://www.scrapy.org/product" >
    <b:Operation>hello</b:Operation>
    <TestTag b:att="value"><Other>value</Other></TestTag>
    <p:SecondTestTag><material>iron</material><price>90</price><p:name>Dried Rose</p:name></p:SecondTestTag>
</BrowseNode>
        """
        response = XmlResponse(url="http://example.com", body=body)
        x = self.sscls(response)
        x.register_namespace("xmlns", "http://webservices.amazon.com/AWSECommerceService/2005-10-05")
        x.register_namespace("p", "http://www.scrapy.org/product")
        x.register_namespace("b", "http://somens.com")
        self.assertEqual(len(x.xpath("//xmlns:TestTag")), 1)
        self.assertEqual(x.xpath("//b:Operation/text()").extract()[0], 'hello')
        self.assertEqual(x.xpath("//xmlns:TestTag/@b:att").extract()[0], 'value')
        self.assertEqual(x.xpath("//p:SecondTestTag/xmlns:price/text()").extract()[0], '90')
        self.assertEqual(x.xpath("//p:SecondTestTag").xpath("./xmlns:price/text()")[0].extract(), '90')
        self.assertEqual(x.xpath("//p:SecondTestTag/xmlns:material/text()").extract()[0], 'iron')
    def test_re(self):
        body = """<div>Name: Mary
                    <ul>
                      <li>Name: John</li>
                      <li>Age: 10</li>
                      <li>Name: Paul</li>
                      <li>Age: 20</li>
                    </ul>
                    Age: 20
                  </div>"""
        response = HtmlResponse(url="http://example.com", body=body)
        x = self.sscls(response)
        name_re = re.compile("Name: (\w+)")
        self.assertEqual(x.xpath("//ul/li").re(name_re),
                         ["John", "Paul"])
        self.assertEqual(x.xpath("//ul/li").re("Age: (\d+)"),
                         ["10", "20"])
    def test_re_intl(self):
        body = """<div>Evento: cumplea\xc3\xb1os</div>"""
        response = HtmlResponse(url="http://example.com", body=body, encoding='utf-8')
        x = self.sscls(response)
        self.assertEqual(x.xpath("//div").re("Evento: (\w+)"), [u'cumplea\xf1os'])
    def test_selector_over_text(self):
        hs = self.sscls(text='<root>lala</root>')
        self.assertEqual(hs.extract(), u'<html><body><root>lala</root></body></html>')
        xs = self.sscls(text='<root>lala</root>', type='xml')
        self.assertEqual(xs.extract(), u'<root>lala</root>')
        self.assertEqual(xs.xpath('.').extract(), [u'<root>lala</root>'])
    def test_invalid_xpath(self):
        response = XmlResponse(url="http://example.com", body="<html></html>")
        x = self.sscls(response)
        xpath = "//test[@foo='bar]"
        try:
            x.xpath(xpath)
        except ValueError as e:
            assert xpath in str(e), "Exception message does not contain invalid xpath"
        except Exception:
            raise AssertionError("An invalid XPath does not raise ValueError")
        else:
            raise AssertionError("An invalid XPath does not raise an exception")
    def test_http_header_encoding_precedence(self):
        # u'\xa3'     = pound symbol in unicode
        # u'\xc2\xa3' = pound symbol in utf-8
        # u'\xa3'     = pound symbol in latin-1 (iso-8859-1)
        meta = u'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">'
        head = u'<head>' + meta + u'</head>'
        body_content = u'<span id="blank">\xa3</span>'
        body = u'<body>' + body_content + u'</body>'
        html = u'<html>' + head + body + u'</html>'
        encoding = 'utf-8'
        html_utf8 = html.encode(encoding)
        headers = {'Content-Type': ['text/html; charset=utf-8']}
        response = HtmlResponse(url="http://example.com", headers=headers, body=html_utf8)
        x = self.sscls(response)
        self.assertEqual(x.xpath("//span[@id='blank']/text()").extract(),
                         [u'\xa3'])
    def test_empty_bodies(self):
        # shouldn't raise errors
        r1 = TextResponse('http://www.example.com', body='')
        self.sscls(r1).xpath('//text()').extract()
    def test_null_bytes(self):
        # shouldn't raise errors
        r1 = TextResponse('http://www.example.com', \
                          body='<root>pre\x00post</root>', \
                          encoding='utf-8')
        self.sscls(r1).xpath('//text()').extract()
    def test_badly_encoded_body(self):
        # \xe9 alone isn't valid utf8 sequence
        r1 = TextResponse('http://www.example.com', \
                          body='<html><p>an Jos\xe9 de</p><html>', \
                          encoding='utf-8')
        self.sscls(r1).xpath('//text()').extract()
    def test_select_on_unevaluable_nodes(self):
        r = self.sscls(text=u'<span class="big">some text</span>')
        # Text node
        x1 = r.xpath('//text()')
        self.assertEqual(x1.extract(), [u'some text'])
        self.assertEqual(x1.xpath('.//b').extract(), [])
        # Tag attribute
        x1 = r.xpath('//span/@class')
        self.assertEqual(x1.extract(), [u'big'])
        self.assertEqual(x1.xpath('.//text()').extract(), [])
    def test_select_on_text_nodes(self):
        r = self.sscls(text=u'<div><b>Options:</b>opt1</div><div><b>Other</b>opt2</div>')
        x1 = r.xpath("//div/descendant::text()[preceding-sibling::b[contains(text(), 'Options')]]")
        self.assertEqual(x1.extract(), [u'opt1'])
        x1 = r.xpath("//div/descendant::text()/preceding-sibling::b[contains(text(), 'Options')]")
        self.assertEqual(x1.extract(), [u'<b>Options:</b>'])
    def test_nested_select_on_text_nodes(self):
        # FIXME: does not work with lxml backend [upstream]
        r = self.sscls(text=u'<div><b>Options:</b>opt1</div><div><b>Other</b>opt2</div>')
        x1 = r.xpath("//div/descendant::text()")
        x2 = x1.xpath("./preceding-sibling::b[contains(text(), 'Options')]")
        self.assertEqual(x2.extract(), [u'<b>Options:</b>'])
    test_nested_select_on_text_nodes.skip = "Text nodes lost parent node reference in lxml"
    def test_weakref_slots(self):
        """Check that classes are using slots and are weak-referenceable"""
        x = self.sscls()
        weakref.ref(x)
        assert not hasattr(x, '__dict__'), "%s does not use __slots__" % \
            x.__class__.__name__
    def test_remove_namespaces(self):
        xml = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-US" xmlns:media="http://search.yahoo.com/mrss/">
  <link type="text/html">
  <link type="application/atom+xml">
</feed>
"""
        sel = self.sscls(XmlResponse("http://example.com/feed.atom", body=xml))
        self.assertEqual(len(sel.xpath("//link")), 0)
        sel.remove_namespaces()
        self.assertEqual(len(sel.xpath("//link")), 2)
    def test_remove_attributes_namespaces(self):
        xml = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns:atom="http://www.w3.org/2005/Atom" xml:lang="en-US" xmlns:media="http://search.yahoo.com/mrss/">
  <link atom:type="text/html">
  <link atom:type="application/atom+xml">
</feed>
"""
        sel = self.sscls(XmlResponse("http://example.com/feed.atom", body=xml))
        self.assertEqual(len(sel.xpath("//link/@type")), 0)
        sel.remove_namespaces()
        self.assertEqual(len(sel.xpath("//link/@type")), 2)
    def test_smart_strings(self):
        """Lxml smart strings return values"""
        class SmartStringsSelector(Selector):
            _lxml_smart_strings = True
        body = """<body>
                    <div class='one'>
                      <ul>
                        <li>one</li><li>two</li>
                      </ul>
                    </div>
                    <div class='two'>
                      <ul>
                        <li>four</li><li>five</li><li>six</li>
                      </ul>
                    </div>
                  </body>"""
        response = HtmlResponse(url="http://example.com", body=body)
        # .getparent() is available for text nodes and attributes
        # only when smart_strings are on
        x = self.sscls(response)
        li_text = x.xpath('//li/text()')
        self.assertFalse(any(map(lambda e: hasattr(e._root, 'getparent'), li_text)))
        div_class = x.xpath('//div/@class')
        self.assertFalse(any(map(lambda e: hasattr(e._root, 'getparent'), div_class)))
        x = SmartStringsSelector(response)
        li_text = x.xpath('//li/text()')
        self.assertTrue(all(map(lambda e: hasattr(e._root, 'getparent'), li_text)))
        div_class = x.xpath('//div/@class')
        self.assertTrue(all(map(lambda e: hasattr(e._root, 'getparent'), div_class)))
    def test_xml_entity_expansion(self):
        # External entities must NOT be resolved (XXE protection).
        malicious_xml = '<?xml version="1.0" encoding="ISO-8859-1"?>'\
            '<!DOCTYPE foo [ <!ELEMENT foo ANY > <!ENTITY xxe SYSTEM '\
            '"file:///etc/passwd" >]><foo>&xxe;</foo>'
        response = XmlResponse('http://example.com', body=malicious_xml)
        sel = self.sscls(response=response)
        self.assertEqual(sel.extract(), '<foo>&xxe;</foo>')
class DeprecatedXpathSelectorTest(unittest.TestCase):
    """Tests for the deprecated *XPathSelector classes.

    Each deprecated class must warn (pointing at scrapy.Selector) when
    subclassed or instantiated directly, but not when a user-defined
    subclass is instantiated.  The three per-class tests and the two HTML
    select tests previously triplicated the same code; the shared logic now
    lives in private helpers so the checks cannot drift apart.
    """
    text = '<div><img src="a.jpg"><p>Hello</div>'

    def _check_deprecation_warnings(self, cls, is_xpathselector=False):
        """Run the shared warning/type checks for one deprecated class.

        cls is the deprecated class under test; is_xpathselector adds the
        XPathSelector subclass/instance checks used by the Xml/Html variants.
        """
        with warnings.catch_warnings(record=True) as w:
            # subclassing must issue a warning
            class UserClass(cls):
                pass
            self.assertEqual(len(w), 1, str(cls))
            self.assertIn('scrapy.Selector', str(w[0].message))
            # subclass instance doesn't issue a warning
            usel = UserClass(text=self.text)
            self.assertEqual(len(w), 1)
            # class instance must issue a warning
            sel = cls(text=self.text)
            self.assertEqual(len(w), 2, str((cls, [x.message for x in w])))
            self.assertIn('scrapy.Selector', str(w[1].message))
            # subclass and instance checks
            self.assertTrue(issubclass(cls, Selector))
            self.assertTrue(isinstance(sel, Selector))
            self.assertTrue(isinstance(usel, Selector))
            if is_xpathselector:
                self.assertTrue(issubclass(cls, XPathSelector))
                self.assertTrue(isinstance(sel, XPathSelector))
                self.assertTrue(isinstance(usel, XPathSelector))

    def test_warnings_xpathselector(self):
        self._check_deprecation_warnings(XPathSelector)

    def test_warnings_xmlxpathselector(self):
        self._check_deprecation_warnings(XmlXPathSelector, is_xpathselector=True)

    def test_warnings_htmlxpathselector(self):
        self._check_deprecation_warnings(HtmlXPathSelector, is_xpathselector=True)

    def _check_html_select(self, cls):
        """Deprecated .select() still works on HTML input; .css() must raise."""
        with warnings.catch_warnings(record=True):
            hs = cls(text=self.text)
            self.assertEqual(hs.select("//div").extract(),
                             [u'<div><img src="a.jpg"><p>Hello</p></div>'])
            self.assertRaises(RuntimeError, hs.css, 'div')

    def test_xpathselector(self):
        self._check_html_select(XPathSelector)

    def test_htmlxpathselector(self):
        self._check_html_select(HtmlXPathSelector)

    def test_xmlxpathselector(self):
        # XML mode serializes void elements with explicit closing tags,
        # hence the separate expected markup.
        with warnings.catch_warnings(record=True):
            xs = XmlXPathSelector(text=self.text)
            self.assertEqual(xs.select("//div").extract(),
                             [u'<div><img src="a.jpg"><p>Hello</p></img></div>'])
            self.assertRaises(RuntimeError, xs.css, 'div')
class ExsltTestCase(unittest.TestCase):
    """Checks that EXSLT extensions (regexp, set) work inside xpath()."""
    # selector class under test; subclasses may override
    sscls = Selector
    def test_regexp(self):
        """EXSLT regular expression tests"""
        body = """
        <p><input name='a' value='1'/><input name='b' value='2'/></p>
        <div class="links">
        <a href="/first.html">first link</a>
        <a href="/second.html">second link</a>
        <a href="http://www.bayes.co.uk/xml/index.xml?/xml/utils/rechecker.xml">EXSLT match example</a>
        </div>
        """
        response = TextResponse(url="http://example.com", body=body)
        sel = self.sscls(response)
        # re:test()
        # extract() on a SelectorList equals extracting each element
        self.assertEqual(
            sel.xpath(
                '//input[re:test(@name, "[A-Z]+", "i")]').extract(),
            [x.extract() for x in sel.xpath('//input[re:test(@name, "[A-Z]+", "i")]')])
        self.assertEqual(
            [x.extract()
             for x in sel.xpath(
                 '//a[re:test(@href, "\.html$")]/text()')],
            [u'first link', u'second link'])
        self.assertEqual(
            [x.extract()
             for x in sel.xpath(
                 '//a[re:test(@href, "first")]/text()')],
            [u'first link'])
        self.assertEqual(
            [x.extract()
             for x in sel.xpath(
                 '//a[re:test(@href, "second")]/text()')],
            [u'second link'])
        # re:match() is rather special: it returns a node-set of <match> nodes
        #[u'<match>http://www.bayes.co.uk/xml/index.xml?/xml/utils/rechecker.xml</match>',
        #u'<match>http</match>',
        #u'<match>www.bayes.co.uk</match>',
        #u'<match></match>',
        #u'<match>/xml/index.xml?/xml/utils/rechecker.xml</match>']
        self.assertEqual(
            sel.xpath('re:match(//a[re:test(@href, "\.xml$")]/@href,'
                      '"(\w+):\/\/([^/:]+)(:\d*)?([^# ]*)")/text()').extract(),
            [u'http://www.bayes.co.uk/xml/index.xml?/xml/utils/rechecker.xml',
             u'http',
             u'www.bayes.co.uk',
             u'',
             u'/xml/index.xml?/xml/utils/rechecker.xml'])
        # re:replace()
        self.assertEqual(
            sel.xpath('re:replace(//a[re:test(@href, "\.xml$")]/@href,'
                      '"(\w+)://(.+)(\.xml)", "","https://\\2.html")').extract(),
            [u'https://www.bayes.co.uk/xml/index.xml?/xml/utils/rechecker.html'])
    def test_set(self):
        """EXSLT set manipulation tests"""
        # microdata example from http://schema.org/Event
        body="""
        <div itemscope itemtype="http://schema.org/Event">
        <a itemprop="url" href="nba-miami-philidelphia-game3.html">
        NBA Eastern Conference First Round Playoff Tickets:
        <span itemprop="name"> Miami Heat at Philadelphia 76ers - Game 3 (Home Game 1) </span>
        </a>
        <meta itemprop="startDate" content="2016-04-21T20:00">
        Thu, 04/21/16
        8:00 p.m.
        <div itemprop="location" itemscope itemtype="http://schema.org/Place">
        <a itemprop="url" href="wells-fargo-center.html">
        Wells Fargo Center
        </a>
        <div itemprop="address" itemscope itemtype="http://schema.org/PostalAddress">
        <span itemprop="addressLocality">Philadelphia</span>,
        <span itemprop="addressRegion">PA</span>
        </div>
        </div>
        <div itemprop="offers" itemscope itemtype="http://schema.org/AggregateOffer">
        Priced from: <span itemprop="lowPrice">$35</span>
        <span itemprop="offerCount">1938</span> tickets left
        </div>
        </div>
        """
        response = TextResponse(url="http://example.com", body=body)
        sel = self.sscls(response)
        # baseline: all @itemprop values in document order
        self.assertEqual(
            sel.xpath('''//div[@itemtype="http://schema.org/Event"]
                         //@itemprop''').extract(),
            [u'url',
             u'name',
             u'startDate',
             u'location',
             u'url',
             u'address',
             u'addressLocality',
             u'addressRegion',
             u'offers',
             u'lowPrice',
             u'offerCount']
        )
        # set:difference() removes itemprops belonging to nested itemscopes
        self.assertEqual(sel.xpath('''
            set:difference(//div[@itemtype="http://schema.org/Event"]
                           //@itemprop,
                           //div[@itemtype="http://schema.org/Event"]
                           //*[@itemscope]/*/@itemprop)''').extract(),
            [u'url', u'name', u'startDate', u'location', u'offers'])
| bsd-3-clause |
chrrrles/ansible-modules-extras | monitoring/uptimerobot.py | 58 | 4159 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: uptimerobot
short_description: Pause and start Uptime Robot monitoring
description:
- This module will let you start and pause Uptime Robot Monitoring
author: "Nate Kingsley (@nate-kingsley)"
version_added: "1.9"
requirements:
- Valid Uptime Robot API Key
options:
state:
description:
- Define whether or not the monitor should be running or paused.
required: true
default: null
choices: [ "started", "paused" ]
aliases: []
monitorid:
description:
- ID of the monitor to check.
required: true
default: null
choices: []
aliases: []
apikey:
description:
- Uptime Robot API key.
required: true
default: null
choices: []
aliases: []
notes:
- Support for adding and removing monitors and alert contacts has not yet been implemented.
'''
EXAMPLES = '''
# Pause the monitor with an ID of 12345.
- uptimerobot: monitorid=12345
apikey=12345-1234512345
state=paused
# Start the monitor with an ID of 12345.
- uptimerobot: monitorid=12345
apikey=12345-1234512345
state=started
'''
import json
import urllib
import time
# Uptime Robot API endpoint and the query fragment for each supported action.
API_BASE = "http://api.uptimerobot.com/"
API_ACTIONS = dict(
    status='getMonitors?',
    editMonitor='editMonitor?'
)
# Request JSON responses without a JSONP callback wrapper.
API_FORMAT = 'json'
API_NOJSONCALLBACK = 1
# Ansible module conventions: this module never reports 'changed' and does
# not support check mode.
CHANGED_STATE = False
SUPPORTS_CHECK_MODE = False
def checkID(module, params):
    """Query the Uptime Robot 'getMonitors' endpoint for the monitor status.

    Parameters
    ----------
    module : AnsibleModule
        Used for fetch_url and for clean failure reporting.
    params : dict
        API query parameters (apiKey, monitors, format, ...).

    Returns the decoded JSON response as a dict.
    """
    data = urllib.urlencode(params)
    full_uri = API_BASE + API_ACTIONS['status'] + data
    req, info = fetch_url(module, full_uri)
    if req is None:
        # fetch_url signals failure with a None handle; fail cleanly instead
        # of crashing with AttributeError on req.read() below.
        module.fail_json(msg="failed", result=info.get('msg', 'request failed'))
    try:
        result = req.read()
    finally:
        # ensure the handle is closed even if reading/decoding fails
        req.close()
    return json.loads(result)
def startMonitor(module, params):
    """Set the monitor's status to running via the 'editMonitor' endpoint.

    Mutates params by setting monitorStatus=1, then issues the request.
    Returns the API 'stat' field ('ok' on success).
    """
    params['monitorStatus'] = 1
    data = urllib.urlencode(params)
    full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
    req, info = fetch_url(module, full_uri)
    if req is None:
        # fetch_url signals failure with a None handle; fail cleanly instead
        # of crashing with AttributeError on req.read() below.
        module.fail_json(msg="failed", result=info.get('msg', 'request failed'))
    try:
        result = req.read()
    finally:
        # ensure the handle is closed even if reading/decoding fails
        req.close()
    return json.loads(result)['stat']
def pauseMonitor(module, params):
    """Set the monitor's status to paused via the 'editMonitor' endpoint.

    Mutates params by setting monitorStatus=0, then issues the request.
    Returns the API 'stat' field ('ok' on success).
    """
    params['monitorStatus'] = 0
    data = urllib.urlencode(params)
    full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
    req, info = fetch_url(module, full_uri)
    if req is None:
        # fetch_url signals failure with a None handle; fail cleanly instead
        # of crashing with AttributeError on req.read() below.
        module.fail_json(msg="failed", result=info.get('msg', 'request failed'))
    try:
        result = req.read()
    finally:
        # ensure the handle is closed even if reading/decoding fails
        req.close()
    return json.loads(result)['stat']
def main():
    """Entry point: validate the monitor ID, then start or pause it."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=True, choices=['started', 'paused']),
            apikey=dict(required=True),
            monitorid=dict(required=True)
        ),
        supports_check_mode=SUPPORTS_CHECK_MODE
    )
    monitor_id = module.params['monitorid']
    # The API uses 'monitors' for queries and 'monitorID' for edits, so
    # supply the same ID under both names.
    api_params = {
        'apiKey': module.params['apikey'],
        'monitors': monitor_id,
        'monitorID': monitor_id,
        'format': API_FORMAT,
        'noJsonCallback': API_NOJSONCALLBACK,
    }
    # Confirm the monitor exists before attempting to edit it.
    status = checkID(module, api_params)
    if status['stat'] != "ok":
        module.fail_json(
            msg="failed",
            result=status['message']
        )
    if module.params['state'] == 'started':
        outcome = startMonitor(module, api_params)
    else:
        outcome = pauseMonitor(module, api_params)
    module.exit_json(
        msg="success",
        result=outcome
    )
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
mnahm5/django-estore | Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree.py | 322 | 4684 | from __future__ import absolute_import, division, unicode_literals
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
import re
from pip._vendor.six import string_types
from . import base
from .._utils import moduleFactoryFactory
# Matches ElementTree's "{namespace-uri}localname" Clark notation; group 1 is
# the namespace URI, group 2 the local tag name.
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
    # Bind the concrete ElementTree implementation for the TreeWalker below.
    # The whole namespace is returned so moduleFactoryFactory can cache one
    # generated "module" per implementation.
    ElementTree = ElementTreeImplementation
    # The tag used for comment nodes is implementation specific; probe it once.
    ElementTreeCommentType = ElementTree.Comment("asd").tag
    class TreeWalker(base.NonRecursiveTreeWalker):  # pylint:disable=unused-variable
        """Given the particular ElementTree representation, this implementation,
        to avoid using recursion, returns "nodes" as tuples with the following
        content:
        1. The current element
        2. The index of the element relative to its parent
        3. A stack of ancestor elements
        4. A flag "text", "tail" or None to indicate if the current node is a
           text node; either the text or tail of the current element (1)
        """
        def getNodeDetails(self, node):
            # Classify *node* and return the token tuple expected by the
            # NonRecursiveTreeWalker base class.
            if isinstance(node, tuple):  # It might be the root Element
                elt, _, _, flag = node
                if flag in ("text", "tail"):
                    return base.TEXT, getattr(elt, flag)
                else:
                    node = elt
            # An ElementTree (not an Element): start from its root element.
            if not(hasattr(node, "tag")):
                node = node.getroot()
            if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
                return (base.DOCUMENT,)
            elif node.tag == "<!DOCTYPE>":
                return (base.DOCTYPE, node.text,
                        node.get("publicId"), node.get("systemId"))
            elif node.tag == ElementTreeCommentType:
                return base.COMMENT, node.text
            else:
                assert isinstance(node.tag, string_types), type(node.tag)
                # This is assumed to be an ordinary element
                match = tag_regexp.match(node.tag)
                if match:
                    namespace, tag = match.groups()
                else:
                    namespace = None
                    tag = node.tag
                attrs = OrderedDict()
                for name, value in list(node.attrib.items()):
                    match = tag_regexp.match(name)
                    if match:
                        attrs[(match.group(1), match.group(2))] = value
                    else:
                        attrs[(None, name)] = value
                # Last item flags whether the element has content (children
                # or leading text); the base class uses it to decide descent.
                return (base.ELEMENT, namespace, tag,
                        attrs, len(node) or node.text)
        def getFirstChild(self, node):
            # Descend first into the element's leading text, then into its
            # first child element (pushing the element on the ancestor stack).
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                element, key, parents, flag = node, None, [], None
            if flag in ("text", "tail"):
                # text nodes have no children
                return None
            else:
                if element.text:
                    return element, key, parents, "text"
                elif len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
        def getNextSibling(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None
            if flag == "text":
                # after the element's leading text comes its first child
                if len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
            else:
                # after an element comes its tail text, then the next sibling
                # within its parent (parents[-1])
                if element.tail and flag != "tail":
                    return element, key, parents, "tail"
                elif key < len(parents[-1]) - 1:
                    return parents[-1][key + 1], key + 1, parents, None
                else:
                    return None
        def getParentNode(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None
            if flag == "text":
                # the element owning the text is its parent
                if not parents:
                    return element
                else:
                    return element, key, parents, None
            else:
                parent = parents.pop()
                if not parents:
                    return parent
                else:
                    # sanity check: the child occurs exactly once in its parent
                    assert list(parents[-1]).count(parent) == 1
                    return parent, list(parents[-1]).index(parent), parents, None
    return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
| mit |
hagabbar/pycbc_copy | pycbc/io/hdf.py | 1 | 31952 | # convenience classes for accessing hdf5 trigger files
# the 'get_column()' method is implemented parallel to
# the existing pylal.SnglInspiralUtils functions
import h5py
import numpy as np
import logging
import inspect
from lal import LIGOTimeGPS, YRJUL_SI
from pycbc_glue.ligolw import ligolw
from pycbc_glue.ligolw import table
from pycbc_glue.ligolw import lsctables
from pycbc_glue.ligolw import ilwd
from pycbc_glue.ligolw import utils as ligolw_utils
from pycbc_glue.ligolw.utils import process as ligolw_process
from pycbc import version as pycbc_version
from pycbc.tmpltbank import return_search_summary
from pycbc.tmpltbank import return_empty_sngl
from pycbc import events, conversions, pnutils
class HFile(h5py.File):
    """ Low level extensions to the capabilities of reading an hdf5 File
    """
    def select(self, fcn, *args, **kwds):
        """ Return arrays from an hdf5 file that satisfy the given function

        Parameters
        ----------
        fcn : a function
            A function that accepts the same number of argument as keys given
            and returns a boolean array of the same length.
        args : strings
            A variable number of strings that are keys into the hdf5. These must
            refer to arrays of equal length.
        chunksize : {1e6, int}, optional
            Number of elements to read and process at a time.
        return_indices : bool, optional
            If True, also return the indices of elements passing the function.

        Returns
        -------
        values : np.ndarrays
            A variable number of arrays depending on the number of keys into
            the hdf5 file that are given. If return_indices is True, the first
            element is an array of indices of elements passing the function.

        Raises
        ------
        ValueError
            If no dataset keys are given.

        >>> f = HFile(filename)
        >>> snr = f.select(lambda snr: snr > 6, 'H1/snr')
        """
        if not args:
            # previously this fell through to a NameError below; fail clearly
            raise ValueError("at least one dataset key must be given")
        # get references to each array
        refs = {}
        data = {}
        for arg in args:
            refs[arg] = self[arg]
            data[arg] = []
        return_indices = kwds.get('return_indices', False)
        indices = np.array([], dtype=np.uint64)
        # To conserve memory read the array in chunks
        chunksize = kwds.get('chunksize', int(1e6))
        # all keys refer to equal-length arrays, so take the size from the
        # first one (the original relied on the leaked loop variable 'arg')
        size = len(refs[args[0]])
        i = 0
        while i < size:
            r = min(i + chunksize, size)
            # Read each chunk's worth of data and find where it passes the function
            partial = [refs[arg][i:r] for arg in args]
            keep = fcn(*partial)
            if return_indices:
                indices = np.concatenate([indices, np.flatnonzero(keep) + i])
            # store only the results that pass the function
            for arg, part in zip(args, partial):
                data[arg].append(part[keep])
            i += chunksize
        # Combine the partial results into full arrays
        if len(args) == 1:
            res = np.concatenate(data[args[0]])
            if return_indices:
                return indices, res
            else:
                return res
        else:
            res = tuple(np.concatenate(data[arg]) for arg in args)
            if return_indices:
                return (indices,) + res
            else:
                return res
class DictArray(object):
    """ Utility for organizing sets of arrays of equal length.

    Manages a dictionary of arrays of equal length. This can also
    be instantiated with a set of hdf5 files and the key values. The full
    data is always in memory and all operations create new instances of the
    DictArray.
    """
    def __init__(self, data=None, files=None, groups=None):
        """ Create a DictArray

        Parameters
        ----------
        data: dict, optional
            Dictionary of equal length numpy arrays
        files: list of filenames, optional
            List of hdf5 file filenames. Incompatibile with the `data` option.
        groups: list of strings
            List of keys into each file. Required by the files options.
        """
        self.data = data
        if files:
            # Collect each requested group's array from every file, then
            # merge the pieces column by column.
            self.data = {}
            for group in groups:
                self.data[group] = []
            for fname in files:
                fobj = HFile(fname)
                for group in groups:
                    if group in fobj:
                        self.data[group].append(fobj[group][:])
                fobj.close()
            for key in self.data:
                if len(self.data[key]) != 0:
                    self.data[key] = np.concatenate(self.data[key])
        # Also expose each column as an instance attribute for convenience.
        for key in self.data:
            setattr(self, key, self.data[key])

    def _return(self, data):
        # Subclasses override this to carry extra metadata onto copies.
        return self.__class__(data=data)

    def __len__(self):
        return len(self.data[self.data.keys()[0]])

    def __add__(self, other):
        """Concatenate with another DictArray, column by column."""
        merged = dict((key, np.concatenate([self.data[key], other.data[key]]))
                      for key in self.data)
        return self._return(data=merged)

    def select(self, idx):
        """ Return a new DictArray containing only the indexed values
        """
        picked = dict((key, self.data[key][idx]) for key in self.data)
        return self._return(data=picked)

    def remove(self, idx):
        """ Return a new DictArray that does not contain the indexed values
        """
        kept = dict((key, np.delete(self.data[key], idx)) for key in self.data)
        return self._return(data=kept)
class StatmapData(DictArray):
    """DictArray holding the columns of a coincident 'statmap' file, plus the
    coincident segment list and file attributes, with clustering and saving
    helpers.
    """
    def __init__(self, data=None, seg=None, attrs=None,
                 files=None):
        # Fixed set of columns produced by the coincidence stage.
        groups = ['stat', 'time1', 'time2', 'trigger_id1', 'trigger_id2',
                  'template_id', 'decimation_factor', 'timeslide_id']
        super(StatmapData, self).__init__(data=data, files=files, groups=groups)
        if data:
            self.seg=seg
            self.attrs=attrs
        elif files:
            # NOTE(review): segments and attrs are taken from the *first*
            # file only, and remain live references into that open file.
            f = HFile(files[0], "r")
            self.seg = f['segments']
            self.attrs = f.attrs
    def _return(self, data):
        # Propagate attrs/seg so that derived instances keep their metadata.
        return self.__class__(data=data, attrs=self.attrs, seg=self.seg)
    def cluster(self, window):
        """ Cluster the dict array, assuming it has the relevant Coinc columns,
        time1, time2, stat, and timeslide_id
        """
        # If no events, do nothing
        if len(self.time1) == 0 or len(self.time2) == 0:
            return self
        from pycbc.events import cluster_coincs
        interval = self.attrs['timeslide_interval']
        cid = cluster_coincs(self.stat, self.time1, self.time2,
                             self.timeslide_id, interval, window)
        return self.select(cid)
    def save(self, outname):
        """Write the data columns, attributes and segments to a new hdf5 file."""
        f = HFile(outname, "w")
        for k in self.attrs:
            f.attrs[k] = self.attrs[k]
        for k in self.data:
            f.create_dataset(k, data=self.data[k],
                             compression='gzip',
                             compression_opts=9,
                             shuffle=True)
        for key in self.seg.keys():
            f['segments/%s/start' % key] = self.seg[key]['start'][:]
            f['segments/%s/end' % key] = self.seg[key]['end'][:]
        f.close()
class FileData(object):
    """Wrap one group of an hdf5 file and provide (optionally filtered)
    column access."""
    def __init__(self, fname, group=None, columnlist=None, filter_func=None):
        """
        Parameters
        ----------
        group : string
            Name of group to be read from the file
        columnlist : list of strings
            Names of columns to be read; if None, use all existing columns
        filter_func : string
            String should evaluate to a Boolean expression using attributes
            of the class instance derived from columns: ex. 'self.snr < 6.5'
        """
        if not fname: raise RuntimeError("Didn't get a file!")
        self.fname = fname
        self.h5file = HFile(fname, "r")
        if group is None:
            # with no group given, only a single-group file is unambiguous
            if len(self.h5file.keys()) == 1:
                group = self.h5file.keys()[0]
            else:
                raise RuntimeError("Didn't get a group!")
        self.group_key = group
        self.group = self.h5file[group]
        self.columns = columnlist if columnlist is not None \
                       else self.group.keys()
        self.filter_func = filter_func
        # lazily computed by the `mask` property
        self._mask = None
    def close(self):
        # close the underlying hdf5 file handle
        self.h5file.close()
    @property
    def mask(self):
        """
        Create a mask implementing the requested filter on the datasets

        Returns
        -------
        array of Boolean
            True for dataset indices to be returned by the get_column method
        """
        if self.filter_func is None:
            raise RuntimeError("Can't get a mask without a filter function!")
        else:
            # only evaluate if no previous calculation was done
            if self._mask is None:
                # get required columns into the namespace as numpy arrays
                for column in self.columns:
                    if column in self.filter_func:
                        setattr(self, column, self.group[column][:])
                # NOTE(review): filter_func is run through eval() and must
                # come from a trusted source
                self._mask = eval(self.filter_func)
        return self._mask
    def get_column(self, col):
        """
        Parameters
        ----------
        col : string
            Name of the dataset to be returned

        Returns
        -------
        numpy array
            Values from the dataset, filtered if requested
        """
        # catch corner case with an empty file (group with no datasets)
        if not len(self.group.keys()):
            return np.array([])
        vals = self.group[col]
        if self.filter_func:
            return vals[self.mask]
        else:
            return vals[:]
class DataFromFiles(object):
    """Gather a named dataset across a list of hdf5 files via FileData."""
    def __init__(self, filelist, group=None, columnlist=None, filter_func=None):
        self.files = filelist
        self.group = group
        self.columns = columnlist
        self.filter_func = filter_func

    def get_column(self, col):
        """
        Loop over files getting the requested dataset values from each

        Parameters
        ----------
        col : string
            Name of the dataset to be returned

        Returns
        -------
        numpy array
            Values from the dataset, filtered if requested and
            concatenated in order of file list
        """
        logging.info('getting %s' % col)
        pieces = []
        for fname in self.files:
            fdata = FileData(fname, group=self.group,
                             columnlist=self.columns,
                             filter_func=self.filter_func)
            pieces.append(fdata.get_column(col))
            # h5py caps the number of simultaneously open file objects
            # (approx. 1000), so close each file as soon as it is read
            fdata.close()
        logging.info('- got %i values' % sum(len(p) for p in pieces))
        return np.concatenate(pieces)
class SingleDetTriggers(object):
    """
    Provides easy access to the parameters of single-detector CBC triggers.
    """
    # FIXME: Some of these are optional and should be kwargs.
    def __init__(self, trig_file, bank_file, veto_file, segment_name, filter_func, detector):
        """Load one detector's triggers, applying optional vetoes and cuts.

        Parameters
        ----------
        trig_file : string
            Path to the hdf5 single-detector trigger file.
        bank_file : string or None
            Path to the hdf5 template bank file; if None, template
            parameters are unavailable.
        veto_file : string or None
            Veto segment file; if None, no vetoes are applied.
        segment_name : string
            Name of the veto segment list inside veto_file.
        filter_func : string or None
            Boolean expression over trigger/bank columns written with
            'self.' attribute access, e.g. 'self.snr > 6'.
            NOTE(review): evaluated with eval(), so it must come from a
            trusted source.
        detector : string
            Detector name; the group key inside trig_file (e.g. 'H1').
        """
        logging.info('Loading triggers')
        self.trigs_f = HFile(trig_file, 'r')
        self.trigs = self.trigs_f[detector]
        if bank_file:
            logging.info('Loading bank')
            self.bank = HFile(bank_file, 'r')
        else:
            logging.info('No bank file given')
            # empty dict in place of non-existent hdf file
            self.bank = {}
        if veto_file:
            logging.info('Applying veto segments')
            # veto_mask is an array of indices into the trigger arrays
            # giving the surviving triggers
            logging.info('%i triggers before vetoes',
                         len(self.trigs['end_time'][:]))
            self.veto_mask, _ = events.veto.indices_outside_segments(
                self.trigs['end_time'][:], [veto_file],
                ifo=detector, segment_name=segment_name)
            logging.info('%i triggers remain after vetoes',
                         len(self.veto_mask))
        else:
            self.veto_mask = np.arange(len(self.trigs['end_time']))
        if filter_func:
            # get required columns into the namespace with dummy attribute
            # names to avoid confusion with other class properties
            for c in self.trigs.keys():
                if c in filter_func:
                    setattr(self, '_'+c, self.trigs[c][:])
            for c in self.bank.keys():
                if c in filter_func:
                    # get template parameters corresponding to triggers
                    setattr(self, '_'+c,
                            np.array(self.bank[c])[self.trigs['template_id'][:]])
            self.filter_mask = eval(filter_func.replace('self.', 'self._'))
            # remove the dummy attributes
            for c in self.trigs.keys() + self.bank.keys():
                if c in filter_func: delattr(self, '_'+c)
            self.boolean_veto = np.in1d(np.arange(len(self.trigs['end_time'])),
                                        self.veto_mask, assume_unique=True)
            self.mask = np.logical_and(self.boolean_veto, self.filter_mask)
            logging.info('%i triggers remain after cut on %s',
                         len(self.trigs['end_time'][self.mask]), filter_func)
        else:
            self.mask = self.veto_mask
    def checkbank(self, param):
        """Raise RuntimeError if no template bank information is available."""
        if self.bank == {}:
            # Bug fix: this previously *returned* the exception instead of
            # raising it, so callers crashed later with a confusing KeyError.
            raise RuntimeError("Can't get %s values without a bank file"
                               % param)
    @classmethod
    def get_param_names(cls):
        """Returns a list of plottable CBC parameter variables"""
        return [m[0] for m in inspect.getmembers(cls) \
            if type(m[1]) == property]
    def mask_to_n_loudest_clustered_events(self, n_loudest=10,
                                           ranking_statistic="newsnr",
                                           cluster_window=10):
        """Edits the mask property of the class to point to the N loudest
        single detector events as ranked by ranking statistic. Events are
        clustered so that no more than 1 event within +/- cluster-window will
        be considered."""
        # If this becomes memory intensive we can optimize
        if ranking_statistic == "newsnr":
            stat = self.newsnr
            # newsnr doesn't return an array if len(stat) == 1
            if len(self.snr) == 1:
                stat = np.array([stat])
            self.stat_name = "Reweighted SNR"
        elif ranking_statistic == "newsnr_sgveto":
            stat = self.newsnr_sgveto
            # newsnr doesn't return an array if len(stat) == 1
            if len(self.snr) == 1:
                stat = np.array([stat])
            self.stat_name = "Reweighted SNR (+sgveto)"
        elif ranking_statistic == "snr":
            stat = self.snr
            self.stat_name = "SNR"
        else:
            err_msg = "Don't recognize statistic %s." % (ranking_statistic)
            raise ValueError(err_msg)
        times = self.end_time
        # walk the triggers loudest-first, keeping one per cluster window
        index = stat.argsort()[::-1]
        new_times = []
        new_index = []
        for curr_idx in index:
            curr_time = times[curr_idx]
            for time in new_times:
                if abs(curr_time - time) < cluster_window:
                    break
            else:
                # Only get here if no other triggers within cluster window
                new_index.append(curr_idx)
                new_times.append(curr_time)
            if len(new_index) >= n_loudest:
                break
        index = np.array(new_index)
        self.stat = stat[index]
        if self.mask.dtype == 'bool':
            # convert the selection back into a boolean mask over all triggers
            orig_indices = self.mask.nonzero()[0][index]
            self.mask = np.in1d(np.arange(len(self.mask)), orig_indices,
                                assume_unique=True)
        else:
            self.mask = self.mask[index]
    # --- Template (bank) parameters of the surviving triggers -------------
    @property
    def template_id(self):
        return np.array(self.trigs['template_id'])[self.mask]
    @property
    def mass1(self):
        self.checkbank('mass1')
        return np.array(self.bank['mass1'])[self.template_id]
    @property
    def mass2(self):
        self.checkbank('mass2')
        return np.array(self.bank['mass2'])[self.template_id]
    @property
    def spin1z(self):
        self.checkbank('spin1z')
        return np.array(self.bank['spin1z'])[self.template_id]
    @property
    def spin2z(self):
        self.checkbank('spin2z')
        return np.array(self.bank['spin2z'])[self.template_id]
    @property
    def spin2x(self):
        self.checkbank('spin2x')
        return np.array(self.bank['spin2x'])[self.template_id]
    @property
    def spin2y(self):
        self.checkbank('spin2y')
        return np.array(self.bank['spin2y'])[self.template_id]
    @property
    def spin1x(self):
        self.checkbank('spin1x')
        return np.array(self.bank['spin1x'])[self.template_id]
    @property
    def spin1y(self):
        self.checkbank('spin1y')
        return np.array(self.bank['spin1y'])[self.template_id]
    @property
    def inclination(self):
        self.checkbank('inclination')
        return np.array(self.bank['inclination'])[self.template_id]
    @property
    def f_lower(self):
        self.checkbank('f_lower')
        return np.array(self.bank['f_lower'])[self.template_id]
    # --- Derived mass/spin combinations ------------------------------------
    @property
    def mtotal(self):
        return self.mass1 + self.mass2
    @property
    def mchirp(self):
        return conversions.mchirp_from_mass1_mass2(self.mass1, self.mass2)
    @property
    def eta(self):
        return conversions.eta_from_mass1_mass2(self.mass1, self.mass2)
    @property
    def effective_spin(self):
        # FIXME assumes aligned spins
        return conversions.chi_eff(self.mass1, self.mass2,
                                   self.spin1z, self.spin2z)
    # IMPROVEME: would like to have a way to access all get_freq and/or
    # other pnutils.* names rather than hard-coding each one
    # - eg make this part of a fancy interface to the bank file ?
    @property
    def f_seobnrv2_peak(self):
        return pnutils.get_freq('fSEOBNRv2Peak', self.mass1, self.mass2,
                                self.spin1z, self.spin2z)
    @property
    def f_seobnrv4_peak(self):
        return pnutils.get_freq('fSEOBNRv4Peak', self.mass1, self.mass2,
                                self.spin1z, self.spin2z)
    # --- Per-trigger quantities --------------------------------------------
    @property
    def end_time(self):
        return np.array(self.trigs['end_time'])[self.mask]
    @property
    def template_duration(self):
        return np.array(self.trigs['template_duration'])[self.mask]
    @property
    def snr(self):
        return np.array(self.trigs['snr'])[self.mask]
    @property
    def sgchisq(self):
        return np.array(self.trigs['sg_chisq'])[self.mask]
    @property
    def u_vals(self):
        return np.array(self.trigs['u_vals'])[self.mask]
    @property
    def rchisq(self):
        # reduced chi-squared from the stored chisq and its dof
        return np.array(self.trigs['chisq'])[self.mask] \
            / (np.array(self.trigs['chisq_dof'])[self.mask] * 2 - 2)
    @property
    def newsnr(self):
        return events.newsnr(self.snr, self.rchisq)
    @property
    def newsnr_sgveto(self):
        return events.newsnr_sgveto(self.snr, self.rchisq, self.sgchisq)
    def get_column(self, cname):
        """Return the named quantity: a property if defined, otherwise the
        raw (masked) trigger column."""
        if hasattr(self, cname):
            return getattr(self, cname)
        else:
            return np.array(self.trigs[cname])[self.mask]
class ForegroundTriggers(object):
    """View of the foreground coincident triggers in a coinc HDF5 file.

    Combines the coinc file with the template bank file and (optionally)
    the per-detector single-trigger files, exposing columns sorted by
    decreasing IFAR and a helper to export the events as a LIGOLW XML
    document.
    """
    # FIXME: A lot of this is hardcoded to expect two ifos
    def __init__(self, coinc_file, bank_file, sngl_files=None, n_loudest=None,
                 group='foreground'):
        self.coinc_file = FileData(coinc_file, group=group)
        self.sngl_files = {}
        if sngl_files is not None:
            for file in sngl_files:
                curr_dat = FileData(file)
                curr_ifo = curr_dat.group_key
                self.sngl_files[curr_ifo] = curr_dat
        self.bank_file = HFile(bank_file, "r")
        self.n_loudest = n_loudest
        # Caches for the lazily-computed properties below.
        self._sort_arr = None
        self._template_id = None
        self._trig_ids = None
    @property
    def sort_arr(self):
        """Indices sorting the coincs by decreasing ifar, truncated to the
        n_loudest events when that was requested (computed once, cached)."""
        if self._sort_arr is None:
            ifar = self.coinc_file.get_column('ifar')
            sorting = ifar.argsort()[::-1]
            if self.n_loudest:
                sorting = sorting[:self.n_loudest]
            self._sort_arr = sorting
        return self._sort_arr
    @property
    def template_id(self):
        """template_id of each coinc, in loudest-first order (cached)."""
        if self._template_id is None:
            template_id = self.get_coincfile_array('template_id')
            self._template_id = template_id
        return self._template_id
    @property
    def trig_id(self):
        """Dict mapping ifo name to the single-detector trigger ids of each
        coinc, in loudest-first order (cached)."""
        if self._trig_ids is not None:
            return self._trig_ids
        self._trig_ids = {}
        # FIXME: There is no clear mapping from trig_id to ifo. This is bad!!!
        # for now a hack is in place.
        ifo1 = self.coinc_file.h5file.attrs['detector_1']
        ifo2 = self.coinc_file.h5file.attrs['detector_2']
        trigid1 = self.get_coincfile_array('trigger_id1')
        trigid2 = self.get_coincfile_array('trigger_id2')
        self._trig_ids[ifo1] = trigid1
        self._trig_ids[ifo2] = trigid2
        return self._trig_ids
    def get_coincfile_array(self, variable):
        """Return the named coinc-file column, in loudest-first order."""
        return self.coinc_file.get_column(variable)[self.sort_arr]
    def get_bankfile_array(self, variable):
        """Return the named bank-file column evaluated at each coinc's
        template_id."""
        try:
            return self.bank_file[variable][:][self.template_id]
        except IndexError:
            # Fancy-indexing with an empty id array can raise; treat that
            # as "no events" rather than an error.
            if len(self.template_id) == 0:
                return np.array([])
            raise
    def get_snglfile_array_dict(self, variable):
        """Return {ifo: named single-file column at each coinc's trigger id}."""
        return_dict = {}
        for ifo in self.sngl_files.keys():
            try:
                curr = self.sngl_files[ifo].get_column(variable)[\
                    self.trig_id[ifo]]
            except IndexError:
                # As above: an empty id array yields an empty result.
                if len(self.trig_id[ifo]) == 0:
                    curr = np.array([])
                else:
                    raise
            return_dict[ifo] = curr
        return return_dict
    def to_coinc_xml_object(self, file_name):
        """Export the sorted foreground triggers as a LIGOLW XML coinc
        document written to file_name."""
        # FIXME: This function will only work with two ifos!!
        outdoc = ligolw.Document()
        outdoc.appendChild(ligolw.LIGO_LW())
        ifos = [ifo for ifo in self.sngl_files.keys()]
        proc_id = ligolw_process.register_to_xmldoc(outdoc, 'pycbc',
                {}, ifos=ifos, comment='', version=pycbc_version.git_hash,
                cvs_repository='pycbc/'+pycbc_version.git_branch,
                cvs_entry_time=pycbc_version.date).process_id
        search_summ_table = lsctables.New(lsctables.SearchSummaryTable)
        coinc_h5file = self.coinc_file.h5file
        start_time = coinc_h5file['segments']['coinc']['start'][:].min()
        end_time = coinc_h5file['segments']['coinc']['end'][:].max()
        num_trigs = len(self.sort_arr)
        search_summary = return_search_summary(start_time, end_time,
                num_trigs, ifos)
        search_summ_table.append(search_summary)
        outdoc.childNodes[0].appendChild(search_summ_table)
        sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
        coinc_def_table = lsctables.New(lsctables.CoincDefTable)
        coinc_event_table = lsctables.New(lsctables.CoincTable)
        coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
        coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
        time_slide_table = lsctables.New(lsctables.TimeSlideTable)
        # Set up time_slide table
        time_slide_id = lsctables.TimeSlideID(0)
        for ifo in ifos:
            time_slide_row = lsctables.TimeSlide()
            time_slide_row.instrument = ifo
            time_slide_row.time_slide_id = time_slide_id
            time_slide_row.offset = 0
            time_slide_row.process_id = proc_id
            time_slide_table.append(time_slide_row)
        # Set up coinc_definer table
        coinc_def_id = lsctables.CoincDefID(0)
        coinc_def_row = lsctables.CoincDef()
        coinc_def_row.search = "inspiral"
        coinc_def_row.description = "sngl_inspiral<-->sngl_inspiral coincidences"
        coinc_def_row.coinc_def_id = coinc_def_id
        coinc_def_row.search_coinc_type = 0
        coinc_def_table.append(coinc_def_row)
        # Pre-fetch the columns needed to fill the per-event rows below.
        bank_col_names = ['mass1', 'mass2', 'spin1z', 'spin2z']
        bank_col_vals = {}
        for name in bank_col_names:
            bank_col_vals[name] = self.get_bankfile_array(name)
        coinc_event_names = ['ifar', 'time1', 'fap', 'stat']
        coinc_event_vals = {}
        for name in coinc_event_names:
            coinc_event_vals[name] = self.get_coincfile_array(name)
        sngl_col_names = ['snr', 'chisq', 'chisq_dof', 'bank_chisq',
                          'bank_chisq_dof', 'cont_chisq', 'cont_chisq_dof',
                          'end_time', 'template_duration', 'coa_phase',
                          'sigmasq']
        sngl_col_vals = {}
        for name in sngl_col_names:
            sngl_col_vals[name] = self.get_snglfile_array_dict(name)
        # NOTE(review): xrange is Python-2 only; this module predates py3.
        for idx in xrange(len(self.sort_arr)):
            # Set up IDs and mapping values
            coinc_id = lsctables.CoincID(idx)
            # Set up sngls
            # FIXME: As two-ifo is hardcoded loop over all ifos
            sngl_combined_mchirp = 0
            sngl_combined_mtot = 0
            for ifo in ifos:
                sngl_id = self.trig_id[ifo][idx]
                event_id = lsctables.SnglInspiralID(sngl_id)
                sngl = return_empty_sngl()
                sngl.event_id = event_id
                sngl.ifo = ifo
                for name in sngl_col_names:
                    val = sngl_col_vals[name][ifo][idx]
                    if name == 'end_time':
                        sngl.set_end(LIGOTimeGPS(val))
                    else:
                        setattr(sngl, name, val)
                for name in bank_col_names:
                    val = bank_col_vals[name][idx]
                    setattr(sngl, name, val)
                # Derived quantities for this single-detector trigger.
                sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
                        sngl.mass1, sngl.mass2)
                sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
                        sngl.mass1, sngl.mass2)
                sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
                sngl_combined_mchirp += sngl.mchirp
                sngl_combined_mtot += sngl.mtotal
                sngl_inspiral_table.append(sngl)
                # Set up coinc_map entry
                coinc_map_row = lsctables.CoincMap()
                coinc_map_row.table_name = 'sngl_inspiral'
                coinc_map_row.coinc_event_id = coinc_id
                coinc_map_row.event_id = event_id
                coinc_event_map_table.append(coinc_map_row)
            # Average (over ifos) chirp mass and total mass for the coinc row.
            sngl_combined_mchirp = sngl_combined_mchirp / len(ifos)
            sngl_combined_mtot = sngl_combined_mtot / len(ifos)
            # Set up coinc inspiral and coinc event tables
            coinc_event_row = lsctables.Coinc()
            coinc_inspiral_row = lsctables.CoincInspiral()
            coinc_event_row.coinc_def_id = coinc_def_id
            coinc_event_row.nevents = len(ifos)
            coinc_event_row.instruments = ','.join(ifos)
            coinc_inspiral_row.set_ifos(ifos)
            coinc_event_row.time_slide_id = time_slide_id
            coinc_event_row.process_id = proc_id
            coinc_event_row.coinc_event_id = coinc_id
            coinc_inspiral_row.coinc_event_id = coinc_id
            coinc_inspiral_row.mchirp = sngl_combined_mchirp
            coinc_inspiral_row.mass = sngl_combined_mtot
            coinc_inspiral_row.set_end(\
                LIGOTimeGPS(coinc_event_vals['time1'][idx]))
            coinc_inspiral_row.snr = coinc_event_vals['stat'][idx]
            coinc_inspiral_row.false_alarm_rate = coinc_event_vals['fap'][idx]
            coinc_inspiral_row.combined_far = 1./coinc_event_vals['ifar'][idx]
            # Transform to Hz
            coinc_inspiral_row.combined_far = \
                    coinc_inspiral_row.combined_far / YRJUL_SI
            coinc_event_row.likelihood = 0.
            coinc_inspiral_row.minimum_duration = 0.
            coinc_event_table.append(coinc_event_row)
            coinc_inspiral_table.append(coinc_inspiral_row)
        outdoc.childNodes[0].appendChild(coinc_def_table)
        outdoc.childNodes[0].appendChild(coinc_event_table)
        outdoc.childNodes[0].appendChild(coinc_event_map_table)
        outdoc.childNodes[0].appendChild(time_slide_table)
        outdoc.childNodes[0].appendChild(coinc_inspiral_table)
        outdoc.childNodes[0].appendChild(sngl_inspiral_table)
        ligolw_utils.write_filename(outdoc, file_name)
chisq_choices = ['traditional', 'cont', 'bank', 'max_cont_trad', 'sg',
                 'max_bank_cont', 'max_bank_trad', 'max_bank_cont_trad']

def get_chisq_from_file_choice(hdfile, chisq_choice):
    """Return the reduced chi-squared series selected by ``chisq_choice``.

    Parameters
    ----------
    hdfile : h5py.File or dict-like
        Open single-detector trigger file providing the ``chisq``,
        ``cont_chisq``, ``bank_chisq`` (and matching ``*_dof``) and/or
        ``sg_chisq`` datasets, as required by the chosen option.
    chisq_choice : str
        One of ``chisq_choices``.  The ``max_*`` options take the
        element-wise maximum of the named reduced chi-squareds.

    Returns
    -------
    numpy.ndarray
        The selected (reduced) chi-squared values.

    Raises
    ------
    ValueError
        If ``chisq_choice`` is not one of the recognized choices.
    """
    f = hdfile
    # Compute each reduced chi-squared only if the choice requires it.
    if chisq_choice in ['traditional', 'max_cont_trad', 'max_bank_trad',
                        'max_bank_cont_trad']:
        trad_chisq = f['chisq'][:]
        # We now need to handle the case where chisq is not actually calculated
        # 0 is used as a sentinel value
        trad_chisq_dof = f['chisq_dof'][:]
        trad_chisq /= (trad_chisq_dof * 2 - 2)
    if chisq_choice in ['cont', 'max_cont_trad', 'max_bank_cont',
                        'max_bank_cont_trad']:
        cont_chisq = f['cont_chisq'][:]
        cont_chisq_dof = f['cont_chisq_dof'][:]
        cont_chisq /= cont_chisq_dof
    if chisq_choice in ['bank', 'max_bank_cont', 'max_bank_trad',
                        'max_bank_cont_trad']:
        bank_chisq = f['bank_chisq'][:]
        bank_chisq_dof = f['bank_chisq_dof'][:]
        bank_chisq /= bank_chisq_dof
    if chisq_choice == 'sg':
        chisq = f['sg_chisq'][:]
    elif chisq_choice == 'traditional':
        chisq = trad_chisq
    elif chisq_choice == 'cont':
        chisq = cont_chisq
    elif chisq_choice == 'bank':
        chisq = bank_chisq
    elif chisq_choice == 'max_cont_trad':
        chisq = np.maximum(trad_chisq, cont_chisq)
    elif chisq_choice == 'max_bank_cont':
        chisq = np.maximum(bank_chisq, cont_chisq)
    elif chisq_choice == 'max_bank_trad':
        chisq = np.maximum(bank_chisq, trad_chisq)
    elif chisq_choice == 'max_bank_cont_trad':
        chisq = np.maximum(np.maximum(bank_chisq, cont_chisq), trad_chisq)
    else:
        # Fixed grammar of the error message ("Do not recognized ...").
        err_msg = "Do not recognize --chisq-choice %s" % chisq_choice
        raise ValueError(err_msg)
    return chisq
def save_dict_to_hdf5(dic, filename):
    """Serialise a (possibly nested) dictionary into a new HDF5 file.

    Parameters
    ----------
    dic:
        python dictionary to be converted to hdf5 format
    filename:
        desired name of hdf5 file
    """
    with h5py.File(filename, 'w') as outfile:
        recursively_save_dict_contents_to_group(outfile, '/', dic)
def recursively_save_dict_contents_to_group(h5file, path, dic):
    """Recursively write the contents of ``dic`` under ``path`` in ``h5file``.

    Parameters
    ----------
    h5file:
        h5py file (or any mapping supporting item assignment) to be written to
    path:
        path within the h5py file for the saved dictionary; must end with '/'
    dic:
        python dictionary to be converted to hdf5 format

    Raises
    ------
    ValueError
        If a value has a type that cannot be stored.
    """
    for key, item in dic.items():
        # Generalized: accept native Python scalars (int, float, complex) as
        # well as numpy scalars/arrays; previously plain ints/floats raised.
        if isinstance(item, (np.ndarray, np.number, int, float, complex,
                             str, bytes, tuple, list)):
            # Scalars, strings and array-likes become datasets directly.
            h5file[path + str(key)] = item
        elif isinstance(item, dict):
            # Nested dicts become nested groups.
            recursively_save_dict_contents_to_group(h5file, path + key + '/', item)
        else:
            raise ValueError('Cannot save %s type' % type(item))
| gpl-3.0 |
shuggiefisher/crowdstock | django/contrib/gis/geos/prototypes/__init__.py | 244 | 1319 | """
This module contains all of the GEOS ctypes function prototypes. Each
prototype handles the interaction between the GEOS library and Python
via ctypes.
"""
# Coordinate sequence routines.
from django.contrib.gis.geos.prototypes.coordseq import create_cs, get_cs, \
cs_clone, cs_getordinate, cs_setordinate, cs_getx, cs_gety, cs_getz, \
cs_setx, cs_sety, cs_setz, cs_getsize, cs_getdims
# Geometry routines.
from django.contrib.gis.geos.prototypes.geom import from_hex, from_wkb, from_wkt, \
create_point, create_linestring, create_linearring, create_polygon, create_collection, \
destroy_geom, get_extring, get_intring, get_nrings, get_geomn, geom_clone, \
geos_normalize, geos_type, geos_typeid, geos_get_srid, geos_set_srid, \
get_dims, get_num_coords, get_num_geoms, \
to_hex, to_wkb, to_wkt
# Miscellaneous routines.
from django.contrib.gis.geos.prototypes.misc import *
# Predicates
from django.contrib.gis.geos.prototypes.predicates import geos_hasz, geos_isempty, \
geos_isring, geos_issimple, geos_isvalid, geos_contains, geos_crosses, \
geos_disjoint, geos_equals, geos_equalsexact, geos_intersects, \
geos_intersects, geos_overlaps, geos_relatepattern, geos_touches, geos_within
# Topology routines
from django.contrib.gis.geos.prototypes.topology import *
| bsd-3-clause |
jordiclariana/ansible | lib/ansible/modules/cloud/cloudstack/cs_domain.py | 48 | 7691 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_domain
short_description: Manages domains on Apache CloudStack based clouds.
description:
- Create, update and remove domains.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
path:
description:
- Path of the domain.
- Prefix C(ROOT/) or C(/ROOT/) in path is optional.
required: true
network_domain:
description:
- Network domain for networks in the domain.
required: false
default: null
clean_up:
description:
- Clean up all domain resources like child domains and accounts.
- Considered on C(state=absent).
required: false
default: false
state:
description:
- State of the domain.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a domain
local_action:
module: cs_domain
path: ROOT/customers
network_domain: customers.example.com
# Create another subdomain
local_action:
module: cs_domain
path: ROOT/customers/xy
network_domain: xy.customers.example.com
# Remove a domain
local_action:
module: cs_domain
path: ROOT/customers/xy
state: absent
'''
RETURN = '''
---
id:
description: UUID of the domain.
returned: success
type: string
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
name:
description: Name of the domain.
returned: success
type: string
sample: customers
path:
description: Domain path.
returned: success
type: string
sample: /ROOT/customers
parent_domain:
description: Parent domain of the domain.
returned: success
type: string
sample: ROOT
network_domain:
description: Network domain of the domain.
returned: success
type: string
sample: example.local
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackDomain(AnsibleCloudStack):
    """Manage a CloudStack domain identified by its full path."""
    def __init__(self, module):
        super(AnsibleCloudStackDomain, self).__init__(module)
        # Map CloudStack API result keys to the keys returned by the module.
        self.returns = {
            'path': 'path',
            'networkdomain': 'network_domain',
            'parentdomainname': 'parent_domain',
        }
        # Cache for the looked-up domain dict.
        self.domain = None
    def _get_domain_internal(self, path=None):
        """Return the CloudStack domain dict matching ``path`` (defaults to
        the module's ``path`` param), or None if no such domain exists.

        Paths are compared case-insensitively and normalized so a leading
        ``root/`` prefix is always present.
        """
        if not path:
            path = self.module.params.get('path')
        if path.endswith('/'):
            self.module.fail_json(msg="Path '%s' must not end with /" % path)
        path = path.lower()
        if path.startswith('/') and not path.startswith('/root/'):
            path = "root" + path
        elif not path.startswith('root/'):
            path = "root/" + path
        args = {}
        args['listall'] = True
        domains = self.cs.listDomains(**args)
        if domains:
            for d in domains['domain']:
                if path == d['path'].lower():
                    return d
        return None
    def get_name(self):
        # last part of the path is the name
        # NOTE(review): the [-1:] slice returns a one-element *list*, not a
        # string -- confirm the CloudStack API accepts this for 'name'.
        name = self.module.params.get('path').split('/')[-1:]
        return name
    def get_domain(self, key=None):
        """Return the (cached) existing domain, or a single key of it."""
        if not self.domain:
            self.domain = self._get_domain_internal()
        return self._get_by_key(key, self.domain)
    def get_parent_domain(self, key=None):
        """Return the parent domain of the configured path.

        Returns None for top-level paths; fails the module if the parent
        path does not exist in CloudStack.
        """
        path = self.module.params.get('path')
        # cut off last /*
        path = '/'.join(path.split('/')[:-1])
        if not path:
            return None
        parent_domain = self._get_domain_internal(path=path)
        if not parent_domain:
            self.module.fail_json(msg="Parent domain path %s does not exist" % path)
        return self._get_by_key(key, parent_domain)
    def present_domain(self):
        """Ensure the domain exists: create it or update it as needed."""
        domain = self.get_domain()
        if not domain:
            domain = self.create_domain(domain)
        else:
            domain = self.update_domain(domain)
        return domain
    def create_domain(self, domain):
        """Create the domain (honours check mode)."""
        self.result['changed'] = True
        args = {}
        args['name'] = self.get_name()
        args['parentdomainid'] = self.get_parent_domain(key='id')
        args['networkdomain'] = self.module.params.get('network_domain')
        if not self.module.check_mode:
            res = self.cs.createDomain(**args)
            if 'errortext' in res:
                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
            domain = res['domain']
        return domain
    def update_domain(self, domain):
        """Update the network domain if it differs (honours check mode)."""
        args = {}
        args['id'] = domain['id']
        args['networkdomain'] = self.module.params.get('network_domain')
        if self.has_changed(args, domain):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.cs.updateDomain(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                domain = res['domain']
        return domain
    def absent_domain(self):
        """Delete the domain if present (honours check mode); optionally
        polls the async delete job until it finishes."""
        domain = self.get_domain()
        if domain:
            self.result['changed'] = True
            if not self.module.check_mode:
                args = {}
                args['id'] = domain['id']
                args['cleanup'] = self.module.params.get('clean_up')
                res = self.cs.deleteDomain(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    res = self.poll_job(res, 'domain')
        return domain
def main():
    """Ansible module entry point: dispatch on the requested state."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        path = dict(required=True),
        state = dict(choices=['present', 'absent'], default='present'),
        network_domain = dict(default=None),
        clean_up = dict(type='bool', default=False),
        poll_async = dict(type='bool', default=True),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )
    try:
        acs_dom = AnsibleCloudStackDomain(module)
        state = module.params.get('state')
        if state in ['absent']:
            domain = acs_dom.absent_domain()
        else:
            domain = acs_dom.present_domain()
        result = acs_dom.get_result(domain)
    except CloudStackException as e:
        # fail_json exits, so 'result' is always bound at exit_json below.
        module.fail_json(msg='CloudStackException: %s' % str(e))
    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
kritzware/PyBot | modules/modules/uptime.py | 2 | 1178 | import logging, coloredlogs
from datetime import datetime, timedelta, date, time
from pytz import timezone
from modules.config import *
from modules.api import API
streamOnlineCheck = API(1)
class Uptime:
    """Chat command reporting how long the configured channel has been live.

    Uses the Twitch "videos?broadcasts=true" endpoint: the most recent
    broadcast's ``recorded_at`` timestamp marks the stream start.
    """
    CommandMain = 'uptime'
    CommandMainOptions = []
    CommandResponses = []

    def __init__(self):
        # Channel name comes from the bot configuration (modules.config).
        self.channel = CHANNEL

    def execute_command(self, command):
        """Respond to the uptime command (``command`` is currently unused)."""
        # Imported here to avoid a circular import with modules.bot.
        from modules.bot import bot_msg
        if streamOnlineCheck.check_stream_online():
            self.uptime()
        else:
            bot_msg("{} is not streaming at the moment FeelsBadMan".format(self.channel))

    def uptime(self):
        """Fetch the latest broadcast start time and announce the uptime.

        Fixed: this method was missing ``self``, so the ``self.uptime()``
        call above raised a TypeError and ``self.channel`` was a NameError.
        """
        from modules.bot import bot_msg
        data = streamOnlineCheck.getJSON('https://api.twitch.tv/kraken/channels/{}/videos?limit=1&broadcasts=true'.format(self.channel))
        latest_stream = data['videos'][0]['recorded_at']
        timeformat = "%Y-%m-%dT%H:%M:%SZ"
        start_date = datetime.strptime(latest_stream, timeformat)
        current_date = datetime.utcnow()
        output_date = current_date - start_date
        # Fixed: the previous string-slicing of the timedelta broke for
        # streams of 10 hours or more; compute hours/minutes numerically.
        total_minutes = int(output_date.total_seconds()) // 60
        hours, minutes = divmod(total_minutes, 60)
        bot_msg("{} has been live for {} hrs, {:02d} mins FeelsGoodMan".format(self.channel, hours, minutes))
benthomasson/ansible | lib/ansible/modules/cloud/openstack/os_keystone_domain.py | 29 | 5736 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_domain
short_description: Manage OpenStack Identity Domains
author:
- Monty
- Haneef Ali
extends_documentation_fragment: openstack
version_added: "2.1"
description:
- Create, update, or delete OpenStack Identity domains. If a domain
with the supplied name already exists, it will be updated with the
new description and enabled attributes.
options:
name:
description:
- Name that has to be given to the instance
required: true
description:
description:
- Description of the domain
required: false
default: None
enabled:
description:
- Is the domain enabled
required: false
default: True
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a domain
- os_keystone_domain:
cloud: mycloud
state: present
name: demo
description: Demo Domain
# Delete a domain
- os_keystone_domain:
cloud: mycloud
state: absent
name: demo
'''
RETURN = '''
domain:
description: Dictionary describing the domain.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Domain ID.
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
name:
description: Domain name.
type: string
sample: "demo"
description:
description: Domain description.
type: string
sample: "Demo Domain"
enabled:
description: Domain description.
type: boolean
sample: True
id:
description: The domain ID.
returned: On success when I(state) is 'present'
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _needs_update(module, domain):
if module.params['description'] is not None and \
domain.description != module.params['description']:
return True
if domain.enabled != module.params['enabled']:
return True
return False
def _system_state_change(module, domain):
state = module.params['state']
if state == 'absent' and domain:
return True
if state == 'present':
if domain is None:
return True
return _needs_update(module, domain)
return False
def main():
    """Ansible module entry point: ensure the Keystone domain state."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        description=dict(default=None),
        enabled=dict(default=True, type='bool'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    name = module.params['name']
    description = module.params['description']
    enabled = module.params['enabled']
    state = module.params['state']
    try:
        cloud = shade.operator_cloud(**module.params)
        # Domain names must be unique; search and verify the count.
        domains = cloud.search_domains(filters=dict(name=name))
        if len(domains) > 1:
            module.fail_json(msg='Domain name %s is not unique' % name)
        elif len(domains) == 1:
            domain = domains[0]
        else:
            domain = None
        if module.check_mode:
            # Report whether a change *would* occur without making it.
            module.exit_json(changed=_system_state_change(module, domain))
        if state == 'present':
            if domain is None:
                domain = cloud.create_domain(
                    name=name, description=description, enabled=enabled)
                changed = True
            else:
                if _needs_update(module, domain):
                    domain = cloud.update_domain(
                        domain.id, name=name, description=description,
                        enabled=enabled)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, domain=domain, id=domain.id)
        elif state == 'absent':
            if domain is None:
                changed=False
            else:
                cloud.delete_domain(domain.id)
                changed=True
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
RyanHope/AutobahnPython | examples/asyncio/websocket/echo/client_coroutines.py | 9 | 2587 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from autobahn.asyncio.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
try:
import asyncio
except ImportError:
import trollius as asyncio
class MyClientProtocol(WebSocketClientProtocol):
    """Demo echo client: sends a text and a binary frame every second and
    logs whatever the server sends back."""

    def onConnect(self, response):
        print("Server connected: {0}".format(response.peer))

    @asyncio.coroutine
    def onOpen(self):
        print("WebSocket connection open.")
        # Keep greeting the server once per second, forever.
        text_frame = u"Hello, world!".encode('utf8')
        binary_frame = b"\x00\x01\x03\x04"
        while True:
            self.sendMessage(text_frame)
            self.sendMessage(binary_frame, isBinary=True)
            yield from asyncio.sleep(1)

    def onMessage(self, payload, isBinary):
        if isBinary:
            summary = "Binary message received: {0} bytes".format(len(payload))
        else:
            summary = "Text message received: {0}".format(payload.decode('utf8'))
        print(summary)

    def onClose(self, wasClean, code, reason):
        print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
    # Connect the echo client to a local server on port 9000 and run forever.
    factory = WebSocketClientFactory(u"ws://127.0.0.1:9000", debug=False)
    factory.protocol = MyClientProtocol
    loop = asyncio.get_event_loop()
    coro = loop.create_connection(factory, '127.0.0.1', 9000)
    loop.run_until_complete(coro)
    loop.run_forever()
    loop.close()
Trust-Code/odoo-brazil-hr | l10n_br_hr_payroll/model/hr_contract.py | 2 | 6637 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Brazillian Human Resources Payroll module for OpenERP
# Copyright (C) 2014 KMEE (http://www.kmee.com.br)
# @author Matheus Felix <matheus.felix@kmee.com.br>
# Rafael da Silva Lima <rafael.lima@kmee.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from tools.translate import _
import openerp.addons.decimal_precision as dp
from datetime import datetime
import time
from decimal import Decimal, ROUND_DOWN
class HrContract(orm.Model):
    """Brazilian payroll extensions to hr.contract (benefits, IRRF).

    NOTE(review): the INSS and IRRF bracket values below are hardcoded
    tax-table constants -- they must be updated when the tables change.
    """
    _inherit='hr.contract'
    def _get_wage_ir(self, cr, uid, ids, fields, arg, context=None):
        """Functional field: income tax (IRRF) withheld for the contract's
        wage, after the INSS deduction and the dependents allowance."""
        res = {}
        obj_employee = self.pool.get('hr.employee')
        employee_ids = obj_employee.search(cr, uid, [('contract_ids.id','=', ids[0])])
        employees = obj_employee.browse(cr, uid, employee_ids, context=context)
        for employee in employees:
            for contract in employee.contract_ids:
                if employee_ids:
                    # Employee INSS share by wage bracket (negative: deduction).
                    INSS =(-482.93 if ((contract.wage) >= 4390.25)
                           else -((contract.wage) * 0.11) if ((contract.wage) >= 2195.13) and ((contract.wage) <= 4390.24)
                           else -((contract.wage) * 0.09) if ((contract.wage) >= 1317.08) and ((contract.wage) <= 2195.12)
                           else -((contract.wage) * 0.08))
                    # Taxable base: wage minus dependents allowance plus INSS.
                    lane = (contract.wage - employee.n_dependent + INSS)
                    # Each "lane" is an IRRF bracket; amounts are truncated
                    # (not rounded) to 2 decimals via ROUND_DOWN.
                    first_lane = (-(0.275*(lane) - 826.15))
                    l1 = Decimal(str(first_lane))
                    lane1 = l1.quantize(Decimal('1.10'), rounding=ROUND_DOWN)
                    option_one = float(lane1)
                    second_lane = (-(0.225*(lane) - 602.96))
                    l2 = Decimal(str(second_lane))
                    lane2 = l2.quantize(Decimal('1.10'), rounding=ROUND_DOWN)
                    option_two = float(lane2)
                    third_lane = (-(0.150*(lane) - 335.03))
                    l3 = Decimal(str(third_lane))
                    lane3 = l3.quantize(Decimal('1.10'), rounding=ROUND_DOWN)
                    option_three = float(lane3)
                    fourth_lane = (-(0.075*(lane) - 134.08))
                    l4 = Decimal(str(fourth_lane))
                    lane4 = l4.quantize(Decimal('1.10'), rounding=ROUND_DOWN)
                    option_four = float(lane4)
                    # Pick the bracket that matches the taxable base; below
                    # the lowest bracket no tax is due.
                    if (lane >= 4463.81):
                        res[ids[0]] = option_one
                        return res
                    elif (lane <= 4463.80) and (lane >= 3572.44):
                        res[ids[0]] = option_two
                        return res
                    elif (lane <= 3572.43) and (lane >= 2679.30):
                        res[ids[0]] = option_three
                        return res
                    elif (lane <= 2679.29) and (lane >= 1787.78):
                        res[ids[0]] = option_four
                        return res
                    else:
                        return 0
    def _get_worked_days(self, cr, uid, ids, fields, arg, context=None):
        """Functional field: number of worked days from the first payslip
        worked-days line attached to this contract (0 if none)."""
        res = {}
        obj_worked_days = self.pool.get('hr.payslip.worked_days')
        worked_ids = obj_worked_days.search(cr, uid, [('contract_id', '=', ids[0])])
        if worked_ids:
            worked = obj_worked_days.browse(cr, uid, worked_ids[0])
            res[ids[0]] = worked.number_of_days
            return res
        else:
            res[ids[0]] = 0
            return res
    def _check_date(self, cr, uid, ids, fields, arg, context=None):
        """Functional field: True when a payslip exists overlapping the
        fixed Feb-28 .. Apr-1 window of the current year.

        NOTE(review): the window boundaries are hardcoded month/day values
        built with strftime -- confirm the intended business rule.
        """
        res = {}
        comp_date_from = time.strftime('%Y-04-01')
        comp_date_to = time.strftime('%Y-02-28')
        obj_payslip = self.pool.get('hr.payslip')
        payslip_ids = obj_payslip.search(cr, uid, [('contract_id', '=', ids[0]),
                                                   ('date_from', '<', comp_date_from),
                                                   ('date_to', '>', comp_date_to)])
        if payslip_ids:
            res[ids[0]] = True
            return res
        else:
            res[ids[0]] = False
            return res
    def _check_voucher(self, cr, uid, ids, context=None):
        """Constraint: food (VA) and meal (VR) vouchers may only be used
        together when the company setting check_benefits allows it."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        for contract in self.browse(cr, uid, ids):
            if user.company_id.check_benefits:
                return True
            else:
                if contract.value_va == 0 or contract.value_vr == 0:
                    return True
                else:
                    return False
        return True
    # Extra benefit amounts and the derived (functional) fields.
    _columns = {
        'value_va': fields.float('Valley Food', help='Daily Value Benefit'),
        'value_vr': fields.float('Meal valley', help='Daily Value Benefit'),
        'workeddays': fields.function(_get_worked_days, type='float'),
        'transportation_voucher': fields.float('Valley Transportation', help='Percentage of monthly deduction'),
        'health_insurance_father' : fields.float('Employee Health Plan', help='Health Plan of the Employee'),
        'health_insurance_dependent' : fields.float('Dependent Health Plan', help='Health Plan for Spouse and Dependents'),
        'calc_date': fields.function(_check_date, type='boolean'),
        'aditional_benefits': fields.float('Aditional Benefits', help='Others employee benefits'),
        'ir_value': fields.function(_get_wage_ir, type="float",digits_compute=dp.get_precision('Payroll')),
    }
    _constraints = [[_check_voucher, u'The company settings do not allow the use of food voucher and simultaneous meal', ['value_va', 'value_vr']]]
    _defaults = {
        'value_va' : 0,
        'value_vr' : 0
    }
| agpl-3.0 |
mrquim/repository.mrquim | repo/script.module.liveresolver/lib/liveresolver/resolvers/vaughnlive.py | 10 | 1761 | # -*- coding: utf-8 -*-
import re,urlparse
from liveresolver.modules import client,constants
from liveresolver.modules.log_utils import log
def resolve(url):
    """Resolve a vaughnlive.tv (or sister-site) channel URL into a playable
    rtmp:// URL string; returns None on any failure (best-effort resolver)."""
    try:
        # A referer may be passed explicitly as a query parameter; fall back
        # to the url itself.  NOTE(review): 'referer' is assigned but never
        # used below -- confirm whether it was meant to go into the headers.
        try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        except: referer = url
        channel = urlparse.urlparse(url).path
        channel = re.compile('/([\w]+)').findall(channel)[-1]
        domain = urlparse.urlparse(url).netloc
        pageUrl = urlparse.urljoin('http://%s' % domain, channel)
        # Each sister site uses its own playpath prefix.
        if 'instagib' in domain: playpath = 'instagib_%s' % channel
        elif 'breakers' in domain: playpath = 'btv_%s' % channel
        elif 'vapers' in domain: playpath = 'vtv_%s' % channel
        else: playpath = 'live_%s' % channel
        import requests
        s = requests.session()
        result = s.get(pageUrl).text
        # Scrape the player SWF path from the channel page.
        swfUrl = re.compile('"(/\d+/swf/[0-9A-Za-z]+\.swf)').findall(result)[0]
        swfUrl = urlparse.urljoin(pageUrl, swfUrl)
        s.headers = {'User-agent':client.agent(),'X-Requested-With':constants.get_shockwave(),'Accept-Encoding':'gzip, deflate, lzma, sdch','Connection':'keep-alive','Host':'mvn.vaughnsoft.net','Referer':'http://vaughnlive.tv/' + channel}
        infoUrl = 'http://mvn.vaughnsoft.net/video/edge/mnv-%s' % (playpath)
        result = s.get(infoUrl).text
        # The edge endpoint answers "<server>;mvnkey-<token>".
        streamer = re.compile('(.+?);').findall(result)[0]
        streamer = 'rtmp://%s/live' % streamer
        app = re.compile('mvnkey-(.+)').findall(result)[0].replace("0m0", "")
        app = 'live?%s' % app
        # Assemble the rtmpdump-style connection string.
        url = '%s app=%s playpath=%s pageUrl=http://vaughnlive.tv/ swfUrl=%s live=true flashver=%s timeout=15' % (streamer, app, playpath,swfUrl,constants.flash_ver())
        return url
    except:
        # Deliberate best-effort: any scraping/network failure returns None.
        return
| gpl-2.0 |
Jorge-Rodriguez/ansible | lib/ansible/plugins/lookup/vars.py | 55 | 3004 | # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: vars
author: Ansible Core
version_added: "2.5"
short_description: Lookup templated value of variables
description:
- Retrieves the value of an Ansible variable.
options:
_terms:
description: The variable names to look up.
required: True
default:
description:
- What to return if a variable is undefined.
- If no default is set, it will result in an error if any of the variables is undefined.
"""
EXAMPLES = """
- name: Show value of 'variablename'
debug: msg="{{ lookup('vars', 'variabl' + myvar)}}"
vars:
variablename: hello
myvar: ename
- name: Show default empty since i dont have 'variablnotename'
debug: msg="{{ lookup('vars', 'variabl' + myvar, default='')}}"
vars:
variablename: hello
myvar: notename
- name: Produce an error since i dont have 'variablnotename'
debug: msg="{{ lookup('vars', 'variabl' + myvar)}}"
ignore_errors: True
vars:
variablename: hello
myvar: notename
- name: find several related variables
debug: msg="{{ lookup('vars', 'ansible_play_hosts', 'ansible_play_batch', 'ansible_play_hosts_all') }}"
- name: alternate way to find some 'prefixed vars' in loop
debug: msg="{{ lookup('vars', 'ansible_play_' + item) }}"
loop:
- hosts
- batch
- hosts_all
"""
RETURN = """
_value:
description:
- value of the variables requested.
"""
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Resolve each requested variable name and return its templated value.

        Looks each term up in the templar's available variables, falling back
        to the current host's hostvars. An undefined variable either yields
        the configured ``default`` option or raises.
        """
        if variables is not None:
            self._templar.set_available_variables(variables)
        available = getattr(self._templar, '_available_variables', {})

        self.set_options(direct=kwargs)
        fallback = self.get_option('default')

        results = []
        for name in terms:
            if not isinstance(name, string_types):
                raise AnsibleError('Invalid setting identifier, "%s" is not a string, its a %s' % (name, type(name)))
            try:
                if name in available:
                    raw = available[name]
                else:
                    # Not a top-level variable: try the current host's facts.
                    try:
                        raw = available['hostvars'][available['inventory_hostname']][name]
                    except KeyError:
                        raise AnsibleUndefinedVariable('No variable found with this name: %s' % name)
                results.append(self._templar.template(raw, fail_on_undefined=True))
            except AnsibleUndefinedVariable:
                # Either the name itself or something it templates to is
                # undefined; substitute the default when one was given.
                if fallback is None:
                    raise
                results.append(fallback)
        return results
| gpl-3.0 |
yoer/hue | desktop/core/ext-py/Django-1.6.10/tests/runtests.py | 49 | 13187 | #!/usr/bin/env python
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
from django import contrib
from django.utils._os import upath
from django.utils import six
# Dotted module prefix used to build labels for contrib test apps.
CONTRIB_MODULE_PATH = 'django.contrib'

# Name of the directory that holds the shared test templates.
TEST_TEMPLATE_DIR = 'templates'

# Absolute paths of this runtests directory and of the django.contrib package.
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
CONTRIB_DIR = os.path.dirname(upath(contrib.__file__))

# Per-run scratch directory; exported so tests can place temp files in it.
TEMP_DIR = tempfile.mkdtemp(prefix='django_')
os.environ['DJANGO_TEST_TEMP_DIR'] = TEMP_DIR

# Subdirectories of the runtests dir that are not test apps and must be
# skipped by the auto-discovery in get_test_modules().
SUBDIRS_TO_SKIP = [
    'requirements',
    'templates',
    'test_discovery_sample',
    'test_discovery_sample2',
    'test_runner_deprecation_app',
    'test_runner_invalid_app',
]

# Applications installed for every test run, regardless of requested labels.
ALWAYS_INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sites',
    'django.contrib.flatpages',
    'django.contrib.redirects',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.comments',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'staticfiles_tests',
    'staticfiles_tests.apps.test',
    'staticfiles_tests.apps.no_label',
]
def get_test_modules():
    """Discover candidate test apps.

    Returns a list of (module path prefix, directory name) pairs found in
    the runtests directory (prefix None) and in django.contrib (prefix
    CONTRIB_MODULE_PATH).
    """
    discovered = []
    search_roots = (
        (None, RUNTESTS_DIR),
        (CONTRIB_MODULE_PATH, CONTRIB_DIR),
    )
    for modpath, dirpath in search_roots:
        for entry in os.listdir(dirpath):
            # Skip anything that cannot be a test app package: dotted names,
            # Python 3 byte code dirs (PEP 3147), SQL fixture dirs, the
            # explicit skip list, and plain files.
            not_an_app = (
                '.' in entry or
                entry == '__pycache__' or
                entry.startswith('sql') or
                os.path.basename(entry) in SUBDIRS_TO_SKIP or
                os.path.isfile(entry)
            )
            if not_an_app:
                continue
            discovered.append((modpath, entry))
    return discovered
def get_installed():
    """Return the module labels of every currently installed Django app."""
    from django.db.models.loading import get_apps
    labels = []
    for app in get_apps():
        # app.__name__ is "<label>.models"; drop the trailing component.
        labels.append(app.__name__.rsplit('.', 1)[0])
    return labels
def setup(verbosity, test_labels):
    """Prepare settings and INSTALLED_APPS for a test run.

    Overrides a handful of Django settings, loads the always-installed apps
    and every test app matching ``test_labels`` (all of them when the list is
    empty), and returns a dict snapshot of the original settings so that
    teardown() can restore them.
    """
    from django.conf import settings
    from django.db.models.loading import get_apps, load_app
    from django.test.testcases import TransactionTestCase, TestCase

    # Force declaring available_apps in TransactionTestCase for faster tests.
    def no_available_apps(self):
        raise Exception("Please define available_apps in TransactionTestCase "
                        "and its subclasses.")
    TransactionTestCase.available_apps = property(no_available_apps)
    TestCase.available_apps = None

    # Snapshot the settings we are about to override, for teardown().
    state = {
        'INSTALLED_APPS': settings.INSTALLED_APPS,
        'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
        'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
        'LANGUAGE_CODE': settings.LANGUAGE_CODE,
        'STATIC_URL': settings.STATIC_URL,
        'STATIC_ROOT': settings.STATIC_ROOT,
    }

    # Redirect some settings for the duration of these tests.
    settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
    settings.ROOT_URLCONF = 'urls'
    settings.STATIC_URL = '/static/'
    settings.STATIC_ROOT = os.path.join(TEMP_DIR, 'static')
    settings.TEMPLATE_DIRS = (os.path.join(RUNTESTS_DIR, TEST_TEMPLATE_DIR),)
    settings.LANGUAGE_CODE = 'en'
    settings.SITE_ID = 1

    if verbosity > 0:
        # Ensure any warnings captured to logging are piped through a verbose
        # logging handler. If any -W options were passed explicitly on command
        # line, warnings are not captured, and this has no effect.
        logger = logging.getLogger('py.warnings')
        handler = logging.StreamHandler()
        logger.addHandler(handler)

    # Load all the ALWAYS_INSTALLED_APPS.
    # django.contrib.comments is deprecated; silence its warning while loading.
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', 'django.contrib.comments is deprecated and will be removed before Django 1.8.', PendingDeprecationWarning)
        get_apps()

    # Load all the test model apps.
    test_modules = get_test_modules()

    # Reduce given test labels to just the app module path
    # (e.g. "django.contrib.auth.tests.Foo" -> "django.contrib.auth").
    test_labels_set = set()
    for label in test_labels:
        bits = label.split('.')
        if bits[:2] == ['django', 'contrib']:
            bits = bits[:3]
        else:
            bits = bits[:1]
        test_labels_set.add('.'.join(bits))

    # If GeoDjango, then we'll want to add in the test applications
    # that are a part of its test suite.
    from django.contrib.gis.tests.utils import HAS_SPATIAL_DB
    if HAS_SPATIAL_DB:
        from django.contrib.gis.tests import geo_apps
        test_modules.extend(geo_apps())
        settings.INSTALLED_APPS.extend(['django.contrib.gis', 'django.contrib.sitemaps'])

    for modpath, module_name in test_modules:
        if modpath:
            module_label = '.'.join([modpath, module_name])
        else:
            module_label = module_name
        # if the module (or an ancestor) was named on the command line, or
        # no modules were named (i.e., run all), import
        # this module and add it to INSTALLED_APPS.
        if not test_labels:
            module_found_in_labels = True
        else:
            match = lambda label: (
                module_label == label or  # exact match
                module_label.startswith(label + '.')  # ancestor match
            )
            module_found_in_labels = any(match(l) for l in test_labels_set)

        if module_found_in_labels:
            if verbosity >= 2:
                print("Importing application %s" % module_name)
            mod = load_app(module_label)
            if mod:
                if module_label not in settings.INSTALLED_APPS:
                    settings.INSTALLED_APPS.append(module_label)

    return state
def teardown(state):
    """Remove the temporary work area and restore the overridden settings."""
    from django.conf import settings
    # Pass a unicode path so rmtree copes with temp trees containing
    # non-ASCII filenames on Windows. (We're assuming the temp dir name
    # itself does not contain non-ASCII characters.)
    try:
        shutil.rmtree(six.text_type(TEMP_DIR))
    except OSError:
        print('Failed to remove temp directory: %s' % TEMP_DIR)
    # Put every saved setting back the way setup() found it.
    for name in state:
        setattr(settings, name, state[name])
def django_tests(verbosity, interactive, failfast, test_labels):
    """Set up the environment, run the test suite and return the failure count."""
    from django.conf import settings

    state = setup(verbosity, test_labels)
    extra_tests = []

    # Run the test suite, including the extra validation tests. Fall back to
    # the default discover runner unless the settings name one explicitly.
    from django.test.utils import get_runner
    if not hasattr(settings, 'TEST_RUNNER'):
        settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
    runner_class = get_runner(settings)

    runner = runner_class(
        verbosity=verbosity,
        interactive=interactive,
        failfast=failfast,
    )
    labels = test_labels or get_installed()
    failures = runner.run_tests(labels, extra_tests=extra_tests)

    teardown(state)
    return failures
def bisect_tests(bisection_label, options, test_labels):
    """Bisect the test suite to find a test that breaks ``bisection_label``.

    Repeatedly splits the label list in half, runs each half together with
    the bisection label in a subprocess, and recurses into whichever half
    reproduces the failure until a single culprit label remains.
    """
    state = setup(int(options.verbosity), test_labels)

    test_labels = test_labels or get_installed()

    print('***** Bisecting test suite: %s' % ' '.join(test_labels))

    # Make sure the bisection point isn't in the test list
    # Also remove tests that need to be run in specific combinations
    for label in [bisection_label, 'model_inheritance_same_model_name']:
        try:
            test_labels.remove(label)
        except ValueError:
            pass

    # Base command line for each subprocess test run.
    subprocess_args = [
        sys.executable, upath(__file__), '--settings=%s' % options.settings]
    if options.failfast:
        subprocess_args.append('--failfast')
    if options.verbosity:
        subprocess_args.append('--verbosity=%s' % options.verbosity)
    if not options.interactive:
        subprocess_args.append('--noinput')

    iteration = 1
    while len(test_labels) > 1:
        # Use floor division: plain '/' yields a float on Python 3, which is
        # not a valid slice index and would crash the bisection loop.
        midpoint = len(test_labels) // 2
        test_labels_a = test_labels[:midpoint] + [bisection_label]
        test_labels_b = test_labels[midpoint:] + [bisection_label]
        print('***** Pass %da: Running the first half of the test suite' % iteration)
        print('***** Test labels: %s' % ' '.join(test_labels_a))
        failures_a = subprocess.call(subprocess_args + test_labels_a)

        print('***** Pass %db: Running the second half of the test suite' % iteration)
        print('***** Test labels: %s' % ' '.join(test_labels_b))
        print('')
        failures_b = subprocess.call(subprocess_args + test_labels_b)

        if failures_a and not failures_b:
            print("***** Problem found in first half. Bisecting again...")
            iteration += 1
            test_labels = test_labels_a[:-1]
        elif failures_b and not failures_a:
            print("***** Problem found in second half. Bisecting again...")
            iteration += 1
            test_labels = test_labels_b[:-1]
        elif failures_a and failures_b:
            print("***** Multiple sources of failure found")
            break
        else:
            print("***** No source of failure found... try pair execution (--pair)")
            break

    if len(test_labels) == 1:
        print("***** Source of error: %s" % test_labels[0])
    teardown(state)
def paired_tests(paired_test, options, test_labels):
    """Run each label together with ``paired_test`` to find a conflicting pair."""
    state = setup(int(options.verbosity), test_labels)
    labels = test_labels or get_installed()

    print('***** Trying paired execution')

    # The constant member of the pair must not appear in the candidate list,
    # nor may tests that only run in specific combinations.
    for excluded in (paired_test, 'model_inheritance_same_model_name'):
        try:
            labels.remove(excluded)
        except ValueError:
            pass

    # Base command line shared by every subprocess invocation.
    base_cmd = [
        sys.executable, upath(__file__), '--settings=%s' % options.settings]
    if options.failfast:
        base_cmd.append('--failfast')
    if options.verbosity:
        base_cmd.append('--verbosity=%s' % options.verbosity)
    if not options.interactive:
        base_cmd.append('--noinput')

    for index, label in enumerate(labels):
        print('***** %d of %d: Check test pairing with %s' % (
            index + 1, len(labels), label))
        if subprocess.call(base_cmd + [label, paired_test]):
            print('***** Found problem pair with %s' % label)
            return

    print('***** No problem pair found')
    teardown(state)
if __name__ == "__main__":
    from optparse import OptionParser

    # Command line front end: parse options, resolve the settings module,
    # then dispatch to the bisect, pair or plain test-run code path.
    usage = "%prog [options] [module module module ...]"
    parser = OptionParser(usage=usage)
    parser.add_option(
        '-v', '--verbosity', action='store', dest='verbosity', default='1',
        type='choice', choices=['0', '1', '2', '3'],
        help='Verbosity level; 0=minimal output, 1=normal output, 2=all '
             'output')
    parser.add_option(
        '--noinput', action='store_false', dest='interactive', default=True,
        help='Tells Django to NOT prompt the user for input of any kind.')
    parser.add_option(
        '--failfast', action='store_true', dest='failfast', default=False,
        help='Tells Django to stop running the test suite after first failed '
             'test.')
    parser.add_option(
        '--settings',
        help='Python path to settings module, e.g. "myproject.settings". If '
             'this isn\'t provided, the DJANGO_SETTINGS_MODULE environment '
             'variable will be used.')
    parser.add_option(
        '--bisect', action='store', dest='bisect', default=None,
        help='Bisect the test suite to discover a test that causes a test '
             'failure when combined with the named test.')
    parser.add_option(
        '--pair', action='store', dest='pair', default=None,
        help='Run the test suite in pairs with the named test to find problem '
             'pairs.')
    parser.add_option(
        '--liveserver', action='store', dest='liveserver', default=None,
        help='Overrides the default address where the live server (used with '
             'LiveServerTestCase) is expected to run from. The default value '
             'is localhost:8081.')
    parser.add_option(
        '--selenium', action='store_true', dest='selenium',
        default=False,
        help='Run the Selenium tests as well (if Selenium is installed)')
    options, args = parser.parse_args()

    # Resolve the settings module: --settings wins, then the environment.
    if options.settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
    elif "DJANGO_SETTINGS_MODULE" not in os.environ:
        parser.error("DJANGO_SETTINGS_MODULE is not set in the environment. "
                     "Set it or use --settings.")
    else:
        options.settings = os.environ['DJANGO_SETTINGS_MODULE']

    # LiveServerTestCase reads its bind address from this environment variable.
    if options.liveserver is not None:
        os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver

    if options.selenium:
        os.environ['DJANGO_SELENIUM_TESTS'] = '1'

    # Dispatch to the requested mode; only the plain run reports an exit code.
    if options.bisect:
        bisect_tests(options.bisect, options, args)
    elif options.pair:
        paired_tests(options.pair, options, args)
    else:
        failures = django_tests(int(options.verbosity), options.interactive,
                                options.failfast, args)
        if failures:
            sys.exit(bool(failures))
zbyte64/django-dockitcms | dockitcms/widgetblock/fields.py | 1 | 1521 | from django import forms
from dockit import schema
from dockitcms.fields import BaseFieldEntry, ListFieldMixin
from dockitcms.widgetblock.models import Widget
class WidgetField(BaseFieldEntry):
    """Field entry that embeds a single Widget schema."""
    field_class = schema.SchemaField

    def get_field_kwargs(self):
        # Copy the inherited kwargs before adding the embedded schema.
        field_kwargs = dict(super(WidgetField, self).get_field_kwargs())
        field_kwargs['schema'] = Widget
        return field_kwargs

    class Meta:
        typed_key = 'WidgetField'
class ListWidgetField(ListFieldMixin, WidgetField):
    """List variant of WidgetField: each list item is one widget field."""

    def get_list_field_kwargs(self):
        inner_field = WidgetField.create_field(self)
        return {'subfield': inner_field}

    class Meta:
        typed_key = 'ListWidgetField'
class VisibleSchemaTypeField(schema.SchemaTypeField):
    """Schema type field exposed on forms as a choice/select input."""
    form_field_class = forms.ChoiceField
    form_widget_class = forms.Select

    def formfield_kwargs(self, **kwargs):
        merged = super(VisibleSchemaTypeField, self).formfield_kwargs(**kwargs)
        # The ChoiceField needs the registered schema choices to render.
        merged['choices'] = self.get_choices()
        return merged
class TypedWidgetField(BaseFieldEntry):
    """Widget field restricted to one concrete widget type chosen by the user."""
    widget_type = VisibleSchemaTypeField(schemas=Widget._meta.fields['widget_type'].schemas)
    field_class = schema.SchemaField

    def get_field_kwargs(self):
        field_kwargs = dict(super(TypedWidgetField, self).get_field_kwargs())
        # 'widget_type' configures this entry; it is not a kwarg of the field.
        field_kwargs.pop('widget_type', None)
        registry = Widget._meta.fields['widget_type'].schemas
        # Fall back to the generic Widget schema for unknown type keys.
        field_kwargs['schema'] = registry.get(self.widget_type, Widget)
        return field_kwargs

    class Meta:
        typed_key = 'TypedWidgetField'
| bsd-3-clause |
mhuwiler/rootauto | main/python/rooteventselector.py | 11 | 3418 | #!/usr/bin/env @python@
# ROOT command line tools: rooteventselector
# Author: Julien Ripoche
# Mail: julien.ripoche@u-psud.fr
# Date: 20/08/15
# Additions
# Author: Lawrence Lee
# Mail: lawrence.lee.jr@cern.ch
# Date: 1/4/16
"""Command line to copy subsets of trees from source ROOT files to new trees on a destination ROOT file"""
import cmdLineUtils
import sys
# Help strings
COMMAND_HELP = "Copy subsets of trees from source ROOT files"
FIRST_EVENT_HELP = "specify the first event to copy"
LAST_EVENT_HELP = "specify the last event to copy"
EPILOG="""Examples:
- rooteventselector source.root:tree dest.root
Copy the tree 'tree' from 'source.root' to 'dest.root'.
- rooteventselector -f 101 source.root:tree dest.root
Copy a subset of the tree 'tree' from 'source.root' to 'dest.root'. The new tree contains events from the old tree except the first hundred.
- rooteventselector -l 100 source.root:tree dest.root
Copy a subset of the tree 'tree' from 'source.root' to 'dest.root'. The new tree contains the first hundred events from the old tree.
- rooteventselector --recreate source.root:tree dest.root
Recreate the destination file 'dest.root' and copy the tree 'tree' from 'source.root' to 'dest.root'.
- rooteventselector -c 1 source.root:tree dest.root
Change the compression factor of the destination file 'dest.root' and copy the tree 'tree' from 'source.root' to 'dest.root'. For more information about compression settings of ROOT file, please look at the reference guide available on the ROOT site.
- rooteventselector -s "(branch1Value > 100)&&( branch2Value )" source.root:tree dest.root
Copy the tree 'tree' from 'source.root' to 'dest.root' and apply a selection to the output tree.
- rooteventselector -e "muon_*" source.root:tree dest.root
Copy the tree 'tree' from 'source.root' to 'dest.root' and remove branches matching "muon_*"
- rooteventselector -e "*" -i "muon_*" source.root:tree dest.root
Copy the tree 'tree' from 'source.root' to 'dest.root' and only write branches matching "muon_*"
"""
def execute():
    """Build the command line parser and run the event-selection copy.

    Returns the status code produced by cmdLineUtils.rootEventselector.
    """
    # Collect arguments with the module argparse (shared source/dest parser).
    parser = cmdLineUtils.getParserSourceDest(COMMAND_HELP, EPILOG)
    parser.add_argument("-c", "--compress", type=int, help=cmdLineUtils.COMPRESS_HELP)
    parser.add_argument("--recreate", help=cmdLineUtils.RECREATE_HELP, action="store_true")
    parser.add_argument("-f", "--first", type=int, default=0, help=FIRST_EVENT_HELP)
    parser.add_argument("-l", "--last", type=int, default=-1, help=LAST_EVENT_HELP)
    parser.add_argument("-s", "--selection", default="")
    parser.add_argument("-i", "--branchinclude", default="")
    parser.add_argument("-e", "--branchexclude", default="")

    # Normalise the parsed arguments into source/destination specs + options.
    sources, dest_file, dest_path, opts = \
        cmdLineUtils.getSourceDestListOptDict(parser)

    # Hand over to the shared implementation of the copy.
    return cmdLineUtils.rootEventselector(
        sources, dest_file, dest_path,
        compress=opts["compress"], recreate=opts["recreate"],
        first=opts["first"], last=opts["last"],
        selectionString=opts["selection"],
        branchinclude=opts["branchinclude"],
        branchexclude=opts["branchexclude"])

# Exit with the tool's status code.
sys.exit(execute())
vanloswang/linux | scripts/gdb/linux/symbols.py | 588 | 6302 | #
# gdb helper commands and functions for Linux kernel debugging
#
# load kernel and module symbols
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import os
import re
from linux import modules
# Older gdb builds lack the Breakpoint Python API; only define the helper
# class when it is available.
if hasattr(gdb, 'Breakpoint'):
    class LoadModuleBreakpoint(gdb.Breakpoint):
        """Internal breakpoint that (re)loads module symbols on module init."""

        def __init__(self, spec, gdb_command):
            super(LoadModuleBreakpoint, self).__init__(spec, internal=True)
            # Stay quiet: the breakpoint exists only to trigger symbol loading.
            self.silent = True
            # The LxSymbols command instance owning the module file cache.
            self.gdb_command = gdb_command

        def stop(self):
            # "mod" is a local variable of the kernel function we stopped in.
            module = gdb.parse_and_eval("mod")
            module_name = module['name'].string()
            cmd = self.gdb_command

            # enforce update if object file is not found
            cmd.module_files_updated = False

            # Disable pagination while reporting symbol (re-)loading.
            # The console input is blocked in this context so that we would
            # get stuck waiting for the user to acknowledge paged output.
            show_pagination = gdb.execute("show pagination", to_string=True)
            pagination = show_pagination.endswith("on.\n")
            gdb.execute("set pagination off")

            if module_name in cmd.loaded_modules:
                # Reloading an already-known module: refresh everything so
                # stale addresses from the previous load are dropped.
                gdb.write("refreshing all symbols to reload module "
                          "'{0}'\n".format(module_name))
                cmd.load_all_symbols()
            else:
                cmd.load_module_symbols(module)

            # restore pagination state
            gdb.execute("set pagination %s" % ("on" if pagination else "off"))

            # Never actually halt execution at this breakpoint.
            return False
class LxSymbols(gdb.Command):
    """(Re-)load symbols of Linux kernel and currently loaded modules.

The kernel (vmlinux) is taken from the current working directly. Modules (.ko)
are scanned recursively, starting in the same directory. Optionally, the module
search path can be extended by a space separated list of paths passed to the
lx-symbols command."""

    module_paths = []          # directories scanned for .ko files
    module_files = []          # cached absolute paths of discovered .ko files
    module_files_updated = False  # whether the cache reflects module_paths
    loaded_modules = []        # names of modules whose symbols are loaded
    breakpoint = None          # LoadModuleBreakpoint on do_init_module

    def __init__(self):
        super(LxSymbols, self).__init__("lx-symbols", gdb.COMMAND_FILES,
                                        gdb.COMPLETE_FILENAME)

    def _update_module_files(self):
        """Rescan every module search path and rebuild the .ko file cache."""
        self.module_files = []
        for path in self.module_paths:
            gdb.write("scanning for modules in {0}\n".format(path))
            for root, dirs, files in os.walk(path):
                for name in files:
                    if name.endswith(".ko"):
                        self.module_files.append(root + "/" + name)
        self.module_files_updated = True

    def _get_module_file(self, module_name):
        """Return the cached .ko path for module_name, or None.

        In-kernel module names use '_' where the file name may use '-',
        so match either character.
        """
        module_pattern = ".*/{0}\.ko$".format(
            module_name.replace("_", r"[_\-]"))
        for name in self.module_files:
            if re.match(module_pattern, name) and os.path.exists(name):
                return name
        return None

    def _section_arguments(self, module):
        """Build the '-s <section> <addr>' arguments for add-symbol-file."""
        try:
            sect_attrs = module['sect_attrs'].dereference()
        except gdb.error:
            # Kernel built without section attributes; addresses unavailable.
            return ""
        attrs = sect_attrs['attrs']
        section_name_to_address = {
            attrs[n]['name'].string(): attrs[n]['address']
            for n in range(int(sect_attrs['nsections']))}
        args = []
        for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
            address = section_name_to_address.get(section_name)
            if address:
                args.append(" -s {name} {addr}".format(
                    name=section_name, addr=str(address)))
        return "".join(args)

    def load_module_symbols(self, module):
        """Load the symbol file of one module at its load address."""
        module_name = module['name'].string()
        # str(module_core) looks like "0x... <symbol>"; keep only the address.
        module_addr = str(module['module_core']).split()[0]

        module_file = self._get_module_file(module_name)
        if not module_file and not self.module_files_updated:
            self._update_module_files()
            module_file = self._get_module_file(module_name)

        if module_file:
            # Fixed: both format strings had the literal "(unknown)" where the
            # {filename} placeholder belongs (the filename=... kwarg was passed
            # but unused), so add-symbol-file was issued with a bogus path.
            gdb.write("loading @{addr}: {filename}\n".format(
                addr=module_addr, filename=module_file))
            cmdline = "add-symbol-file {filename} {addr}{sections}".format(
                filename=module_file,
                addr=module_addr,
                sections=self._section_arguments(module))
            gdb.execute(cmdline, to_string=True)
            if module_name not in self.loaded_modules:
                self.loaded_modules.append(module_name)
        else:
            gdb.write("no module object found for '{0}'\n".format(module_name))

    def load_all_symbols(self):
        """Reload vmlinux and the symbols of every currently loaded module."""
        gdb.write("loading vmlinux\n")

        # Dropping symbols will disable all breakpoints. So save their states
        # and restore them afterward.
        saved_states = []
        if hasattr(gdb, 'breakpoints') and not gdb.breakpoints() is None:
            for bp in gdb.breakpoints():
                saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})

        # drop all current symbols and reload vmlinux
        gdb.execute("symbol-file", to_string=True)
        gdb.execute("symbol-file vmlinux")

        self.loaded_modules = []
        module_list = modules.module_list()
        if not module_list:
            gdb.write("no modules found\n")
        else:
            [self.load_module_symbols(module) for module in module_list]

        for saved_state in saved_states:
            saved_state['breakpoint'].enabled = saved_state['enabled']

    def invoke(self, arg, from_tty):
        """Entry point of the lx-symbols command; arg is the extra search path list."""
        self.module_paths = arg.split()
        self.module_paths.append(os.getcwd())

        # enforce update of the .ko cache on the next lookup
        self.module_files = []
        self.module_files_updated = False

        self.load_all_symbols()

        # Re-arm automatic symbol loading on future module inits.
        if hasattr(gdb, 'Breakpoint'):
            if self.breakpoint is not None:
                self.breakpoint.delete()
                self.breakpoint = None
            self.breakpoint = LoadModuleBreakpoint(
                "kernel/module.c:do_init_module", self)
        else:
            gdb.write("Note: symbol update on module loading not supported "
                      "with this gdb version\n")


LxSymbols()
| gpl-2.0 |
mhrivnak/pulp | client_lib/pulp/client/launcher.py | 1 | 8499 | """
Entry point for both the admin and consumer clients. The config file location
is passed in and its contents are used to drive the rest of the client execution.
"""
import errno
from gettext import gettext as _
import logging
import logging.handlers
from optparse import OptionParser
import os
import stat
import sys
from okaara.prompt import COLOR_CYAN, COLOR_LIGHT_CYAN
from pulp.bindings.bindings import Bindings
from pulp.bindings.server import PulpConnection
from pulp.client import constants
from pulp.client.extensions.core import PulpPrompt, PulpCli, ClientContext, WIDTH_TERMINAL
from pulp.client.extensions.exceptions import ExceptionHandler
import pulp.client.extensions.loader as extensions_loader
from pulp.common.config import Config
def main(config, exception_handler_class=ExceptionHandler):
    """
    Entry point into the launcher. Any extra necessary values will be pulled
    from the given configuration files.

    @param config: The CLI configuration.
    @type  config: Config

    @param exception_handler_class: class used to translate exceptions into
           user-facing messages; defaults to ExceptionHandler

    @return: exit code suitable to return to the shell launching the client
    """
    ensure_user_pulp_dir()

    # Command line argument handling
    parser = OptionParser()
    parser.disable_interspersed_args()
    parser.add_option('-u', '--username', dest='username', action='store', default=None,
                      help=_('username for the Pulp server; if used will bypass the stored '
                             'certificate and override a username specified in ~/.pulp/admin.conf'))
    parser.add_option('-p', '--password', dest='password', action='store', default=None,
                      help=_('password for the Pulp server; must be used with --username. '
                             'if used will bypass the stored certificate and override a password '
                             'specified in ~/.pulp/admin.conf'))
    parser.add_option('--config', dest='config', default=None,
                      help=_('absolute path to the configuration file'))
    parser.add_option('--map', dest='print_map', action='store_true', default=False,
                      help=_('prints a map of the CLI sections and commands'))
    parser.add_option(
        '-v', dest='verbose', action='count',
        help=_('enables verbose output; use twice for increased verbosity with debug information'))

    options, args = parser.parse_args()

    # Configuration and Logging
    # A --config file overrides/extends the packaged defaults.
    if options.config is not None:
        config.update(Config(options.config))
    logger = _initialize_logging(verbose=options.verbose)

    # General UI pieces
    prompt = _create_prompt(config)
    exception_handler = exception_handler_class(prompt, config)

    # REST Bindings
    username = options.username
    password = options.password
    if not username and not password:
        # Try to get username/password from config if not explicitly set. username and password are
        # not included by default so we need to catch KeyError Exceptions.
        try:
            username = config['auth']['username']
            password = config['auth']['password']
        except KeyError:
            pass

    if username and not password:
        # Interactive password prompt; aborting cancels the login entirely.
        prompt_msg = 'Enter password: '
        password = prompt.prompt_password(_(prompt_msg))
        if password is prompt.ABORT:
            prompt.render_spacer()
            prompt.write(_('Login cancelled'))
            sys.exit(os.EX_NOUSER)

    server = _create_bindings(config, logger, username, password, verbose=options.verbose)

    # Client context
    context = ClientContext(server, config, logger, prompt, exception_handler)
    cli = PulpCli(context)
    context.cli = cli

    # Load extensions into the UI in the context
    extensions_dir = config['filesystem']['extensions_dir']
    extensions_dir = os.path.expanduser(extensions_dir)

    role = config['client']['role']
    try:
        extensions_loader.load_extensions(extensions_dir, context, role)
    except extensions_loader.LoadFailed, e:
        prompt.write(
            _('The following extensions failed to load: %(f)s' % {'f': ', '.join(e.failed_packs)}))
        prompt.write(_('More information on the failures may be found by using -v option one or '
                       'more times'))
        return os.EX_OSFILE

    # Launch the appropriate UI (add in shell support here later)
    if options.print_map:
        cli.print_cli_map(section_color=COLOR_LIGHT_CYAN, command_color=COLOR_CYAN)
        return os.EX_OK
    else:
        code = cli.run(args)
        return code
def ensure_user_pulp_dir():
    """
    Creates ~/.pulp/ if it doesn't already exist.
    Writes a warning to stderr if ~/.pulp/ has unsafe permissions.

    This has to be run before the prompt object gets created, hence the old-school error reporting.

    Several other places try to access ~/.pulp, both from pulp-admin and pulp-consumer. The best
    we can do in order to create it once with the right permissions is to do call this function
    early.
    """
    path = os.path.expanduser(constants.USER_CONFIG_DIR)
    # 0700: the directory may hold credentials, so restrict it to the owner.
    desired_mode = stat.S_IRUSR + stat.S_IWUSR + stat.S_IXUSR
    try:
        stats = os.stat(path)
        actual_mode = stat.S_IMODE(stats.st_mode)
        if actual_mode != desired_mode:
            sys.stderr.write(_('Warning: path should have mode 0700 because it may contain '
                               'sensitive information: %(p)s\n\n' % {'p': path}))
    except Exception, e:
        # if it doesn't exist, make it
        if isinstance(e, OSError) and e.errno == errno.ENOENT:
            try:
                os.mkdir(path, 0700)
            except Exception, e:
                sys.stderr.write(_('Failed to create path %(p)s: %(e)s\n\n' %
                                   {'p': path, 'e': str(e)}))
                sys.exit(1)
        else:
            # Any other stat failure (permissions, I/O, ...) is fatal too.
            sys.stderr.write(_('Failed to access path %(p)s: %(e)s\n\n' % {'p': path, 'e': str(e)}))
            sys.exit(1)
def _initialize_logging(verbose=None):
"""
@return: configured cli logger
"""
cli_log_handler = logging.StreamHandler(sys.stderr)
cli_log_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
cli_logger = logging.getLogger('pulp')
cli_logger.addHandler(cli_log_handler)
if not verbose:
cli_logger.setLevel(logging.FATAL)
elif verbose == 1:
cli_logger.setLevel(logging.INFO)
else:
cli_logger.setLevel(logging.DEBUG)
return cli_logger
def _create_bindings(config, cli_logger, username, password, verbose=None):
    """
    @return: bindings with a fully configured Pulp connection
    @rtype:  pulp.bindings.bindings.Bindings
    """
    # Extract all of the necessary values
    hostname = config['server']['host']
    port = int(config['server']['port'])

    cert_dir = config['filesystem']['id_cert_dir']
    cert_name = config['filesystem']['id_cert_filename']

    cert_dir = os.path.expanduser(cert_dir)  # this will likely be in a user directory
    cert_filename = os.path.join(cert_dir, cert_name)

    # If the certificate doesn't exist, don't pass it to the connection creation
    if not os.path.exists(cert_filename):
        cert_filename = None

    # At verbosity 2+ also log every REST request/response to stderr.
    api_logger = None
    if verbose and verbose > 1:
        api_log_handler = logging.StreamHandler(sys.stderr)
        api_log_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))

        api_logger = logging.getLogger('call_log')
        api_logger.addHandler(api_log_handler)
        api_logger.setLevel(logging.INFO)

    # Create the connection and bindings
    verify_ssl = config.parse_bool(config['server']['verify_ssl'])
    ca_path = config['server']['ca_path']
    conn = PulpConnection(
        hostname, port, username=username, password=password, cert_filename=cert_filename,
        logger=cli_logger, api_responses_logger=api_logger, verify_ssl=verify_ssl,
        ca_path=ca_path)
    bindings = Bindings(conn)

    return bindings
def _create_prompt(config):
    """
    Build the PulpPrompt instance passed throughout the UI, applying the
    color and wrapping preferences from the [output] config section.

    @rtype: PulpPrompt
    """
    use_color = config.parse_bool(config['output']['enable_color'])
    fixed_width = int(config['output']['wrap_width'])

    # Either follow the terminal width or use the configured fixed width.
    if config.parse_bool(config['output']['wrap_to_terminal']):
        wrap_width = WIDTH_TERMINAL
    else:
        wrap_width = fixed_width

    return PulpPrompt(enable_color=use_color, wrap_width=wrap_width,
                      fallback_wrap=fixed_width)
duanwujie/depot_tools | breakpad.py | 47 | 3941 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Breakpad for Python.
Sends a notification when a process stops on an exception.
It is only enabled when all these conditions are met:
1. hostname finishes with '.google.com' or 'chromium.org'
2. main module name doesn't contain the word 'test'
3. no NO_BREAKPAD environment variable is defined
"""
import atexit
import getpass
import os
import socket
import sys
import time
import traceback
import urllib
import urllib2
# Configure these values.
# Endpoint that receives crash reports and profiling data.
DEFAULT_URL = 'https://chromium-status.appspot.com'

# Global variable to prevent double registration.
_REGISTERED = False

# Process start time; used at exit to compute the run duration.
_TIME_STARTED = time.time()
# Fully qualified host name; reported with crashes and used for gating below.
_HOST_NAME = socket.getfqdn()

# Skip unit tests and we don't want anything from non-googler.
IS_ENABLED = (
    not 'test' in getattr(sys.modules['__main__'], '__file__', '') and
    not 'NO_BREAKPAD' in os.environ and
    _HOST_NAME.endswith(('.google.com', '.chromium.org')))
def post(url, params):
    """HTTP POST with timeout when it's supported."""
    # Make sure to not send anything for non googler.
    if not IS_ENABLED:
        return
    kwargs = {}
    # urlopen() only grew a timeout argument in Python 2.6.
    if (sys.version_info[0] * 10 + sys.version_info[1]) >= 26:
        kwargs['timeout'] = 4
    try:
        request = urllib2.urlopen(url, urllib.urlencode(params), **kwargs)
        body = request.read()
        request.close()
        return body
    except IOError:
        return 'There was a failure while trying to send the stack trace. Too bad.'
def FormatException(e):
    """Returns a human readable form of an exception.

    Adds the maximum number of interesting information in the safest way."""
    # Start from repr() so we have something even if str() fails below.
    try:
        text = repr(e)
    except Exception:
        text = ''
    try:
        text = str(e)
        if isinstance(e, Exception):
            # urllib exceptions, usually the HTTP headers.
            for attr, label in (('headers', 'Headers'),
                                ('url', 'Url'),
                                ('msg', 'Msg')):
                if hasattr(e, attr):
                    text += '\n%s: %s' % (label, getattr(e, attr))
            # The web page in some urllib exceptions.
            for meth, label in (('read', 'read()'), ('info', 'info()')):
                fn = getattr(e, meth, None)
                if callable(fn):
                    text += '\n%s: %s' % (label, fn())
    except Exception:
        # Best effort: keep whatever was accumulated so far.
        pass
    return text
def SendStack(last_tb, stack, url=None, maxlen=50, verbose=True):
    """Sends the stack trace to the breakpad server."""
    if not IS_ENABLED:
        return

    def emit(message):
        if verbose:
            print(message)

    emit('Sending crash report ...')
    report = {
        'args': sys.argv,
        'cwd': os.getcwd(),
        'exception': FormatException(last_tb),
        'host': _HOST_NAME,
        'stack': stack[0:4096],
        'user': getpass.getuser(),
        'version': sys.version,
    }
    # Echo a truncated view of each field, then the server's response.
    emit('\n'.join('  %s: %s' % (key, report[key][0:maxlen])
                   for key in sorted(report)))
    emit(post(url or DEFAULT_URL + '/breakpad', report))
def SendProfiling(duration, url=None):
    """Reports the process run duration to the profiling endpoint."""
    target = url or DEFAULT_URL + '/profiling'
    # Strip the hostname, keep only the domain part.
    domain = _HOST_NAME.split('.', 1)[-1]
    post(target, {
        'argv': ' '.join(sys.argv),
        'domain': domain,
        'duration': duration,
        'platform': sys.platform,
    })
def CheckForException():
  """Runs at exit: reports a pending exception, else long-run profiling."""
  last_value = getattr(sys, 'last_value', None)
  if not last_value:
    # No crash: only report runs that took unusually long.
    duration = time.time() - _TIME_STARTED
    if duration > 90:
      SendProfiling(duration)
    return
  if isinstance(last_value, KeyboardInterrupt):
    # A user abort is not a crash worth reporting.
    return
  last_tb = getattr(sys, 'last_traceback', None)
  if last_tb:
    SendStack(last_value, ''.join(traceback.format_tb(last_tb)))
def Register():
  """Registers the callback at exit. Calling it multiple times is no-op."""
  global _REGISTERED
  if _REGISTERED:
    # Already hooked up; atexit must not run the callback twice.
    return
  _REGISTERED = True
  atexit.register(CheckForException)
# Install the at-exit hook only for hosts where reporting is enabled.
if IS_ENABLED:
  Register()

# Uncomment this line if you want to test it out.
#Register()
| bsd-3-clause |
fernandog/Sick-Beard | lib/hachoir_parser/archive/rar.py | 90 | 13384 | """
RAR parser
Status: can only read higher-level attructures
Author: Christophe Gisquet
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (StaticFieldSet, FieldSet,
Bit, Bits, Enum,
UInt8, UInt16, UInt32, UInt64,
String, TimeDateMSDOS32,
NullBytes, NullBits, RawBytes)
from lib.hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.common.msdos import MSDOSFileAttr32
# Upper bound (in bytes) used when searching for the archive-end marker.
MAX_FILESIZE = 1000 * 1024 * 1024

# Human readable names for the block type byte (0x72..0x7B).
BLOCK_NAME = {
    0x72: "Marker",
    0x73: "Archive",
    0x74: "File",
    0x75: "Comment",
    0x76: "Extra info",
    0x77: "Subblock",
    0x78: "Recovery record",
    0x79: "Archive authenticity",
    0x7A: "New-format subblock",
    0x7B: "Archive end",
}

# Meaning of the "method" byte of a file header.
COMPRESSION_NAME = {
    0x30: "Storing",
    0x31: "Fastest compression",
    0x32: "Fast compression",
    0x33: "Normal compression",
    0x34: "Good compression",
    0x35: "Best compression"
}

# "host_os" values whose file attributes use the MS-DOS layout.
OS_MSDOS = 0
OS_WIN32 = 2

# Operating system used for archiving ("host_os" byte).
OS_NAME = {
    0: "MS DOS",
    1: "OS/2",
    2: "Win32",
    3: "Unix",
}

# Meaning of the 3-bit "dictionary_size" field of the file flags.
DICTIONARY_SIZE = {
    0: "Dictionary size 64 Kb",
    1: "Dictionary size 128 Kb",
    2: "Dictionary size 256 Kb",
    3: "Dictionary size 512 Kb",
    4: "Dictionary size 1024 Kb",
    7: "File is a directory",
}
def formatRARVersion(field):
    """Decode the one-byte RAR version number as "major.minor"."""
    major, minor = divmod(field.value, 10)
    return "%u.%u" % (major, minor)
def commonFlags(s):
    """Yield the two flag bits shared by every RAR block type."""
    yield Bit(s, "has_added_size", "Additional field indicating additional size")
    yield Bit(s, "is_ignorable", "Old versions of RAR should ignore this block when copying data")
class ArchiveFlags(StaticFieldSet):
    """The 16 flag bits of an archive-info (0x73) block."""
    format = (
        (Bit, "vol", "Archive volume"),
        (Bit, "has_comment", "Whether there is a comment"),
        # NOTE(review): description looks copy-pasted from "vol"; probably
        # meant "Archive locked" -- confirm against the RAR format spec.
        (Bit, "is_locked", "Archive volume"),
        (Bit, "is_solid", "Whether files can be extracted separately"),
        (Bit, "new_numbering", "New numbering, or compressed comment"), # From unrar
        (Bit, "has_authenticity_information", "The integrity/authenticity of the archive can be checked"),
        (Bit, "is_protected", "The integrity/authenticity of the archive can be checked"),
        (Bit, "is_passworded", "Needs a password to be decrypted"),
        (Bit, "is_first_vol", "Whether it is the first volume"),
        (Bit, "is_encrypted", "Whether the encryption version is present"),
        (NullBits, "internal", 6, "Reserved for 'internal use'")
    )
def archiveFlags(s):
    """Yield the flag field set of an archive-info block."""
    yield ArchiveFlags(s, "flags", "Archiver block flags")
def archiveHeader(s):
    """Yield the reserved fields of an archive-info block header."""
    yield NullBytes(s, "reserved[]", 2, "Reserved word")
    yield NullBytes(s, "reserved[]", 4, "Reserved dword")
def commentHeader(s):
    """Yield the fixed fields of a comment (0x75) block header."""
    yield filesizeHandler(UInt16(s, "total_size", "Comment header size + comment size"))
    yield filesizeHandler(UInt16(s, "uncompressed_size", "Uncompressed comment size"))
    yield UInt8(s, "required_version", "RAR version needed to extract comment")
    yield UInt8(s, "packing_method", "Comment packing method")
    yield UInt16(s, "comment_crc16", "Comment CRC")
def commentBody(s):
    """Yield the compressed comment bytes that follow the comment header."""
    remaining = s["total_size"].value - s.current_size
    if remaining <= 0:
        return
    yield RawBytes(s, "comment_data", remaining, "Compressed comment data")
def signatureHeader(s):
    """Yield the fields of a signature (0x79) block header."""
    yield TimeDateMSDOS32(s, "creation_time")
    yield filesizeHandler(UInt16(s, "arc_name_size"))
    yield filesizeHandler(UInt16(s, "user_name_size"))
def recoveryHeader(s):
    """Yield the fields of a recovery-record (0x78) block header."""
    yield filesizeHandler(UInt32(s, "total_size"))
    yield textHandler(UInt8(s, "version"), hexadecimal)
    yield UInt16(s, "rec_sectors")
    yield UInt32(s, "total_blocks")
    yield RawBytes(s, "mark", 8)
def avInfoHeader(s):
    """Yield the fields of an extra-information (0x76) block header."""
    yield filesizeHandler(UInt16(s, "total_size", "Total block size"))
    yield UInt8(s, "version", "Version needed to decompress", handler=hexadecimal)
    yield UInt8(s, "method", "Compression method", handler=hexadecimal)
    yield UInt8(s, "av_version", "Version for AV", handler=hexadecimal)
    yield UInt32(s, "av_crc", "AV info CRC32", handler=hexadecimal)
def avInfoBody(s):
    """Yield the AV info payload that follows the header."""
    remaining = s["total_size"].value - s.current_size
    if remaining <= 0:
        return
    yield RawBytes(s, "av_info_data", remaining, "AV info")
class FileFlags(FieldSet):
    """The 16 flag bits of a file (or new-format subblock) header."""
    static_size = 16
    def createFields(self):
        yield Bit(self, "continued_from", "File continued from previous volume")
        yield Bit(self, "continued_in", "File continued in next volume")
        yield Bit(self, "is_encrypted", "File encrypted with password")
        yield Bit(self, "has_comment", "File comment present")
        yield Bit(self, "is_solid", "Information from previous files is used (solid flag)")
        # The 3 following lines are what blocks more staticity
        yield Enum(Bits(self, "dictionary_size", 3, "Dictionary size"), DICTIONARY_SIZE)
        # The has_added_size / is_ignorable bits common to all block types.
        for bit in commonFlags(self):
            yield bit
        yield Bit(self, "is_large", "file64 operations needed")
        yield Bit(self, "is_unicode", "Filename also encoded using Unicode")
        yield Bit(self, "has_salt", "Has salt for encryption")
        yield Bit(self, "uses_file_version", "File versioning is used")
        yield Bit(self, "has_ext_time", "Extra time ??")
        yield Bit(self, "has_ext_flags", "Extra flag ??")
def fileFlags(s):
    """Yield the flag field set of a file block."""
    yield FileFlags(s, "flags", "File block flags")
class ExtTime(FieldSet):
    """Extended time info: a 16-bit flags word, then up to four timestamps.

    Each timestamp is described by one 4-bit nibble of ``time_flags``
    (presumably mtime/ctime/atime/arctime in that order -- TODO confirm
    against unrar): bit 3 marks the entry as present, bits 0-1 give the
    number of extra precision bytes.
    """
    def createFields(self):
        yield textHandler(UInt16(self, "time_flags", "Flags for extended time"), hexadecimal)
        flags = self["time_flags"].value
        # NOTE: ``xrange`` -- this module is Python 2 code.
        for index in xrange(4):
            rmode = flags >> ((3-index)*4)
            if rmode & 8:
                # index 0 has no DOS time of its own (it refines the
                # "ftime" field already parsed in the file header).
                if index:
                    yield TimeDateMSDOS32(self, "dos_time[]", "DOS Time")
                if rmode & 3:
                    yield RawBytes(self, "remainder[]", rmode & 3, "Time remainder")
def specialHeader(s, is_file):
    """Yield the header fields shared by file blocks and subblocks.

    ``is_file`` adds the file-only trailing fields (salt, extended time).
    The yield order is the on-disk field order and must not change.
    """
    yield filesizeHandler(UInt32(s, "compressed_size", "Compressed size (bytes)"))
    yield filesizeHandler(UInt32(s, "uncompressed_size", "Uncompressed size (bytes)"))
    yield Enum(UInt8(s, "host_os", "Operating system used for archiving"), OS_NAME)
    yield textHandler(UInt32(s, "crc32", "File CRC32"), hexadecimal)
    yield TimeDateMSDOS32(s, "ftime", "Date and time (MS DOS format)")
    yield textHandler(UInt8(s, "version", "RAR version needed to extract file"), formatRARVersion)
    yield Enum(UInt8(s, "method", "Packing method"), COMPRESSION_NAME)
    yield filesizeHandler(UInt16(s, "filename_length", "File name size"))
    # Attribute layout depends on the archiving OS.
    if s["host_os"].value in (OS_MSDOS, OS_WIN32):
        yield MSDOSFileAttr32(s, "file_attr", "File attributes")
    else:
        yield textHandler(UInt32(s, "file_attr", "File attributes"), hexadecimal)
    # Start additional field from unrar
    if s["flags/is_large"].value:
        yield filesizeHandler(UInt64(s, "large_size", "Extended 64bits filesize"))
    # End additional field
    size = s["filename_length"].value
    if size > 0:
        if s["flags/is_unicode"].value:
            charset = "UTF-8"
        else:
            charset = "ISO-8859-15"
        yield String(s, "filename", size, "Filename", charset=charset)
    # Start additional fields from unrar - file only
    if is_file:
        if s["flags/has_salt"].value:
            yield textHandler(UInt8(s, "salt", "Salt"), hexadecimal)
        if s["flags/has_ext_time"].value:
            yield ExtTime(s, "extra_time", "Extra time info")
def fileHeader(s):
    """Header parser for file (0x74) blocks: the common special header."""
    return specialHeader(s, True)
def fileBody(s):
    """Yield the compressed data payload of a file block."""
    total = s["compressed_size"].value
    if s["flags/is_large"].value:
        # 64-bit sizes are split across two fields.
        total += s["large_size"].value
    if total > 0:
        yield RawBytes(s, "compressed_data", total, "File compressed data")
def fileDescription(s):
    """Build the one-line summary used as a file block's description."""
    name = s["filename"].display
    size = s["compressed_size"].display
    return "File entry: %s (%s)" % (name, size)
def newSubHeader(s):
    """Header parser for subblocks: the special header without file extras."""
    return specialHeader(s, False)
class EndFlags(StaticFieldSet):
    """The 16 flag bits of an archive-end (0x7B) block."""
    format = (
        (Bit, "has_next_vol", "Whether there is another next volume"),
        (Bit, "has_data_crc", "Whether a CRC value is present"),
        (Bit, "rev_space"),
        (Bit, "has_vol_number", "Whether the volume number is present"),
        (Bits, "unused[]", 4),
        (Bit, "has_added_size", "Additional field indicating additional size"),
        (Bit, "is_ignorable", "Old versions of RAR should ignore this block when copying data"),
        (Bits, "unused[]", 6),
    )
def endFlags(s):
    """Yield the flag field set of an archive-end block."""
    yield EndFlags(s, "flags", "End block flags")
class BlockFlags(FieldSet):
    """Generic 16-bit flags used by blocks without a specific flag layout."""
    static_size = 16
    def createFields(self):
        yield textHandler(Bits(self, "unused[]", 8, "Unused flag bits"), hexadecimal)
        yield Bit(self, "has_added_size", "Additional field indicating additional size")
        yield Bit(self, "is_ignorable", "Old versions of RAR should ignore this block when copying data")
        yield Bits(self, "unused[]", 6)
class Block(FieldSet):
    """One RAR block: CRC16, type byte, flags, size, then a type-specific
    header and body.

    ``BLOCK_INFO`` maps the block type byte to a tuple
    ``(field name, description, flag parser, header parser, body parser)``
    where ``None`` selects the default implementations defined below.
    """
    BLOCK_INFO = {
        # None means 'use default function'
        0x72: ("marker", "Archive header", None, None, None),
        0x73: ("archive_start", "Archive info", archiveFlags, archiveHeader, None),
        0x74: ("file[]", fileDescription, fileFlags, fileHeader, fileBody),
        0x75: ("comment[]", "Stray comment", None, commentHeader, commentBody),
        0x76: ("av_info[]", "Extra information", None, avInfoHeader, avInfoBody),
        0x77: ("sub_block[]", "Stray subblock", None, newSubHeader, fileBody),
        0x78: ("recovery[]", "Recovery block", None, recoveryHeader, None),
        0x79: ("signature", "Signature block", None, signatureHeader, None),
        0x7A: ("new_sub_block[]", "Stray new-format subblock", fileFlags,
               newSubHeader, fileBody),
        0x7B: ("archive_end", "Archive end block", endFlags, None, None),
    }

    def __init__(self, parent, name):
        FieldSet.__init__(self, parent, name)
        t = self["block_type"].value
        if t in self.BLOCK_INFO:
            self._name, desc, parseFlags, parseHeader, parseBody = \
                self.BLOCK_INFO[t]
            if callable(desc):
                self.createDescription = lambda: desc(self)
            elif desc:
                self._description = desc
            # Override the default parsers with the type-specific ones.
            if parseFlags:
                self.parseFlags = lambda: parseFlags(self)
            if parseHeader:
                self.parseHeader = lambda: parseHeader(self)
            if parseBody:
                self.parseBody = lambda: parseBody(self)
        else:
            # BUG FIX: the message formatted the builtin ``type`` (a class,
            # not an integer) with %u, which raised TypeError for every
            # unknown block; it also repeated the word "block".
            self.info("Processing as unknown block of type %u" % t)
        self._size = 8*self["block_size"].value
        # File (0x74) and new-format subblocks (0x7A) store their payload
        # after the header, so it counts towards the block size too.
        if t == 0x74 or t == 0x7A:
            self._size += 8*self["compressed_size"].value
            if "is_large" in self["flags"] and self["flags/is_large"].value:
                self._size += 8*self["large_size"].value
        elif "has_added_size" in self:
            self._size += 8*self["added_size"].value
        # TODO: check if any other member is needed here

    def createFields(self):
        """Parse the common header, then delegate to the parser callbacks."""
        yield textHandler(UInt16(self, "crc16", "Block CRC16"), hexadecimal)
        yield textHandler(UInt8(self, "block_type", "Block type"), hexadecimal)
        # Parse flags
        for field in self.parseFlags():
            yield field
        # Get block size
        yield filesizeHandler(UInt16(self, "block_size", "Block size"))
        # Parse remaining header
        for field in self.parseHeader():
            yield field
        # Finish header with stuff of unknown size
        size = self["block_size"].value - (self.current_size//8)
        if size > 0:
            yield RawBytes(self, "unknown", size, "Unknow data (UInt32 probably)")
        # Parse body
        for field in self.parseBody():
            yield field

    def createDescription(self):
        # BUG FIX: the field is named "block_type" (see createFields), so
        # ``self["type"]`` could never resolve for blocks that use this
        # default description.
        return "Block entry: %s" % self["block_type"].display

    def parseFlags(self):
        """Default flag parser: generic 16-bit block flags."""
        yield BlockFlags(self, "flags", "Block header flags")

    def parseHeader(self):
        """Default header parser: the optional 32-bit added-size field."""
        if "has_added_size" in self["flags"] and \
           self["flags/has_added_size"].value:
            yield filesizeHandler(UInt32(self, "added_size",
                                         "Supplementary block size"))

    def parseBody(self):
        """Parse what is left of the block."""
        size = self["block_size"].value - (self.current_size//8)
        if "has_added_size" in self["flags"] and \
           self["flags/has_added_size"].value:
            size += self["added_size"].value
        if size > 0:
            yield RawBytes(self, "body", size, "Body data")
class RarFile(Parser):
    """Hachoir parser for Roshal ARchive (RAR) files.

    Parses the archive as a flat sequence of :class:`Block` fields.
    """
    # Python 2 code: MAGIC is a byte string ("str" on py2).
    MAGIC = "Rar!\x1A\x07\x00"
    PARSER_TAGS = {
        "id": "rar",
        "category": "archive",
        "file_ext": ("rar",),
        "mime": (u"application/x-rar-compressed", ),
        "min_size": 7*8,
        "magic": ((MAGIC, 0),),
        "description": "Roshal archive (RAR)",
    }
    endian = LITTLE_ENDIAN

    def validate(self):
        """Accept the stream only if it starts with the RAR signature."""
        magic = self.MAGIC
        if self.stream.readBytes(0, len(magic)) != magic:
            return "Invalid magic"
        return True

    def createFields(self):
        """Yield blocks until the end of the stream."""
        while not self.eof:
            yield Block(self, "block[]")

    def createContentSize(self):
        """Locate the archive-end block signature to bound the content size."""
        start = 0
        end = MAX_FILESIZE * 8
        # Byte pattern of a standard archive-end (0x7B) block.
        pos = self.stream.searchBytes("\xC4\x3D\x7B\x00\x40\x07\x00", start, end)
        if pos is not None:
            return pos + 7*8
        return None
| gpl-3.0 |
sarthfrey/Texty | lib/werkzeug/wsgi.py | 80 | 37881 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import posixpath
import mimetypes
from itertools import chain
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial, update_wrapper
from werkzeug._compat import iteritems, text_type, string_types, \
implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \
wsgi_get_bytes, try_coerce_native, PY2
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.http import is_resource_modified, http_date
from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join
from werkzeug.filesystem import get_filesystem_encoding
def responder(f):
    """Marks a function as responder.  Decorate a function with it and it
    will automatically call the return value as WSGI application, passing
    the last two positional arguments (environ, start_response) through.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """
    def wrapper(*args):
        return f(*args)(*args[-2:])
    return update_wrapper(wrapper, f)
def get_current_url(environ, root_only=False, strip_querystring=False,
                    host_only=False, trusted_hosts=None):
    """Recreate the full URL as IRI for the current request or parts of it.

    >>> from werkzeug.test import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'

    Optionally the host is verified against ``trusted_hosts`` and a
    :exc:`~werkzeug.exceptions.SecurityError` is raised when it does not
    match.  The returned string is an IRI, not an URI; pass it through
    :func:`~werkzeug.urls.iri_to_uri` for an ASCII-only representation.

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` if you don't want the querystring.
    :param host_only: set to `True` if the host URL should be returned.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    parts = [environ['wsgi.url_scheme'], '://',
             get_host(environ, trusted_hosts)]
    if host_only:
        return uri_to_iri(''.join(parts) + '/')
    parts.append(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
    parts.append('/')
    if not root_only:
        parts.append(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
        if not strip_querystring:
            qs = get_query_string(environ)
            if qs:
                parts.append('?' + qs)
    return uri_to_iri(''.join(parts))
def host_is_trusted(hostname, trusted_list):
    """Check if a host is trusted against a list.  Ports are normalized
    away before comparison, and entries starting with a dot also match
    every subdomain.

    .. versionadded:: 0.9

    :param hostname: the hostname to check
    :param trusted_list: a list of hostnames (or a single hostname) to
                         check against.
    """
    if not hostname:
        return False
    if isinstance(trusted_list, string_types):
        trusted_list = [trusted_list]

    def _normalize(host):
        # Drop any ':port' suffix, then IDNA-encode for comparison.
        if ':' in host:
            host = host.rsplit(':', 1)[0]
        return _encode_idna(host)

    hostname = _normalize(hostname)
    for entry in trusted_list:
        suffix_match = entry.startswith('.')
        if suffix_match:
            entry = entry[1:]
        entry = _normalize(entry)
        if entry == hostname:
            return True
        if suffix_match and hostname.endswith('.' + entry):
            return True
    return False
def get_host(environ, trusted_hosts=None):
    """Return the real host for the given WSGI environment.  Checks the
    `X-Forwarded-Host` header first, then the normal `Host` header, and
    finally the `SERVER_NAME` environment variable (first one found wins).

    Optionally the host is verified against ``trusted_hosts``; if it is not
    trusted a :exc:`~werkzeug.exceptions.SecurityError` is raised.

    :param environ: the WSGI environment to get the host of.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    if 'HTTP_X_FORWARDED_HOST' in environ:
        # Proxies may append hosts; the first entry is the original one.
        host = environ['HTTP_X_FORWARDED_HOST'].split(',', 1)[0].strip()
    elif 'HTTP_HOST' in environ:
        host = environ['HTTP_HOST']
    else:
        host = environ['SERVER_NAME']
        scheme_port = (environ['wsgi.url_scheme'], environ['SERVER_PORT'])
        # Only append the port when it is not the default for the scheme.
        if scheme_port not in (('https', '443'), ('http', '80')):
            host += ':' + environ['SERVER_PORT']
    if trusted_hosts is not None:
        if not host_is_trusted(host, trusted_hosts):
            from werkzeug.exceptions import SecurityError
            raise SecurityError('Host "%s" is not trusted' % host)
    return host
def get_content_length(environ):
    """Return the content length from the WSGI environment as a
    non-negative integer, or `None` if it is missing or malformed.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the content length from.
    """
    raw = environ.get('CONTENT_LENGTH')
    if raw is None:
        return None
    try:
        length = int(raw)
    except (ValueError, TypeError):
        # A malformed header is treated the same as a missing one.
        return None
    return max(0, length)
def get_input_stream(environ, safe_fallback=True):
    """Return the input stream from the WSGI environment, wrapped in the
    most sensible way possible: in most cases the stream is made safe to
    read from without taking the content length into account.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: whether an empty stream is used as fallback when
                          the input cannot be wrapped safely; if `False`
                          the raw WSGI input stream is returned instead.
    """
    stream = environ['wsgi.input']
    content_length = get_content_length(environ)

    # A wsgi extension that tells us the input is terminated: the stream
    # can be returned unchanged since reading to the end is safe.
    if environ.get('wsgi.input_terminated'):
        return stream

    # Without a content length reads cannot be bounded; fall back to an
    # empty stream unless the caller explicitly asked for the raw one.
    if content_length is None:
        return _empty_stream if safe_fallback else stream

    # Otherwise limit the stream to the content length.
    return LimitedStream(stream, content_length)
def get_query_string(environ):
    """Returns the `QUERY_STRING` from the WSGI environment. This also takes
    care about the WSGI decoding dance on Python 3 environments as a
    native string. The string returned will be restricted to ASCII
    characters.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the query string from.
    """
    qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
    # QUERY_STRING really should be ascii safe but some browsers
    # will send us some unicode stuff (I am looking at you IE).
    # In that case we want to urllib quote it badly.
    return try_coerce_native(url_quote(qs, safe=':&%=+$!*\'(),'))
def get_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the `PATH_INFO` from the WSGI environment and properly
    decodes it. This also takes care about the WSGI decoding dance
    on Python 3 environments. if the `charset` is set to `None` a
    bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path info, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    # PATH_INFO arrives latin1-wrapped on py3; unwrap to bytes first.
    path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
    return to_unicode(path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
    """Returns the `SCRIPT_NAME` from the WSGI environment and properly
    decodes it. This also takes care about the WSGI decoding dance
    on Python 3 environments. if the `charset` is set to `None` a
    bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    # Same unwrap-then-decode dance as get_path_info().
    path = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
    return to_unicode(path, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.
    If the `charset` is set to `None` a bytestring is returned.

    Empty segments (``'/foo//bar``) are ignored but properly pushed to the
    `SCRIPT_NAME`:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is modified.
    """
    path = environ.get('PATH_INFO')
    if not path:
        return None

    script_name = environ.get('SCRIPT_NAME', '')

    # Move any run of leading slashes over to SCRIPT_NAME.
    stripped = path.lstrip('/')
    script_name += '/' * (len(path) - len(stripped))
    path = stripped

    if '/' in path:
        segment, remainder = path.split('/', 1)
        environ['PATH_INFO'] = '/' + remainder
        environ['SCRIPT_NAME'] = script_name + segment
        popped = wsgi_get_bytes(segment)
    else:
        # Last segment: PATH_INFO is exhausted.
        environ['PATH_INFO'] = ''
        environ['SCRIPT_NAME'] = script_name + path
        popped = wsgi_get_bytes(path)

    return to_unicode(popped, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the next segment on the `PATH_INFO` or `None` if there
    is none.  Works like :func:`pop_path_info` without modifying the
    environment:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    If the `charset` is set to `None` a bytestring is returned.

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is checked.
    """
    head = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
    if not head:
        return None
    return to_unicode(wsgi_get_bytes(head[0]), charset, errors,
                      allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
                      errors='replace', collapse_http_schemes=True):
    """Extracts the path info from the given URL (or WSGI environment) and
    path. The path info returned is a unicode string, not a bytestring
    suitable for a WSGI environment. The URLs might also be IRIs.

    If the path info could not be determined, `None` is returned.

    Some examples:

    >>> extract_path_info('http://example.com/app', '/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True

    Instead of providing a base URL you can also pass a WSGI environment.

    .. versionadded:: 0.6

    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI. This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL. Also accepts IRIs and unicode
                        parameters.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.
    """
    def _normalize_netloc(scheme, netloc):
        # Strip credentials ('user:pass@'), then drop the port when it is
        # the default for the scheme so equal hosts compare equal.
        parts = netloc.split(u'@', 1)[-1].split(u':', 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == u'http' and port == u'80') or \
               (scheme == u'https' and port == u'443'):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u':' + port
        return netloc

    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl,
                                             root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    cur_scheme, cur_netloc, cur_path, = \
        url_parse(url_join(base_iri, path))[:3]

    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)

    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u'http', u'https'):
                return None
    else:
        if not (base_scheme in (u'http', u'https') and
                base_scheme == cur_scheme):
            return None

    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None

    # are we below the application path?
    base_path = base_path.rstrip(u'/')
    if not cur_path.startswith(base_path):
        return None

    return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):
    """A WSGI middleware that provides static content for development
    environments or simple server setups. Usage is quite simple::

        import os
        from werkzeug.wsgi import SharedDataMiddleware

        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })

    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``. This is pretty useful during development
    because a standalone media server is not required. One can also mount
    files on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.

    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::

        app = SharedDataMiddleware(app, {
            '/shared': ('myapplication', 'shared_files')
        })

    This will then serve the ``shared_files`` folder in the `myapplication`
    Python package.

    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web. If `cache` is set to
    `False` no caching headers are sent.

    Currently the middleware does not support non ASCII filenames. If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using ASCII
    only file names for static files.

    The middleware will guess the mimetype using the Python `mimetype`
    module. If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.

    .. versionchanged:: 0.5
       The cache timeout is configurable now.

    .. versionadded:: 0.6
       The `fallback_mimetype` parameter was added.

    :param app: the application to wrap. If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param fallback_mimetype: the fallback mimetype for unknown files.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    """

    def __init__(self, app, exports, disallow=None, cache=True,
                 cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
        self.app = app
        self.exports = {}
        self.cache = cache
        self.cache_timeout = cache_timeout
        # Each export value becomes a loader callable: tuples are package
        # data, strings are a single file or a directory on disk.
        for key, value in iteritems(exports):
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, string_types):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError('unknown def %r' % value)
            self.exports[key] = loader
        if disallow is not None:
            from fnmatch import fnmatch
            # NOTE(review): fnmatch() takes a single pattern string, so
            # despite the docstring saying "a list of fnmatch rules" this
            # only supports one pattern -- confirm intended usage.
            self.is_allowed = lambda x: not fnmatch(x, disallow)
        self.fallback_mimetype = fallback_mimetype

    def is_allowed(self, filename):
        """Subclasses can override this method to disallow the access to
        certain files.  However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True

    def _opener(self, filename):
        # Deferred open: returns (file object, mtime, size) when called.
        return lambda: (
            open(filename, 'rb'),
            datetime.utcfromtimestamp(os.path.getmtime(filename)),
            int(os.path.getsize(filename))
        )

    def get_file_loader(self, filename):
        """Loader for a single exported file (path argument is ignored)."""
        return lambda x: (os.path.basename(filename), self._opener(filename))

    def get_package_loader(self, package, package_path):
        """Loader serving files from a package's bundled data."""
        from pkg_resources import DefaultProvider, ResourceManager, \
            get_provider
        loadtime = datetime.utcnow()
        provider = get_provider(package)
        manager = ResourceManager()
        # Filesystem-backed packages can serve real files (with real
        # mtime/size); zip-backed ones fall back to a stream.
        filesystem_bound = isinstance(provider, DefaultProvider)
        def loader(path):
            if path is None:
                return None, None
            path = posixpath.join(package_path, path)
            if not provider.has_resource(path):
                return None, None
            basename = posixpath.basename(path)
            if filesystem_bound:
                return basename, self._opener(
                    provider.get_resource_filename(manager, path))
            # Stream resources report import time as mtime and size 0.
            return basename, lambda: (
                provider.get_resource_stream(manager, path),
                loadtime,
                0
            )
        return loader

    def get_directory_loader(self, directory):
        """Loader serving any file below *directory*."""
        def loader(path):
            if path is not None:
                path = os.path.join(directory, path)
            else:
                path = directory
            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)
            return None, None
        return loader

    def generate_etag(self, mtime, file_size, real_filename):
        """Build a weak content fingerprint from mtime, size and name."""
        if not isinstance(real_filename, bytes):
            real_filename = real_filename.encode(get_filesystem_encoding())
        return 'wzsdm-%d-%s-%s' % (
            mktime(mtime.timetuple()),
            file_size,
            adler32(real_filename) & 0xffffffff
        )

    def __call__(self, environ, start_response):
        cleaned_path = get_path_info(environ)
        if PY2:
            cleaned_path = cleaned_path.encode(get_filesystem_encoding())
        # sanitize the path for non unix systems
        cleaned_path = cleaned_path.strip('/')
        for sep in os.sep, os.altsep:
            if sep and sep != '/':
                cleaned_path = cleaned_path.replace(sep, '/')
        # Drop empty and '..' segments to prevent path traversal.
        path = '/' + '/'.join(x for x in cleaned_path.split('/')
                              if x and x != '..')
        file_loader = None
        for search_path, loader in iteritems(self.exports):
            if search_path == path:
                real_filename, file_loader = loader(None)
                if file_loader is not None:
                    break
            if not search_path.endswith('/'):
                search_path += '/'
            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path):])
                if file_loader is not None:
                    break
        # No export matched (or file is disallowed): forward the request.
        if file_loader is None or not self.is_allowed(real_filename):
            return self.app(environ, start_response)
        guessed_type = mimetypes.guess_type(real_filename)
        mime_type = guessed_type[0] or self.fallback_mimetype
        f, mtime, file_size = file_loader()
        headers = [('Date', http_date())]
        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)
            headers += [
                ('Etag', '"%s"' % etag),
                ('Cache-Control', 'max-age=%d, public' % timeout)
            ]
            # Conditional request hit: answer 304 without a body.
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response('304 Not Modified', headers)
                return []
            headers.append(('Expires', http_date(time() + timeout)))
        else:
            headers.append(('Cache-Control', 'public'))
        headers.extend((
            ('Content-Type', mime_type),
            ('Content-Length', str(file_size)),
            ('Last-Modified', http_date(mtime))
        ))
        start_response('200 OK', headers)
        return wrap_file(environ, f)
class DispatcherMiddleware(object):
    """Allows one to mount middlewares or applications in a WSGI application.
    This is useful if you want to combine multiple WSGI applications::

        app = DispatcherMiddleware(app, {
            '/app2':        app2,
            '/app3':        app3
        })

    The longest matching mount prefix wins; anything without a matching
    mount falls through to the wrapped application.
    """

    def __init__(self, app, mounts=None):
        self.app = app
        self.mounts = mounts or {}

    def __call__(self, environ, start_response):
        script = environ.get('PATH_INFO', '')
        path_info = ''
        app = self.app
        matched = False
        # Peel segments off the right end until a mount prefix matches.
        while '/' in script:
            if script in self.mounts:
                app = self.mounts[script]
                matched = True
                break
            script, tail = script.rsplit('/', 1)
            path_info = '/%s%s' % (tail, path_info)
        if not matched:
            app = self.mounts.get(script, self.app)
        original_script_name = environ.get('SCRIPT_NAME', '')
        environ['SCRIPT_NAME'] = original_script_name + script
        environ['PATH_INFO'] = path_info
        return app(environ, start_response)
@implements_iterator
class ClosingIterator(object):
    """Wrap a WSGI iterable so extra ``close`` callbacks run when the
    response iterator is closed, as the WSGI spec requires::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    A single callable may be passed instead of a list.  The wrapped
    iterable's own ``close`` (if any) is always invoked first.
    """

    def __init__(self, iterable, callbacks=None):
        it = iter(iterable)
        self._next = partial(next, it)
        # Normalize *callbacks* into a fresh list we own.
        if callbacks is None:
            cbs = []
        elif callable(callbacks):
            cbs = [callbacks]
        else:
            cbs = list(callbacks)
        inner_close = getattr(it, 'close', None)
        if inner_close:
            cbs.insert(0, inner_close)
        self._callbacks = cbs

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def close(self):
        for cb in self._callbacks:
            cb()
def wrap_file(environ, file, buffer_size=8192):
"""Wraps a file. This uses the WSGI server's file wrapper if available
or otherwise the generic :class:`FileWrapper`.
.. versionadded:: 0.5
If the file wrapper from the WSGI server is used it's important to not
iterate over it from inside the application but to pass it through
unchanged. If you want to pass out a file wrapper inside a response
object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
More information about file wrappers are available in :pep:`333`.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size)
@implements_iterator
class FileWrapper(object):
    """Turn a :class:`file`-like object into an iterable that yields
    ``buffer_size`` blocks until the file is exhausted.

    Prefer :func:`wrap_file`, which uses the WSGI server's own file
    wrapper when one is available.  With a :class:`BaseResponse`, enable
    `direct_passthrough` mode.

    .. versionadded:: 0.5

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file, buffer_size=8192):
        self.file = file
        self.buffer_size = buffer_size

    def close(self):
        # Not every file-like object exposes close().
        close = getattr(self.file, 'close', None)
        if close is not None:
            close()

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
def _make_chunk_iter(stream, limit, buffer_size):
    """Helper for the line and chunk iter functions.

    Accepts either a readable stream or an iterable of chunks and yields
    non-empty chunks of at most *buffer_size* bytes.  Streams are capped
    at *limit* bytes via :class:`LimitedStream` when a limit is given.
    """
    if isinstance(stream, (bytes, bytearray, text_type)):
        raise TypeError('Passed a string or byte object instead of '
                        'true iterator or stream.')
    if not hasattr(stream, 'read'):
        # Already an iterable of chunks: forward the non-empty ones.
        for chunk in stream:
            if chunk:
                yield chunk
        return
    if limit is not None and not isinstance(stream, LimitedStream):
        stream = LimitedStream(stream, limit)
    read = stream.read
    while True:
        chunk = read(buffer_size)
        if not chunk:
            return
        yield chunk
def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
    """Safely iterates line-based over an input stream.  If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.

    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification.  The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.

    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.

    .. versionchanged:: 0.8
       This function now ensures that the limit was reached.

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    # Peek one chunk to decide whether we work in text or bytes mode.
    first_item = next(_iter, '')
    if not first_item:
        return

    # s() coerces the literal markers below to the stream's string type
    # (presumably via werkzeug's make_literal_wrapper — confirm there).
    s = make_literal_wrapper(first_item)
    empty = s('')
    cr = s('\r')
    lf = s('\n')
    crlf = s('\r\n')

    # Push the peeked chunk back in front of the iterator.
    _iter = chain((first_item,), _iter)

    def _iter_basic_lines():
        # Yield lines terminated inside a chunk; a partial trailing line
        # is carried over in `buffer` until the next chunk completes it.
        _join = empty.join
        buffer = []
        while 1:
            new_data = next(_iter, '')
            if not new_data:
                break
            new_buf = []
            for item in chain(buffer, new_data.splitlines(True)):
                new_buf.append(item)
                if item and item[-1:] in crlf:
                    yield _join(new_buf)
                    new_buf = []
            buffer = new_buf
        if buffer:
            yield _join(buffer)

    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = empty
    for item in _iter_basic_lines():
        if item == lf and previous[-1:] == cr:
            previous += item
            item = empty
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    # Peek one chunk to decide between text and bytes handling, then
    # push it back in front of the iterator.
    first_item = next(_iter, '')
    if not first_item:
        return
    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        # The capturing group keeps the separators in the split result so
        # the loop below can detect them.
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join

    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        # Carry the unterminated tail in `buffer` across read boundaries.
        for item in chain(buffer, chunks):
            if item == separator:
                yield _join(new_buf)
                new_buf = []
            else:
                new_buf.append(item)
        buffer = new_buf
    if buffer:
        yield _join(buffer)
@implements_iterator
class LimitedStream(object):
    """Wraps a stream so that it doesn't read more than n bytes.  If the
    stream is exhausted and the caller tries to get more bytes from it
    :func:`on_exhausted` is called which by default returns an empty
    string.  The return value of that function is forwarded
    to the reader function.  So if it returns an empty string
    :meth:`read` will return an empty string as well.

    The limit however must never be higher than what the stream can
    output.  Otherwise :meth:`readlines` will try to read past the
    limit.

    .. admonition:: Note on WSGI compliance

       calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because it passes a size argument to the
       readline methods.  Unfortunately the WSGI PEP is not safely
       implementable without a size argument to :meth:`readline`
       because there is no EOF marker in the stream.  As a result
       of that the use of :meth:`readline` is discouraged.

       For the same reason iterating over the :class:`LimitedStream`
       is not portable.  It internally calls :meth:`readline`.

       We strongly suggest using :meth:`read` only or using the
       :func:`make_line_iter` which safely iterates line-based
       over a WSGI input stream.

    :param stream: the stream to wrap.
    :param limit: the limit for the stream, must not be longer than
                  what the string can provide if the stream does not
                  end with `EOF` (like `wsgi.input`)
    """

    def __init__(self, stream, limit):
        # Only the bound read/readline methods are kept; the stream
        # object itself is not stored.
        self._read = stream.read
        self._readline = stream.readline
        self._pos = 0
        self.limit = limit

    def __iter__(self):
        return self

    @property
    def is_exhausted(self):
        """If the stream is exhausted this attribute is `True`."""
        return self._pos >= self.limit

    def on_exhausted(self):
        """This is called when the stream tries to read past the limit.
        The return value of this function is returned from the reading
        function.
        """
        # Read null bytes from the stream so that we get the
        # correct end of stream marker.
        return self._read(0)

    def on_disconnect(self):
        """What should happen if a disconnect is detected?  The return
        value of this function is returned from read functions in case
        the client went away.  By default a
        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
        """
        from werkzeug.exceptions import ClientDisconnected
        raise ClientDisconnected()

    def exhaust(self, chunk_size=1024 * 64):
        """Exhaust the stream.  This consumes all the data left until the
        limit is reached.

        :param chunk_size: the size for a chunk.  It will read the chunk
                           until the stream is exhausted and throw away
                           the results.
        """
        to_read = self.limit - self._pos
        chunk = chunk_size
        while to_read > 0:
            chunk = min(to_read, chunk)
            self.read(chunk)
            to_read -= chunk

    def read(self, size=None):
        """Read `size` bytes or if size is not provided everything is read.

        :param size: the number of bytes read.
        """
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistence with file
            size = self.limit
        # Never read past the configured limit.
        to_read = min(self.limit - self._pos, size)
        try:
            read = self._read(to_read)
        except (IOError, ValueError):
            return self.on_disconnect()
        if to_read and len(read) != to_read:
            # Short read: the client went away before sending the
            # promised number of bytes.
            return self.on_disconnect()
        self._pos += len(read)
        return read

    def readline(self, size=None):
        """Reads one line from the stream."""
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None:
            size = self.limit - self._pos
        else:
            size = min(size, self.limit - self._pos)
        try:
            line = self._readline(size)
        except (ValueError, IOError):
            return self.on_disconnect()
        if size and not line:
            # Nothing came back although bytes were still expected.
            return self.on_disconnect()
        self._pos += len(line)
        return line

    def readlines(self, size=None):
        """Reads a file into a list of strings.  It calls :meth:`readline`
        until the file is read to the end.  It does support the optional
        `size` argument if the underlaying stream supports it for
        `readline`.
        """
        last_pos = self._pos
        result = []
        if size is not None:
            end = min(self.limit, last_pos + size)
        else:
            end = self.limit
        while 1:
            if size is not None:
                # Shrink the remaining size budget by what the last
                # readline consumed (last_pos trails self._pos).
                size -= last_pos - self._pos
            if self._pos >= end:
                break
            result.append(self.readline(size))
            if size is not None:
                last_pos = self._pos
        return result

    def tell(self):
        """Returns the position of the stream.

        .. versionadded:: 0.9
        """
        return self._pos

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
| apache-2.0 |
BackupTheBerlios/espressopp | examples/hadress/hadressFEC/hadressDensityFEC.py | 1 | 8414 | #!/usr/bin/env python
# -*- coding: utf-8 -*-

# H-AdResS tetrahedral-liquid example (Python 2 / espresso++):
# runs an adaptive-resolution MD simulation with a Free Energy
# Compensation (FEC) force and accumulates an x-axis density profile.

# relevant imports
import sys
import time
import espresso
import mpi4py.MPI as MPI
import Tetracryst # Preparation of tetrahedral crystal and construction of bonds in tetrahedral liquid
from espresso import Real3D, Int3D
from espresso.tools import decomp
from espresso.tools import timers

# integration steps, cutoff, skin, AdResS specifications
steps = 1000
timestep = 0.0005
intervals = 100

rc = 4.5 # cutoff coarse-grained potential
rca = 1.122462048309373 # cutoff atomistic potential (cutoff (2^(1/6)), WCA)
skin = 0.4

# Parameters for the thermostat (thermostat is disabled below)
#gamma = 2.0
#temp = 1.0

# Parameters for size of AdResS dimensions
ex_size = 5.0
hy_size = 5.0

# read equilibrated configuration file
pid, type, x, y, z, vx, vy, vz, Lx, Ly, Lz = espresso.tools.readxyz("equilibrated_conf.xyz")

# Table for coarse-grained potential
tabCG = "table_potential.dat"
# FEC compensation table
tabFEC = "table_FEC_Gibbs.dat"

# number of CG particles (4 atoms form one tetrahedral molecule)
num_particlesCG = len(x)/4
# number of AT particles
num_particles = len(x)

# set up the system
sys.stdout.write('Setting up simulation ...\n')
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)
system = espresso.System()
system.rng = espresso.esutil.RNG()
system.bc = espresso.bc.OrthorhombicBC(system.rng, size)
system.skin = skin

comm = MPI.COMM_WORLD
nodeGrid = decomp.nodeGrid(comm.size)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)

# (H-)AdResS domain decomposition
system.storage = espresso.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)

# prepare AT particles
allParticlesAT = []
allParticles = []
tuples = []
for pidAT in range(num_particles):
    allParticlesAT.append([pidAT, # add here these particles just temporarily
                           Real3D(x[pidAT], y[pidAT], z[pidAT]), # position
                           Real3D(vx[pidAT], vy[pidAT], vz[pidAT]), # velocity
                           Real3D(0, 0, 0), # force
                           1, 1.0, 1]) # type, mass, is AT particle

# create CG particles
for pidCG in range(num_particlesCG):
    # we put CG molecule in first atom, later CG molecules will be positioned in the center
    cmp = espresso.tools.AdressSetCG(4, pidCG, allParticlesAT)
    # Preparation of tuples (tuples define, which atoms belong to which CG molecules)
    tmptuple = [pidCG+num_particles]
    for pidAT2 in range(4):
        pid = pidCG*4+pidAT2
        tmptuple.append(pid)
    # append CG particles
    allParticles.append([pidCG+num_particles, # CG particle has to be added first!
                         Real3D(cmp[0], cmp[1], cmp[2]), # pos
                         Real3D(0, 0, 0), # vel
                         Real3D(0, 0, 0), # force
                         0, 4.0, 0]) # type, mass, is not AT particle
    # append AT particles
    for pidAT in range(4):
        pid = pidCG*4+pidAT
        allParticles.append([pid, # now the AT particles can be added
                             (allParticlesAT[pid])[1], # pos
                             (allParticlesAT[pid])[2], # vel
                             (allParticlesAT[pid])[3], # force
                             (allParticlesAT[pid])[4], # type
                             (allParticlesAT[pid])[5], # mass
                             (allParticlesAT[pid])[6]]) # is AT particle
    # append tuple to tuplelist
    tuples.append(tmptuple)

# add particles to system
system.storage.addParticles(allParticles, "id", "pos", "v", "f", "type", "mass", "adrat")
# create FixedTupleList object
ftpl = espresso.FixedTupleListAdress(system.storage)
# and add the tuples
ftpl.addTuples(tuples)
system.storage.setFixedTuplesAdress(ftpl)

# add bonds between AT particles
fpl = espresso.FixedPairListAdress(system.storage, ftpl)
bonds = Tetracryst.makebonds(len(x))
fpl.addBonds(bonds)

# decompose after adding tuples and bonds
print "Added tuples and bonds, decomposing now ..."
system.storage.decompose()
print "done decomposing"

# AdResS Verlet list
vl = espresso.VerletListAdress(system, cutoff=rc, adrcut=rc,
                               dEx=ex_size, dHy=hy_size,
                               adrCenter=[Lx/2, Ly/2, Lz/2])

# non-bonded potentials
# LJ Capped WCA between AT and tabulated potential between CG particles
interNB = espresso.interaction.VerletListHadressLennardJones(vl, ftpl) # Here we need specific (H-)AdResS interaction type
potWCA = espresso.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift='auto', cutoff=rca)
potCG = espresso.interaction.Tabulated(itype=3, filename=tabCG, cutoff=rc) # CG
interNB.setPotentialAT(type1=1, type2=1, potential=potWCA) # AT
interNB.setPotentialCG(type1=0, type2=0, potential=potCG) # CG
system.addInteraction(interNB)

# bonded potentials
# Quartic potential between AT particles
potQuartic = espresso.interaction.Quartic(K=75.0, r0=1.0)
interQuartic = espresso.interaction.FixedPairListQuartic(system, fpl, potQuartic)
system.addInteraction(interQuartic)

# VelocityVerlet integrator
integrator = espresso.integrator.VelocityVerlet(system)
integrator.dt = timestep

# add AdResS extension
adress = espresso.integrator.Adress(system, vl, ftpl)
integrator.addExtension(adress)

# add Langevin thermostat extension (disabled for this run)
#langevin = espresso.integrator.LangevinThermostat(system)
#langevin.gamma = gamma
#langevin.temperature = temp
#langevin.adress = True # enable AdResS!
#integrator.addExtension(langevin)

# add the Free Energy Compensation force, read from the tabFEC table
fec = espresso.integrator.FreeEnergyCompensation(system, center=[Lx/2, Ly/2, Lz/2])
fec.addForce(itype=3, filename=tabFEC, type=0)
integrator.addExtension(fec)

# distribute atoms and CG molecules according to AdResS domain decomposition, place CG molecules in the center of mass
espresso.tools.AdressDecomp(system, integrator)

# system information
print ''
print 'AdResS Center =', [Lx/2, Ly/2, Lz/2]
print 'number of AT particles =', num_particles
print 'number of CG particles =', num_particlesCG
print 'density = %.4f' % (density)
print 'rc =', rc
print 'dt =', integrator.dt
print 'skin =', system.skin
print 'steps =', steps
print 'NodeGrid = %s' % (nodeGrid,)
print 'CellGrid = %s' % (cellGrid,)
print ''

# analysis
temperature = espresso.analysis.Temperature(system)

fmt = '%5d %8.4f %12.3f %12.3f %12.3f %12.3f %12.3f\n'

T = temperature.compute()
Ek = 0.5 * T * (3 * num_particles)
Ep = interNB.computeEnergy()
Eb = interQuartic.computeEnergy()
Ecorr = fec.computeCompEnergy()
sys.stdout.write(' step Temp etotal enonbonded ebonded ekinetic ecorrection\n')
sys.stdout.write(fmt % (0, T, Ek + Ep + Eb + Ecorr, Ep, Eb, Ek, Ecorr))

# Density profile preparation
density_array_total = []
Adds = 0.0
densityprofilegrid = 100

# Timer, Steps
start_time = time.clock()
# integer division: 10 MD sub-runs per reporting interval
nsteps = steps / intervals

# integration and on the fly analysis
for s in range(1, intervals + 1):
    integrator.run(nsteps)
    step = nsteps * s
    T = temperature.compute()
    Ek = 0.5 * T * (3 * num_particles)
    Ep = interNB.computeEnergy()
    Eb = interQuartic.computeEnergy()
    Ecorr = fec.computeCompEnergy()
    # calculate density profile (skipping the first 10 intervals)
    if s > 10:
        densityprofile = espresso.analysis.XDensity(system)
        density_array = densityprofile.compute(densityprofilegrid)
        for i in range(len(density_array)):
            if(i>=len(density_array_total)):
                density_array_total.append(density_array[i])
            else:
                density_array_total[i] += density_array[i]
        Adds += 1.0
    sys.stdout.write(fmt % (step, T, Ek + Ep + Eb + Ecorr, Ep, Eb, Ek, Ecorr))

# correct the density profile according to number of samples
for i in range(len(density_array_total)):
    density_array_total[i] /= Adds

# printing density profile
nameFile = 'density_profile_Gibbs.dat'
print ''
print "Printing the density profile to %s\n" %nameFile
tempFile = open (nameFile, 'w')
fmt = ' %12.8f %12.8f\n'
dr = Lx / float(densityprofilegrid)
for i in range( len(density_array_total) ):
    tempFile.write(fmt % ( (i+0.5)*dr, density_array_total[i] ))
tempFile.close()

# simulation information
end_time = time.clock()
timers.show(integrator.getTimers(), precision=3)
sys.stdout.write('Total # of neighbors = %d\n' % vl.totalSize())
sys.stdout.write('Ave neighs/atom = %.1f\n' % (vl.totalSize() / float(num_particles)))
sys.stdout.write('Neighbor list builds = %d\n' % vl.builds)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
| gpl-3.0 |
Rudd-O/cloud-tool | cloudapis/cloud.py | 1 | 2976 | '''Implements the Cloud.com API'''
from cloudtool.utils import describe
import urllib
import urllib2
import os
import xml.dom.minidom
class CloudAPI:
    """Thin HTTP client for the Cloud.com management-server API.

    Requests are issued as plain GETs against
    ``http://<server>/?command=...&response=<responseformat>&...``.
    """

    @describe("server", "Management Server host name or address")
    @describe("responseformat", "Response format: xml or json")
    def __init__(self,
                 server="127.0.0.1:8096",
                 responseformat="xml",
                 ):
        # Explicit assignments instead of self.__dict__.update(locals()),
        # which also stored a spurious `self.self` attribute.
        self.server = server
        self.responseformat = responseformat

    def _make_request(self, command, parameters=None):
        '''Command is a string, parameters is a dictionary.

        Returns the raw response body.  The caller's dictionary is not
        modified.
        '''
        # Copy so the "command"/"response" keys do not leak back into the
        # caller's dict.  (The previous host/port parsing here was dead
        # code: the URL is built from self.server directly.)
        params = dict(parameters) if parameters else {}
        params["command"] = command
        params["response"] = self.responseformat
        url = "http://" + self.server + "/?" + urllib.urlencode(params)
        f = urllib2.urlopen(url)
        try:
            return f.read()
        finally:
            # Close the HTTP response instead of leaking the socket.
            f.close()
def load_dynamic_methods():
'''creates smart function objects for every method in the commands.xml file'''
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE: rc.append(node.data)
return ''.join(rc)
# FIXME figure out installation and packaging
xmlfile = os.path.join(os.path.dirname(__file__),"commands.xml")
dom = xml.dom.minidom.parse(xmlfile)
for cmd in dom.getElementsByTagName("command"):
name = getText(cmd.getElementsByTagName('name')[0].childNodes).strip()
assert name
description = cmd.getElementsByTagName('name')[0].getAttribute("description")
if description: description = '"""%s"""' % description
else: description = ''
arguments = []
options = []
descriptions = []
for param in cmd.getElementsByTagName('arg'):
argname = getText(param.childNodes).strip()
assert argname
required = param.getAttribute("required").strip()
if required == 'true': required = True
elif required == 'false': required = False
else: raise AssertionError, "Not reached"
if required: arguments.append(argname)
options.append(argname)
description = param.getAttribute("description").strip()
if description: descriptions.append( (argname,description) )
funcparams = ["self"] + [ "%s=None"%o for o in options ]
funcparams = ", ".join(funcparams)
code = """
def %s(%s):
%s
parms = locals()
del parms["self"]
for arg in %r:
if locals()[arg] is None:
raise TypeError, "%%s is a required option"%%arg
for k,v in parms.items():
if v is None: del parms[k]
output = self._make_request("%s",parms)
print output
"""%(name,funcparams,description,arguments,name)
namespace = {}
exec code.strip() in namespace
func = namespace[name]
for argname,description in descriptions:
func = describe(argname,description)(func)
yield (name,func)
# Attach one generated method per API command to CloudAPI.
for name,meth in load_dynamic_methods(): setattr(CloudAPI,name,meth)
# `implementor` is presumably the hook the cloudtool loader looks for —
# confirm against cloudtool's API discovery code.
implementor = CloudAPI
# Tidy the module namespace so only the finished class is exported.
del name,meth,describe,load_dynamic_methods
| gpl-3.0 |
99cloud/keystone_register | openstack_dashboard/api/nova.py | 1 | 18812 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Openstack, LLC
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.translation import ugettext as _
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1 import security_group_rules as nova_rules
from novaclient.v1_1.security_groups import SecurityGroup as NovaSecurityGroup
from novaclient.v1_1.servers import REBOOT_HARD, REBOOT_SOFT
from horizon.conf import HORIZON_CONFIG
from horizon.utils.memoized import memoized
from openstack_dashboard.api.base import (APIResourceWrapper, QuotaSet,
APIDictWrapper, url_for)
from openstack_dashboard.api import network
LOG = logging.getLogger(__name__)
# API static values
INSTANCE_ACTIVE_STATE = 'ACTIVE'  # status string reported for a running server
VOLUME_STATE_AVAILABLE = "available"  # status string for an unattached volume
class VNCConsole(APIDictWrapper):
    """Wrapper for the "console" dictionary returned by the
    novaclient.servers.get_vnc_console method.
    """
    # Keys of the console dict exposed as attributes.
    _attrs = ['url', 'type']


class SPICEConsole(APIDictWrapper):
    """Wrapper for the "console" dictionary returned by the
    novaclient.servers.get_spice_console method.
    """
    # Keys of the console dict exposed as attributes.
    _attrs = ['url', 'type']
class Server(APIResourceWrapper):
    """Simple wrapper around novaclient.server.Server

    Preserves the request info so image name can later be retrieved
    """
    _attrs = ['addresses', 'attrs', 'id', 'image', 'links',
              'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
              'image_name', 'VirtualInterfaces', 'flavor', 'key_name',
              'tenant_id', 'user_id', 'OS-EXT-STS:power_state',
              'OS-EXT-STS:task_state', 'OS-EXT-SRV-ATTR:instance_name',
              'OS-EXT-SRV-ATTR:host']

    def __init__(self, apiresource, request):
        super(Server, self).__init__(apiresource)
        # Kept so image_name can make a glance call as the same user.
        self.request = request

    @property
    def image_name(self):
        """Resolve this server's image id to its human-readable name."""
        # Imported here rather than at module scope — presumably to avoid
        # a circular import between the api modules; confirm before moving.
        import glanceclient.exc as glance_exceptions
        from openstack_dashboard.api import glance
        try:
            image = glance.image_get(self.request, self.image['id'])
            return image.name
        except glance_exceptions.ClientException:
            # The image may have been deleted after the server was booted.
            return "(not found)"

    @property
    def internal_name(self):
        # Hypervisor-side instance name from the OS-EXT-SRV-ATTR extension;
        # empty string when the extension data is absent.
        return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")

    def reboot(self, hardness=REBOOT_HARD):
        """Reboot the server (hard reboot by default)."""
        novaclient(self.request).servers.reboot(self.id, hardness)
class NovaUsage(APIResourceWrapper):
    """Simple wrapper around contrib/simple_usage.py."""

    _attrs = ['start', 'server_usages', 'stop', 'tenant_id',
              'total_local_gb_usage', 'total_memory_mb_usage',
              'total_vcpus_usage', 'total_hours']

    def _active(self):
        """Usage records of servers that have not been terminated."""
        return [s for s in self.server_usages if s['ended_at'] is None]

    def get_summary(self):
        """Return the usage totals as a plain dict."""
        return {
            'instances': self.total_active_instances,
            'memory_mb': self.memory_mb,
            'vcpus': getattr(self, "total_vcpus_usage", 0),
            'vcpu_hours': self.vcpu_hours,
            'local_gb': self.local_gb,
            'disk_gb_hours': self.disk_gb_hours,
        }

    @property
    def total_active_instances(self):
        return len(self._active())

    @property
    def vcpus(self):
        return sum(s['vcpus'] for s in self._active())

    @property
    def vcpu_hours(self):
        return getattr(self, "total_hours", 0)

    @property
    def local_gb(self):
        return sum(s['local_gb'] for s in self._active())

    @property
    def memory_mb(self):
        return sum(s['memory_mb'] for s in self._active())

    @property
    def disk_gb_hours(self):
        return getattr(self, "total_local_gb_usage", 0)
class SecurityGroup(APIResourceWrapper):
    """Wrapper around novaclient.security_groups.SecurityGroup which wraps its
    rules in SecurityGroupRule objects and allows access to them.
    """
    _attrs = ['id', 'name', 'description', 'tenant_id']

    @property
    def rules(self):
        """Wraps transmitted rule info in the novaclient rule class."""
        # Cached on first access; later reads reuse the wrapped list.
        if "_rules" not in self.__dict__:
            # NOTE(review): the manager *class* (not an instance) is passed
            # to SecurityGroupRule here; novaclient only stores it, so this
            # works, but verify if novaclient is upgraded.
            manager = nova_rules.SecurityGroupRuleManager
            self._rules = [nova_rules.SecurityGroupRule(manager, rule)
                           for rule in self._apiresource.rules]
        return self.__dict__['_rules']

    @rules.setter
    def rules(self, value):
        # Allows callers (e.g. server_security_groups) to replace the
        # cached rule list wholesale.
        self._rules = value
class SecurityGroupRule(APIResourceWrapper):
    """ Wrapper for individual rules in a SecurityGroup. """
    _attrs = ['id', 'ip_protocol', 'from_port', 'to_port', 'ip_range', 'group']

    def __unicode__(self):
        # Source-group rules carry a 'name' key in `group`; CIDR-based
        # rules describe their source via `ip_range` instead.
        if 'name' in self.group:
            vals = {'from': self.from_port,
                    'to': self.to_port,
                    'group': self.group['name']}
            return _('ALLOW %(from)s:%(to)s from %(group)s') % vals
        else:
            vals = {'from': self.from_port,
                    'to': self.to_port,
                    'cidr': self.ip_range['cidr']}
            return _('ALLOW %(from)s:%(to)s from %(cidr)s') % vals
class FlavorExtraSpec(object):
    """A single flavor extra-spec entry.

    The spec's key doubles as its id so the object can be used where an
    id-bearing API resource is expected.
    """

    def __init__(self, flavor_id, key, val):
        self.flavor_id = flavor_id
        self.id = self.key = key
        self.value = val
class FloatingIp(APIResourceWrapper):
    """Wrapper for a nova floating IP.

    Nova has no port concept, so ``port_id`` is populated from
    ``instance_id`` to present the same interface as the network
    abstraction layer expects.
    """
    _attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id', 'pool']

    def __init__(self, fip):
        # Plain attribute assignment; the previous explicit
        # fip.__setattr__('port_id', ...) call was a roundabout spelling
        # of exactly the same operation.
        fip.port_id = fip.instance_id
        super(FloatingIp, self).__init__(fip)
class FloatingIpPool(APIDictWrapper):
    # Presents a floating-IP pool with both 'id' and 'name' keys;
    # the pool's name doubles as its id.

    def __init__(self, pool):
        pool_dict = {'id': pool.name,
                     'name': pool.name}
        super(FloatingIpPool, self).__init__(pool_dict)


class FloatingIpTarget(APIDictWrapper):
    # An association target for a floating IP: a server, displayed
    # as "name (id)".

    def __init__(self, server):
        server_dict = {'name': '%s (%s)' % (server.name, server.id),
                       'id': server.id}
        super(FloatingIpTarget, self).__init__(server_dict)
class FloatingIpManager(network.FloatingIpManager):
    """Nova-network backed implementation of the floating IP interface.

    Nova has no port concept, so instance ids stand in for "port" ids
    throughout this class.
    """

    def __init__(self, request):
        self.request = request
        self.client = novaclient(request)

    def list_pools(self):
        """All floating-IP pools, wrapped as FloatingIpPool."""
        return [FloatingIpPool(pool)
                for pool in self.client.floating_ip_pools.list()]

    def list(self):
        """All floating IPs visible to the user, wrapped as FloatingIp."""
        return [FloatingIp(fip)
                for fip in self.client.floating_ips.list()]

    def get(self, floating_ip_id):
        """Fetch a single floating IP by id."""
        return FloatingIp(self.client.floating_ips.get(floating_ip_id))

    def allocate(self, pool):
        """Allocate a new floating IP from *pool* to the tenant."""
        return FloatingIp(self.client.floating_ips.create(pool=pool))

    def release(self, floating_ip_id):
        """Return the floating IP to its pool."""
        self.client.floating_ips.delete(floating_ip_id)

    def associate(self, floating_ip_id, port_id):
        # In Nova implied port_id is instance_id
        server = self.client.servers.get(port_id)
        fip = self.client.floating_ips.get(floating_ip_id)
        self.client.servers.add_floating_ip(server.id, fip.ip)

    def disassociate(self, floating_ip_id, port_id):
        # The instance is looked up from the floating IP itself; the
        # port_id argument is unused here (kept for interface parity).
        fip = self.client.floating_ips.get(floating_ip_id)
        server = self.client.servers.get(fip.instance_id)
        self.client.servers.remove_floating_ip(server.id, fip.ip)

    def list_targets(self):
        """Every server is a valid association target under nova-network."""
        return [FloatingIpTarget(s) for s in self.client.servers.list()]

    def get_target_id_by_instance(self, instance_id):
        # Identity mapping: nova's "port" id is the instance id.
        return instance_id

    def is_simple_associate_supported(self):
        # Governed by the dashboard's simple_ip_management setting.
        return HORIZON_CONFIG["simple_ip_management"]
def novaclient(request):
    """Build a nova API client authenticated with the request user's token."""
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    # NOTE(review): this logs the user's raw auth token at debug level;
    # consider redacting it — tokens in log files are a credential leak.
    LOG.debug('novaclient connection created using token "%s" and url "%s"' %
              (request.user.token.id, url_for(request, 'compute')))
    c = nova_client.Client(request.user.username,
                           request.user.token.id,
                           project_id=request.user.tenant_id,
                           auth_url=url_for(request, 'compute'),
                           insecure=insecure,
                           http_log_debug=settings.DEBUG)
    # Seed the client with the existing token/endpoint — presumably so no
    # fresh authentication round-trip is made; confirm against novaclient.
    c.client.auth_token = request.user.token.id
    c.client.management_url = url_for(request, 'compute')
    return c
def server_vnc_console(request, instance_id, console_type='novnc'):
    """Return a VNCConsole holding the access URL for the instance."""
    return VNCConsole(novaclient(request).servers.get_vnc_console(instance_id,
                                                    console_type)['console'])


def server_spice_console(request, instance_id, console_type='spice-html5'):
    """Return a SPICEConsole holding the access URL for the instance."""
    return SPICEConsole(novaclient(request).servers.get_spice_console(
        instance_id, console_type)['console'])
def flavor_create(request, name, memory, vcpu, disk, ephemeral=0, swap=0,
                  metadata=None):
    """Create a flavor; optionally attach *metadata* as extra specs."""
    flavor = novaclient(request).flavors.create(name, memory, vcpu, disk,
                                                ephemeral=ephemeral,
                                                swap=swap)
    if (metadata):
        flavor_extra_set(request, flavor.id, metadata)
    return flavor


def flavor_delete(request, flavor_id):
    """Delete the flavor identified by *flavor_id*."""
    novaclient(request).flavors.delete(flavor_id)


def flavor_get(request, flavor_id):
    """Fetch a single flavor by id."""
    return novaclient(request).flavors.get(flavor_id)


@memoized
def flavor_list(request):
    """Get the list of available instance sizes (flavors)."""
    # Memoized: repeated calls within the cache scope reuse the result.
    return novaclient(request).flavors.list()


def flavor_get_extras(request, flavor_id, raw=False):
    """Get flavor extra specs.

    Returns the raw dict when *raw* is true, otherwise a list of
    FlavorExtraSpec wrappers.
    """
    flavor = novaclient(request).flavors.get(flavor_id)
    extras = flavor.get_keys()
    if raw:
        return extras
    return [FlavorExtraSpec(flavor_id, key, value) for
            key, value in extras.items()]


def flavor_extra_delete(request, flavor_id, keys):
    """Unset the flavor extra spec keys."""
    flavor = novaclient(request).flavors.get(flavor_id)
    return flavor.unset_keys(keys)


def flavor_extra_set(request, flavor_id, metadata):
    """Set the flavor extra spec keys."""
    flavor = novaclient(request).flavors.get(flavor_id)
    if (not metadata):  # not a way to delete keys
        return None
    return flavor.set_keys(metadata)
def snapshot_create(request, instance_id, name):
    """Create a snapshot image of a running server."""
    return novaclient(request).servers.create_image(instance_id, name)


def keypair_create(request, name):
    """Generate a new keypair (response includes the private key)."""
    return novaclient(request).keypairs.create(name)


def keypair_import(request, name, public_key):
    """Register an existing public key as a named keypair."""
    return novaclient(request).keypairs.create(name, public_key)


def keypair_delete(request, keypair_id):
    """Delete a keypair."""
    novaclient(request).keypairs.delete(keypair_id)


def keypair_list(request):
    """List the user's keypairs."""
    return novaclient(request).keypairs.list()
def server_create(request, name, image, flavor, key_name, user_data,
security_groups, block_device_mapping, nics=None,
instance_count=1):
return Server(novaclient(request).servers.create(
name, image, flavor, userdata=user_data,
security_groups=security_groups,
key_name=key_name, block_device_mapping=block_device_mapping,
nics=nics,
min_count=instance_count), request)
def server_delete(request, instance):
    """Delete (terminate) the given server/instance."""
    novaclient(request).servers.delete(instance)


def server_get(request, instance_id):
    """Return one server, wrapped in the dashboard's Server class."""
    return Server(novaclient(request).servers.get(instance_id), request)


def server_list(request, search_opts=None, all_tenants=False):
    """List servers, scoped to the current project unless *all_tenants*.

    When all_tenants is false the query is restricted to the requesting
    user's tenant via the project_id search option.
    """
    if search_opts is None:
        search_opts = {}
    if all_tenants:
        search_opts['all_tenants'] = True
    else:
        search_opts['project_id'] = request.user.tenant_id
    return [Server(s, request)
            for s in novaclient(request).servers.list(True, search_opts)]
def server_console_output(request, instance_id, tail_length=None):
    """Gets console output of an instance (last *tail_length* lines if set)."""
    return novaclient(request).servers.get_console_output(instance_id,
                                                          length=tail_length)


def server_security_groups(request, instance_id):
    """Gets security groups of an instance."""
    # TODO(gabriel): This needs to be moved up to novaclient, and should
    # be removed once novaclient supports this call.
    security_groups = []
    nclient = novaclient(request)
    # Raw REST call against the os-security-groups extension endpoint.
    resp, body = nclient.client.get('/servers/%s/os-security-groups'
                                    % instance_id)
    if body:
        # Wrap data in SG objects as novaclient would.
        sg_objs = [NovaSecurityGroup(nclient.security_groups, sg, loaded=True)
                   for sg in body.get('security_groups', [])]
        # Then wrap novaclient's object with our own. Yes, sadly wrapping
        # with two layers of objects is necessary.
        security_groups = [SecurityGroup(sg) for sg in sg_objs]
        # Package up the rules, as well.
        for sg in security_groups:
            rule_objects = [SecurityGroupRule(rule) for rule in sg.rules]
            sg.rules = rule_objects
    return security_groups
def server_add_security_group(request, instance_id, security_group_name):
    """Add a security group to a running instance."""
    return novaclient(request).servers.add_security_group(instance_id,
                                                          security_group_name)


def server_remove_security_group(request, instance_id, security_group_name):
    """Remove a security group from a running instance."""
    return novaclient(request).servers.remove_security_group(
        instance_id,
        security_group_name)


def server_pause(request, instance_id):
    """Pause an instance."""
    novaclient(request).servers.pause(instance_id)


def server_unpause(request, instance_id):
    """Unpause a paused instance."""
    novaclient(request).servers.unpause(instance_id)


def server_suspend(request, instance_id):
    """Suspend an instance."""
    novaclient(request).servers.suspend(instance_id)


def server_resume(request, instance_id):
    """Resume a suspended instance."""
    novaclient(request).servers.resume(instance_id)


def server_reboot(request, instance_id, hardness=REBOOT_HARD):
    """Reboot an instance; *hardness* selects soft vs. hard reboot."""
    server = server_get(request, instance_id)
    server.reboot(hardness)


def server_update(request, instance_id, name):
    """Rename an instance; returns True on the (usual) empty response."""
    response = novaclient(request).servers.update(instance_id, name=name)
    # TODO(gabriel): servers.update method doesn't return anything. :-(
    if response is None:
        return True
    else:
        return response


def server_migrate(request, instance_id):
    """Migrate an instance to another host (admin action)."""
    novaclient(request).servers.migrate(instance_id)


def server_confirm_resize(request, instance_id):
    """Confirm a pending resize/migration."""
    novaclient(request).servers.confirm_resize(instance_id)


def server_revert_resize(request, instance_id):
    """Revert a pending resize/migration."""
    novaclient(request).servers.revert_resize(instance_id)
def tenant_quota_get(request, tenant_id):
    """Return the tenant's quota set, wrapped in QuotaSet."""
    return QuotaSet(novaclient(request).quotas.get(tenant_id))


def tenant_quota_update(request, tenant_id, **kwargs):
    """Update individual quota values for a tenant (kwargs are quota names)."""
    novaclient(request).quotas.update(tenant_id, **kwargs)


def default_quota_get(request, tenant_id):
    """Return the default quota set, wrapped in QuotaSet."""
    return QuotaSet(novaclient(request).quotas.defaults(tenant_id))


def usage_get(request, tenant_id, start, end):
    """Return usage data for one tenant over the [start, end] period."""
    return NovaUsage(novaclient(request).usage.get(tenant_id, start, end))


def usage_list(request, start, end):
    """Return usage data for all tenants over the [start, end] period."""
    return [NovaUsage(u) for u in
            novaclient(request).usage.list(start, end, True)]
def security_group_list(request):
    """List security groups, wrapped in dashboard SecurityGroup objects."""
    return [SecurityGroup(g) for g
            in novaclient(request).security_groups.list()]


def security_group_get(request, sg_id):
    """Return one security group by id."""
    return SecurityGroup(novaclient(request).security_groups.get(sg_id))


def security_group_create(request, name, desc):
    """Create a security group with the given name and description."""
    return SecurityGroup(novaclient(request).security_groups.create(name,
                                                                    desc))


def security_group_delete(request, security_group_id):
    """Delete a security group."""
    novaclient(request).security_groups.delete(security_group_id)


def security_group_rule_create(request, parent_group_id, ip_protocol=None,
                               from_port=None, to_port=None, cidr=None,
                               group_id=None):
    """Create a rule on a security group.

    The traffic source is either a CIDR (*cidr*) or another security
    group (*group_id*).
    """
    sg = novaclient(request).security_group_rules.create(parent_group_id,
                                                         ip_protocol,
                                                         from_port,
                                                         to_port,
                                                         cidr,
                                                         group_id)
    return SecurityGroupRule(sg)


def security_group_rule_delete(request, security_group_rule_id):
    """Delete a security group rule."""
    novaclient(request).security_group_rules.delete(security_group_rule_id)
def virtual_interfaces_list(request, instance_id):
    """List the virtual network interfaces attached to an instance."""
    return novaclient(request).virtual_interfaces.list(instance_id)


def get_x509_credentials(request):
    """Create and return new x509 credentials for the current user."""
    return novaclient(request).certs.create()


def get_x509_root_certificate(request):
    """Return the x509 root certificate."""
    return novaclient(request).certs.get()


def instance_volume_attach(request, volume_id, instance_id, device):
    """Attach a volume to an instance at the given device path."""
    return novaclient(request).volumes.create_server_volume(instance_id,
                                                            volume_id,
                                                            device)


def instance_volume_detach(request, instance_id, att_id):
    """Detach the volume attachment *att_id* from an instance."""
    return novaclient(request).volumes.delete_server_volume(instance_id,
                                                            att_id)


def instance_volumes_list(request, instance_id):
    """List volumes attached to an instance, with display names filled in.

    Nova only returns attachment data; the human-readable name is looked
    up per-volume from cinder.
    """
    # Imported here to avoid a circular import with the cinder api module.
    from openstack_dashboard.api.cinder import cinderclient
    volumes = novaclient(request).volumes.get_server_volumes(instance_id)
    for volume in volumes:
        volume_data = cinderclient(request).volumes.get(volume.id)
        volume.name = volume_data.display_name
    return volumes
def tenant_absolute_limits(request, reserved=False):
    """Return the tenant's absolute limits as a {name: value} mapping.

    Nova reports unlimited quotas with the sentinel value -1; those are
    exposed as float("inf") so callers can compare numerically.
    """
    absolute = novaclient(request).limits.get(reserved=reserved).absolute
    return dict(
        (limit.name, float("inf") if limit.value == -1 else limit.value)
        for limit in absolute)
| apache-2.0 |
sanjeevtripurari/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/staticfiles/storage.py | 90 | 12464 | from __future__ import unicode_literals
import hashlib
import os
import posixpath
import re
from django.conf import settings
from django.core.cache import (get_cache, InvalidCacheBackendError,
cache as default_cache)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.utils.six.moves.urllib.parse import unquote, urlsplit, urlunsplit, urldefrag
from django.utils._os import upath
from django.contrib.staticfiles.utils import check_settings, matches_patterns
class StaticFilesStorage(FileSystemStorage):
    """
    Standard file system storage for static files.

    The defaults for ``location`` and ``base_url`` are
    ``STATIC_ROOT`` and ``STATIC_URL``.
    """
    def __init__(self, location=None, base_url=None, *args, **kwargs):
        if location is None:
            location = settings.STATIC_ROOT
        if base_url is None:
            base_url = settings.STATIC_URL
        check_settings(base_url)
        super(StaticFilesStorage, self).__init__(location, base_url,
                                                 *args, **kwargs)
        # FileSystemStorage fallbacks to MEDIA_ROOT when location
        # is empty, so we restore the empty value.
        if not location:
            self.base_location = None
            self.location = None

    def path(self, name):
        # Fail loudly when STATIC_ROOT is unset rather than silently
        # resolving paths against MEDIA_ROOT.
        if not self.location:
            raise ImproperlyConfigured("You're using the staticfiles app "
                                       "without having set the STATIC_ROOT "
                                       "setting to a filesystem path.")
        return super(StaticFilesStorage, self).path(name)
class CachedFilesMixin(object):
    """
    Storage mixin that saves files under names containing a hash of their
    content (for cache busting) and rewrites url()/@import references in
    CSS files to point at the hashed names. Computed hashed names are
    memoized in the 'staticfiles' cache (falling back to the default cache).
    """
    default_template = """url("%s")"""
    patterns = (
        ("*.css", (
            r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
            (r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
        )),
    )

    def __init__(self, *args, **kwargs):
        super(CachedFilesMixin, self).__init__(*args, **kwargs)
        try:
            self.cache = get_cache('staticfiles')
        except InvalidCacheBackendError:
            # Use the default backend
            self.cache = default_cache
        self._patterns = SortedDict()
        # Compile the substitution patterns once. An entry may be a bare
        # regex (paired with default_template) or a (regex, template) tuple.
        for extension, patterns in self.patterns:
            for pattern in patterns:
                if isinstance(pattern, (tuple, list)):
                    pattern, template = pattern
                else:
                    template = self.default_template
                compiled = re.compile(pattern, re.IGNORECASE)
                self._patterns.setdefault(extension, []).append((compiled, template))

    def file_hash(self, name, content=None):
        """
        Returns a hash of the file with the given name and optional content,
        or None when no content is available.
        """
        if content is None:
            return None
        md5 = hashlib.md5()
        for chunk in content.chunks():
            md5.update(chunk)
        # Only the first 12 hex digits are embedded in the hashed filename.
        return md5.hexdigest()[:12]

    def hashed_name(self, name, content=None):
        """
        Returns the content-hashed variant of *name* (e.g. css/x.abc123def456.css).
        Opens the file itself when *content* is not supplied.
        """
        parsed_name = urlsplit(unquote(name))
        clean_name = parsed_name.path.strip()
        opened = False
        if content is None:
            if not self.exists(clean_name):
                raise ValueError("The file '%s' could not be found with %r." %
                                 (clean_name, self))
            try:
                content = self.open(clean_name)
            except IOError:
                # Handle directory paths and fragments
                return name
            opened = True
        try:
            file_hash = self.file_hash(clean_name, content)
        finally:
            # Only close files we opened ourselves.
            if opened:
                content.close()
        path, filename = os.path.split(clean_name)
        root, ext = os.path.splitext(filename)
        if file_hash is not None:
            file_hash = ".%s" % file_hash
        hashed_name = os.path.join(path, "%s%s%s" %
                                   (root, file_hash, ext))
        unparsed_name = list(parsed_name)
        unparsed_name[2] = hashed_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        if '?#' in name and not unparsed_name[3]:
            unparsed_name[2] += '?'
        return urlunsplit(unparsed_name)

    def cache_key(self, name):
        """Returns the cache key under which *name*'s hashed name is stored."""
        return 'staticfiles:%s' % hashlib.md5(force_bytes(name)).hexdigest()

    def url(self, name, force=False):
        """
        Returns the real URL in DEBUG mode (unless *force*), otherwise the
        URL of the content-hashed name, consulting/populating the cache.
        """
        if settings.DEBUG and not force:
            hashed_name, fragment = name, ''
        else:
            clean_name, fragment = urldefrag(name)
            if urlsplit(clean_name).path.endswith('/'):  # don't hash paths
                hashed_name = name
            else:
                cache_key = self.cache_key(name)
                hashed_name = self.cache.get(cache_key)
                if hashed_name is None:
                    hashed_name = self.hashed_name(clean_name).replace('\\', '/')
                    # set the cache if there was a miss
                    # (e.g. if cache server goes down)
                    self.cache.set(cache_key, hashed_name)
        final_url = super(CachedFilesMixin, self).url(hashed_name)
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        query_fragment = '?#' in name  # [sic!]
        if fragment or query_fragment:
            urlparts = list(urlsplit(final_url))
            if fragment and not urlparts[4]:
                urlparts[4] = fragment
            if query_fragment and not urlparts[3]:
                urlparts[2] += '?'
            final_url = urlunsplit(urlparts)
        return unquote(final_url)

    def url_converter(self, name, template=None):
        """
        Returns the custom URL converter for the given file name.
        """
        if template is None:
            template = self.default_template

        def converter(matchobj):
            """
            Converts the matched URL depending on the parent level (`..`)
            and returns the normalized and hashed URL using the url method
            of the storage.
            """
            matched, url = matchobj.groups()
            # Completely ignore http(s) prefixed URLs,
            # fragments and data-uri URLs
            if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
                return matched
            name_parts = name.split(os.sep)
            # Using posix normpath here to remove duplicates
            url = posixpath.normpath(url)
            url_parts = url.split('/')
            parent_level, sub_level = url.count('..'), url.count('/')
            if url.startswith('/'):
                sub_level -= 1
                url_parts = url_parts[1:]
            if parent_level or not url.startswith('/'):
                start, end = parent_level + 1, parent_level
            else:
                if sub_level:
                    if sub_level == 1:
                        parent_level -= 1
                    start, end = parent_level, 1
                else:
                    start, end = 1, sub_level - 1
            # Resolve the referenced file relative to *name*, hash it, then
            # re-join the hashed basename onto the original relative path.
            joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
            hashed_url = self.url(unquote(joined_result), force=True)
            file_name = hashed_url.split('/')[-1:]
            relative_url = '/'.join(url.split('/')[:-1] + file_name)
            # Return the hashed version to the file
            return template % unquote(relative_url)
        return converter

    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given SortedDict of files (called from collectstatic).
        Processing is actually two separate operations:
        1. renaming files to include a hash of their content for cache-busting,
           and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
           refer to the cache-busting filenames.
        If either of these are performed on a file, then that file is considered
        post-processed. Yields (name, hashed_name, processed) tuples.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return
        # where to store the new paths
        hashed_paths = {}
        # build a list of adjustable files
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        adjustable_paths = [path for path in paths if matches(path)]
        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths.keys(), key=path_level, reverse=True):
            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:
                # generate the hash with the original content, even for
                # adjustable files.
                hashed_name = self.hashed_name(name, original_file)
                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)
                hashed_file_exists = self.exists(hashed_name)
                processed = False
                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    content = original_file.read().decode(settings.FILE_CHARSET)
                    for patterns in self._patterns.values():
                        for pattern, template in patterns:
                            converter = self.url_converter(name, template)
                            try:
                                content = pattern.sub(converter, content)
                            except ValueError as exc:
                                yield name, None, exc
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(force_bytes(content))
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = force_text(saved_name.replace('\\', '/'))
                    processed = True
                else:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = force_text(saved_name.replace('\\', '/'))
                # and then set the cache accordingly
                hashed_paths[self.cache_key(name.replace('\\', '/'))] = hashed_name
                yield name, hashed_name, processed
        # Finally set the cache
        self.cache.set_many(hashed_paths)
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
    """
    A static file system storage backend which also saves
    hashed copies of the files it saves.

    All behavior comes from the two bases; the mixin must precede the
    storage class in the MRO so its url()/post_process() take effect.
    """
    pass
class AppStaticStorage(FileSystemStorage):
    """
    A file system storage backend that takes an app module and works
    for the ``static`` directory of it.
    """
    prefix = None
    source_dir = 'static'

    def __init__(self, app, *args, **kwargs):
        """
        Returns a static file storage if available in the given app.

        *app* is the dotted path of the app module; the storage location
        becomes <app package dir>/<source_dir>.
        """
        # app is the actual app module
        mod = import_module(app)
        mod_path = os.path.dirname(upath(mod.__file__))
        location = os.path.join(mod_path, self.source_dir)
        super(AppStaticStorage, self).__init__(location, *args, **kwargs)
class ConfiguredStorage(LazyObject):
    # Lazily instantiate the class named by settings.STATICFILES_STORAGE on
    # first attribute access, so settings need not be ready at import time.
    def _setup(self):
        self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()


# Module-level singleton used throughout the staticfiles app.
staticfiles_storage = ConfiguredStorage()
| apache-2.0 |
dulems/hue | desktop/core/ext-py/guppy-0.1.10/guppy/etc/xterm.py | 37 | 2082 | #._cv_part xterm
# Run an xterm on current process or a forked process
# Adapted from pty.py in Python 1.5.2 distribution.
# The pty.fork() couldn't be used because it didn't return
# the pty name needed by xterm.
# I couldn't import pty.py to use master_open because it didn't find termios.
import os, sys, FCNTL
# We couldn't find termios, so hard-code the POSIX standard fd numbers.
STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO = 0, 1, 2
# Open pty master. Returns (master_fd, tty_name). SGI and Linux/BSD version.
# Copied from pty.py from Python 1.5.2. /SN
def master_open():
    """Open a pty master. Returns (master_fd, tty_name).

    SGI and Linux/BSD version: tries SGI's _getpty first, then scans the
    BSD-style /dev/pty[p-zP-T][0-9a-f] namespace for a free device.
    (Python 2 syntax throughout; copied from pty.py in Python 1.5.2.)
    """
    try:
        import sgi
    except ImportError:
        pass
    else:
        try:
            # 0666: world read/write permissions for the new pty
            tty_name, master_fd = sgi._getpty(FCNTL.O_RDWR, 0666, 0)
        except IOError, msg:
            raise os.error, msg
        return master_fd, tty_name
    for x in 'pqrstuvwxyzPQRST':
        for y in '0123456789abcdef':
            pty_name = '/dev/pty' + x + y
            try:
                fd = os.open(pty_name, FCNTL.O_RDWR)
            except os.error:
                # Device busy or absent; try the next candidate.
                continue
            # The slave side shares the same suffix under /dev/tty.
            return (fd, '/dev/tty' + x + y)
    raise os.error, 'out of pty devices'
# Open the pty slave. Acquire the controlling terminal.
# Returns file descriptor. Linux version. (Should be universal? --Guido)
# Copied from pty.py from Python 1.5.2. /SN
def slave_open(tty_name):
    """Open the pty slave for *tty_name* and return its file descriptor.

    Acquires the controlling terminal as a side effect (Linux behavior);
    copied from pty.py in Python 1.5.2.
    """
    return os.open(tty_name, FCNTL.O_RDWR)
def xterm(prog = None, options=''):
    """Attach this process's stdio to a new pty and display it via xterm.

    The caller (fork parent) keeps running with stdin/stdout/stderr
    redirected onto the pty slave, then optionally runs *prog*; the forked
    child execs ``xterm -S`` connected to the pty master so the terminal
    window shows the parent's I/O. Returns the fork() result (the child
    pid in the parent branch).
    """
    master_fd, tty_name = master_open()
    pid = os.fork()
    if pid:
        # Parent: acquire controlling terminal.
        slave_fd = slave_open(tty_name)
        # Slave becomes stdin/stdout/stderr of this process.
        os.dup2(slave_fd, STDIN_FILENO)
        os.dup2(slave_fd, STDOUT_FILENO)
        os.dup2(slave_fd, STDERR_FILENO)
        if (slave_fd > STDERR_FILENO):
            os.close (slave_fd)
        os.close(master_fd)
        sys.stdin.readline() # Throw away an init string from xterm
        if prog is not None:
            prog()
    else:
        # Child: detach into a new session, then run xterm. -S hands xterm
        # the two-char pty suffix and the already-open master fd.
        os.setsid()
        cmd = 'xterm %s -S%s%d'%(options, tty_name[-2:], master_fd)
        os.system(cmd)
        #os.waitpid(pid, 0)
    return pid
def forkxterm(prog = None, options=''):
    """Run xterm() in a detached child process.

    Returns the child's pid in the caller. The child becomes a session
    leader, runs xterm(), and _exit()s on xterm's own grandchild branch
    (where xterm() returned 0) so it never falls back into caller code.
    """
    pid = os.fork()
    if pid:
        return pid
    else:
        os.setsid()
        pid = xterm(prog, options)
        if not pid:
            os._exit(0)
def hello():
    """Demo program for xterm(): print a greeting, then idle forever so
    the terminal window stays open."""
    import time
    # Parenthesized print is valid in Python 2 (single argument) and
    # also parses under Python 3.
    print('hello')
    while 1:
        # Sleep instead of a bare `pass` busy-loop, which pinned a CPU
        # core at 100% while merely keeping the process alive.
        time.sleep(1)
| apache-2.0 |
dagwieers/ansible | lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration.py | 14 | 8073 | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_postgresqlconfiguration
version_added: "2.8"
short_description: Manage Azure PostgreSQL Configuration.
description:
- Update or reset Azure PostgreSQL Configuration setting.
options:
resource_group:
description:
- The name of the resource group that contains the resource.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- Setting name.
required: True
value:
description:
- Setting value.
state:
description:
- Assert the state of the PostgreSQL setting. Use C(present) to update setting, or
C(absent) to reset to default value.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Update PostgreSQL Server setting
azure_rm_postgresqlconfiguration:
resource_group: myResourceGroup
server_name: myServer
name: deadlock_timeout
value: 2000
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/myServer/confi
gurations/event_scheduler"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
    from msrestazure.azure_exceptions import CloudError
    from msrest.polling import LROPoller
    # Fixed: this module manages PostgreSQL, so import the PostgreSQL
    # management client (MySQLManagementClient was a copy/paste leftover
    # from the MySQL module and does not exist in the postgresql package).
    from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
    from msrest.serialization import Model
except ImportError:
    # This is handled in azure_rm_common
    pass
class Actions:
    """Enumeration of the actions exec_module may decide to take."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMPostgreSqlConfigurations(AzureRMModuleBase):
    """Ansible module implementation: update or reset one Azure PostgreSQL
    server configuration setting. state=present sets a user-override value;
    state=absent resets the setting to its system default."""

    def __init__(self):
        # Module argument specification (mirrors the DOCUMENTATION block).
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            value=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # Populated from kwargs in exec_module().
        self.resource_group = None
        self.server_name = None
        self.name = None
        self.value = None

        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMPostgreSqlConfigurations, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                              supports_check_mode=True,
                                                              supports_tags=False)

    def exec_module(self, **kwargs):
        """Main module logic: diff desired vs. current state, then
        create/update, delete (reset), or do nothing. Returns the results
        dict with 'changed' and (when available) 'id'."""
        # Copy recognized module arguments onto matching attributes.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])

        old_response = None
        response = None

        old_response = self.get_configuration()

        if not old_response:
            self.log("Configuration instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Configuration instance already exists")
            # 'absent' only needs a reset when the value is a user-override.
            if self.state == 'absent' and old_response['source'] == 'user-override':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if Configuration instance has to be deleted or may be updated")
                if self.value != old_response.get('value'):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the Configuration instance")

            if self.check_mode:
                # Check mode: report the would-be change without acting.
                self.results['changed'] = True
                return self.results

            response = self.create_update_configuration()

            self.results['changed'] = True
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Configuration instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_configuration()
        else:
            self.log("Configuration instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]

        return self.results

    def create_update_configuration(self):
        """Set the configuration value as a 'user-override' and return the
        resulting resource as a dict (waits on long-running operations)."""
        self.log("Creating / Updating the Configuration instance {0}".format(self.name))

        try:
            response = self.postgresql_client.configurations.create_or_update(resource_group_name=self.resource_group,
                                                                              server_name=self.server_name,
                                                                              configuration_name=self.name,
                                                                              value=self.value,
                                                                              source='user-override')
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

        except CloudError as exc:
            self.log('Error attempting to create the Configuration instance.')
            self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_configuration(self):
        """Reset the setting by writing it back with source='system-default'
        (there is no true delete for server configurations)."""
        self.log("Deleting the Configuration instance {0}".format(self.name))
        try:
            response = self.postgresql_client.configurations.create_or_update(resource_group_name=self.resource_group,
                                                                              server_name=self.server_name,
                                                                              configuration_name=self.name,
                                                                              source='system-default')
        except CloudError as e:
            self.log('Error attempting to delete the Configuration instance.')
            self.fail("Error deleting the Configuration instance: {0}".format(str(e)))

        return True

    def get_configuration(self):
        """Fetch the current configuration; return it as a dict, or False
        when the setting does not exist."""
        self.log("Checking if the Configuration instance {0} is present".format(self.name))
        found = False
        try:
            response = self.postgresql_client.configurations.get(resource_group_name=self.resource_group,
                                                                 server_name=self.server_name,
                                                                 configuration_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Configuration instance : {0} found".format(response.name))
        except CloudError as e:
            self.log('Did not find the Configuration instance.')
        if found is True:
            return response.as_dict()

        return False
def main():
    """Main execution"""
    # Instantiating the module class runs the full Ansible module lifecycle
    # (argument parsing, exec_module, exit/fail) in its base-class __init__.
    AzureRMPostgreSqlConfigurations()


if __name__ == '__main__':
    main()
| gpl-3.0 |
rohitwaghchaure/vestasi-frappe | frappe/core/doctype/custom_field/custom_field.py | 21 | 3197 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr
from frappe import _
from frappe.model.document import Document
class CustomField(Document):
    """Doctype controller for user-defined fields added to existing doctypes.

    Document name is '<doctype>-<fieldname>'; saving keeps the target
    doctype's schema and property setters in sync.
    """

    def autoname(self):
        """Derive the fieldname (if unset) and name the doc '<dt>-<fieldname>'."""
        self.set_fieldname()
        self.name = self.dt + "-" + self.fieldname

    def set_fieldname(self):
        """Generate fieldname from the label: lowercased, spaces to
        underscores, everything but [a-z0-9_] stripped."""
        if not self.fieldname:
            if not self.label:
                frappe.throw(_("Label is mandatory"))
            # remove special characters from fieldname
            # Fixed: the predicate previously ended with `or '_'`, a truthy
            # string literal, so the filter kept EVERY character and special
            # characters were never removed. Compare against '_' instead.
            self.fieldname = filter(lambda x: x.isdigit() or x.isalpha() or x == '_',
                cstr(self.label).lower().replace(' ', '_'))

    def validate(self):
        """Default idx to the end of the doctype's field list; require a fieldname."""
        if not self.idx:
            self.idx = len(frappe.get_meta(self.dt).get("fields")) + 1
        if not self.fieldname:
            frappe.throw(_("Fieldname not set for Custom Field"))

    def on_update(self):
        """Revalidate the target doctype, record field placement, and
        update the database schema for the new column."""
        frappe.clear_cache(doctype=self.dt)
        if not getattr(self, "ignore_validate", False):
            # validate field
            from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
            validate_fields_for_doctype(self.dt)

        # create property setter to emulate insert after
        self.create_property_setter()

        # update the schema
        # if not frappe.flags.in_test:
        from frappe.model.db_schema import updatedb
        updatedb(self.dt)

    def on_trash(self):
        """Remove associated property setters and clear the doctype cache."""
        # delete property setter entries
        frappe.db.sql("""\
            DELETE FROM `tabProperty Setter`
            WHERE doc_type = %s
            AND field_name = %s""",
            (self.dt, self.fieldname))
        frappe.clear_cache(doctype=self.dt)

    def create_property_setter(self):
        """Store this field's position (after *insert_after*) as a
        'previous_field' property setter, replacing any stale one."""
        if not self.insert_after: return
        dt_meta = frappe.get_meta(self.dt)
        if not dt_meta.get_field(self.insert_after):
            frappe.throw(_("Insert After field '{0}' mentioned in Custom Field '{1}', does not exist")
                .format(dt_meta.get_label(self.insert_after), self.label), frappe.DoesNotExistError)

        frappe.db.sql("""\
            DELETE FROM `tabProperty Setter`
            WHERE doc_type = %s
            AND field_name = %s
            AND property = 'previous_field'""", (self.dt, self.fieldname))

        frappe.make_property_setter({
            "doctype":self.dt,
            "fieldname": self.fieldname,
            "property": "previous_field",
            "value": self.insert_after
        }, validate_fields_for_doctype=False)
@frappe.whitelist()
def get_fields_label(doctype=None):
    """Return [{'value': fieldname, 'label': translated label}, ...] for *doctype*."""
    return [{"value": df.fieldname or "", "label": _(df.label or "")} for df in frappe.get_meta(doctype).get("fields")]
def create_custom_field_if_values_exist(doctype, df):
    """Create the custom field only if its table column already holds data.

    Used by patches/migrations: recreates the Custom Field definition for
    a legacy column when at least one row has a non-empty value in it.
    """
    df = frappe._dict(df)
    if df.fieldname in frappe.db.get_table_columns(doctype) and \
        frappe.db.sql("""select count(*) from `tab{doctype}`
            where ifnull({fieldname},'')!=''""".format(doctype=doctype, fieldname=df.fieldname))[0][0]:
        create_custom_field(doctype, df)
def create_custom_field(doctype, df):
    """Insert a Custom Field document for *doctype* from the fields in *df*,
    unless one with the same fieldname already exists (idempotent)."""
    if not frappe.db.get_value("Custom Field", {"dt": doctype, "fieldname": df.fieldname}):
        frappe.get_doc({
            "doctype":"Custom Field",
            "dt": doctype,
            "permlevel": df.get("permlevel") or 0,
            "label": df.get("label"),
            "fieldname": df.get("fieldname"),
            "fieldtype": df.get("fieldtype"),
            "options": df.get("options"),
            "insert_after": df.get("insert_after"),
            "print_hide": df.get("print_hide")
        }).insert()
| mit |
sandan/sqlalchemy | examples/large_collection/large_collection.py | 32 | 3291 |
from sqlalchemy import (MetaData, Table, Column, Integer, String, ForeignKey,
create_engine)
from sqlalchemy.orm import (mapper, relationship, sessionmaker)
meta = MetaData()

# Parent table.
org_table = Table('organizations', meta,
    Column('org_id', Integer, primary_key=True),
    Column('org_name', String(50), nullable=False, key='name'),
    mysql_engine='InnoDB')

# Child table. ON DELETE CASCADE lets the database remove member rows
# itself when their organization row is deleted (see passive_deletes below).
member_table = Table('members', meta,
    Column('member_id', Integer, primary_key=True),
    Column('member_name', String(50), nullable=False, key='name'),
    Column('org_id', Integer,
           ForeignKey('organizations.org_id', ondelete="CASCADE")),
    mysql_engine='InnoDB')
class Organization(object):
    """Parent entity; its 'members' collection is mapped as a dynamic query."""
    def __init__(self, name):
        self.name = name
class Member(object):
    """Child entity owned by an Organization; deleted with its parent."""
    def __init__(self, name):
        self.name = name
# Classic (non-declarative) mapper configuration.
mapper(Organization, org_table, properties = {
    'members' : relationship(Member,
        # Organization.members will be a Query object - no loading
        # of the entire collection occurs unless requested
        lazy="dynamic",
        # Member objects "belong" to their parent, are deleted when
        # removed from the collection
        cascade="all, delete-orphan",
        # "delete, delete-orphan" cascade does not load in objects on delete,
        # allows ON DELETE CASCADE to handle it.
        # this only works with a database that supports ON DELETE CASCADE -
        # *not* sqlite or MySQL with MyISAM
        passive_deletes=True,
    )
})

# Member needs no extra properties; the relationship is one-directional.
mapper(Member, member_table)
if __name__ == '__main__':
    # Demo script: requires a reachable PostgreSQL database (the cascade
    # behavior relies on ON DELETE CASCADE support).
    engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
    meta.create_all(engine)

    # expire_on_commit=False means the session contents
    # will not get invalidated after commit.
    sess = sessionmaker(engine, expire_on_commit=False)()

    # create org with some members
    org = Organization('org one')
    org.members.append(Member('member one'))
    org.members.append(Member('member two'))
    org.members.append(Member('member three'))

    sess.add(org)

    print("-------------------------\nflush one - save org + 3 members\n")
    sess.commit()

    # the 'members' collection is a Query. it issues
    # SQL as needed to load subsets of the collection.
    print("-------------------------\nload subset of members\n")
    members = org.members.filter(member_table.c.name.like('%member t%')).all()
    print(members)

    # new Members can be appended without any
    # SQL being emitted to load the full collection
    org.members.append(Member('member four'))
    org.members.append(Member('member five'))
    org.members.append(Member('member six'))

    print("-------------------------\nflush two - save 3 more members\n")
    sess.commit()

    # delete the object. Using ON DELETE CASCADE
    # SQL is only emitted for the head row - the Member rows
    # disappear automatically without the need for additional SQL.
    sess.delete(org)
    print("-------------------------\nflush three - delete org, delete members in one statement\n")
    sess.commit()

    print("-------------------------\nno Member rows should remain:\n")
    print(sess.query(Member).count())
    sess.close()

    print("------------------------\ndone. dropping tables.")
meta.drop_all(engine) | mit |
VcamX/grpc | src/python/grpcio/tests/unit/framework/common/test_control.py | 1 | 3314 | # Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for instructing systems under test to block or fail."""
import abc
import contextlib
import threading
import six
class Defect(Exception):
    """Simulates a programming defect raised into a system under test.

    Use of a standard exception type is too easily misconstrued as an actual
    defect in either the test infrastructure or the system under test.
    """
class Control(six.with_metaclass(abc.ABCMeta)):
    """An object that accepts program control from a system under test.

    Systems under test passed a Control should call its control() method
    frequently during execution. The control() method may block, raise an
    exception, or do nothing, all according to the enclosing test's desire for
    the system under test to simulate hanging, failing, or functioning.
    """

    @abc.abstractmethod
    def control(self):
        """Potentially does anything."""
        raise NotImplementedError()
class PauseFailControl(Control):
    """Control whose pause/fail behavior is toggled via context managers.

    While code is inside pause(), calls to control() block; while inside
    fail(), calls to control() raise Defect.
    """

    def __init__(self):
        self._cv = threading.Condition()
        self._is_paused = False
        self._should_fail = False

    def control(self):
        """Raise Defect if failure is requested, otherwise block while paused."""
        with self._cv:
            if self._should_fail:
                raise Defect()
            while self._is_paused:
                self._cv.wait()

    @contextlib.contextmanager
    def pause(self):
        """Pauses code under control while controlling code is in context."""
        with self._cv:
            self._is_paused = True
        yield
        with self._cv:
            self._is_paused = False
            self._cv.notify_all()

    @contextlib.contextmanager
    def fail(self):
        """Fails code under control while controlling code is in context."""
        with self._cv:
            self._should_fail = True
        yield
        with self._cv:
            self._should_fail = False
| bsd-3-clause |
fengmk2/node-gyp | gyp/pylib/gyp/generator/gypd.py | 912 | 3325 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_DIRNAME',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
# Map each identity variable to an early-phase reference to itself, e.g.
# 'PRODUCT_DIR' -> '<(PRODUCT_DIR)', so it survives expansion verbatim.
for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Write one pretty-printed .gypd file per input .gyp file.

  Args:
    target_list: list of qualified target names ('path/file.gyp:target#ts').
    target_dicts: unused here, but part of the generator interface.
    data: dict mapping each input .gyp path to its fully expanded content.
    params: generator parameters; params['options'].suffix is inserted
        before the '.gypd' extension of every output file.
  """
  output_files = {}
  for qualified_target in target_list:
    input_file = gyp.common.ParseQualifiedTarget(qualified_target)[0]
    # Only targets that came from .gyp files are echoed back out.
    if input_file[-4:] != '.gyp':
      continue
    input_file_stem = input_file[:-4]
    output_file = input_file_stem + params['options'].suffix + '.gypd'
    if output_file not in output_files:
      output_files[output_file] = input_file
  for output_file, input_file in output_files.items():
    # 'with' guarantees the file is closed even if pprint raises (the
    # original leaked the handle on error), and items() works on both
    # Python 2 and 3, unlike the former iteritems().
    with open(output_file, 'w') as output:
      pprint.pprint(data[input_file], output)
| mit |
evansd/django | tests/proxy_models/tests.py | 44 | 15782 | from django.contrib import admin
from django.contrib.auth.models import User as AuthUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, management
from django.db import DEFAULT_DB_ALIAS, models
from django.db.models import signals
from django.test import TestCase, override_settings
from django.test.utils import isolate_apps
from django.urls import reverse
from .admin import admin as force_admin_model_registration # NOQA
from .models import (
Abstract, BaseUser, Bug, Country, Improvement, Issue, LowerStatusPerson,
MultiUserProxy, MyPerson, MyPersonProxy, OtherPerson, Person, ProxyBug,
ProxyImprovement, ProxyProxyBug, ProxyTrackerUser, State, StateProxy,
StatusPerson, TrackerUser, User, UserProxy, UserProxyProxy,
)
class ProxyModelTests(TestCase):
    """Core proxy-model behaviour: shared tables and SQL, managers,
    inheritance restrictions, signals, content types, reverse relations,
    select_related across proxies, and fixture loading."""
    def test_same_manager_queries(self):
        """
        The MyPerson model should be generating the same database queries as
        the Person model (when the same manager is used in each case).
        """
        my_person_sql = MyPerson.other.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        person_sql = Person.objects.order_by("name").query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        self.assertEqual(my_person_sql, person_sql)
    def test_inheritance_new_table(self):
        """
        The StatusPerson models should have its own table (it's using ORM-level
        inheritance).
        """
        sp_sql = StatusPerson.objects.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        p_sql = Person.objects.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        self.assertNotEqual(sp_sql, p_sql)
    def test_basic_proxy(self):
        """
        Creating a Person makes them accessible through the MyPerson proxy.
        """
        person = Person.objects.create(name="Foo McBar")
        self.assertEqual(len(Person.objects.all()), 1)
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
        self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
    def test_no_proxy(self):
        """
        Person is not proxied by StatusPerson subclass.
        """
        Person.objects.create(name="Foo McBar")
        self.assertEqual(list(StatusPerson.objects.all()), [])
    def test_basic_proxy_reverse(self):
        """
        A new MyPerson also shows up as a standard Person.
        """
        MyPerson.objects.create(name="Bazza del Frob")
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(len(Person.objects.all()), 1)
        LowerStatusPerson.objects.create(status="low", name="homer")
        lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
        self.assertEqual(lsps, ["homer"])
    def test_correct_type_proxy_of_proxy(self):
        """
        Correct type when querying a proxy of proxy
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        pp = sorted(mpp.name for mpp in MyPersonProxy.objects.all())
        self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])
    def test_proxy_included_in_ancestors(self):
        """
        Proxy models are included in the ancestors for a model's DoesNotExist
        and MultipleObjectsReturned
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        # id__lt=max_id + 1 matches every row, guaranteeing the
        # MultipleObjectsReturned path is exercised.
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
        with self.assertRaises(Person.DoesNotExist):
            MyPersonProxy.objects.get(name='Zathras')
        with self.assertRaises(Person.MultipleObjectsReturned):
            MyPersonProxy.objects.get(id__lt=max_id + 1)
        with self.assertRaises(Person.DoesNotExist):
            StatusPerson.objects.get(name='Zathras')
        StatusPerson.objects.create(name='Bazza Jr.')
        StatusPerson.objects.create(name='Foo Jr.')
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
        with self.assertRaises(Person.MultipleObjectsReturned):
            StatusPerson.objects.get(id__lt=max_id + 1)
    def test_abstract_base_with_model_fields(self):
        msg = "Abstract base class containing model fields not permitted for proxy model 'NoAbstract'."
        with self.assertRaisesMessage(TypeError, msg):
            class NoAbstract(Abstract):
                class Meta:
                    proxy = True
    def test_too_many_concrete_classes(self):
        msg = "Proxy model 'TooManyBases' has more than one non-abstract model base class."
        with self.assertRaisesMessage(TypeError, msg):
            class TooManyBases(User, Person):
                class Meta:
                    proxy = True
    def test_no_base_classes(self):
        msg = "Proxy model 'NoBaseClasses' has no non-abstract model base class."
        with self.assertRaisesMessage(TypeError, msg):
            class NoBaseClasses(models.Model):
                class Meta:
                    proxy = True
    @isolate_apps('proxy_models')
    def test_new_fields(self):
        # Declaring a concrete field on a proxy is a check error (E017),
        # not an exception at class-creation time.
        class NoNewFields(Person):
            newfield = models.BooleanField()
            class Meta:
                proxy = True
        errors = NoNewFields.check()
        expected = [
            checks.Error(
                "Proxy model 'NoNewFields' contains model fields.",
                id='models.E017',
            )
        ]
        self.assertEqual(errors, expected)
    @override_settings(TEST_SWAPPABLE_MODEL='proxy_models.AlternateModel')
    @isolate_apps('proxy_models')
    def test_swappable(self):
        class SwappableModel(models.Model):
            class Meta:
                swappable = 'TEST_SWAPPABLE_MODEL'
        class AlternateModel(models.Model):
            pass
        # You can't proxy a swapped model
        with self.assertRaises(TypeError):
            class ProxyModel(SwappableModel):
                class Meta:
                    proxy = True
    def test_myperson_manager(self):
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")
        resp = [p.name for p in MyPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'fred'])
        resp = [p.name for p in MyPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'fred'])
    def test_otherperson_manager(self):
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")
        resp = [p.name for p in OtherPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'wilma'])
        resp = [p.name for p in OtherPerson.excluder.all()]
        self.assertEqual(resp, ['barney', 'fred'])
        resp = [p.name for p in OtherPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'wilma'])
    def test_permissions_created(self):
        from django.contrib.auth.models import Permission
        Permission.objects.get(name="May display users information")
    def test_proxy_model_signals(self):
        """
        Test save signals for proxy models
        """
        output = []
        def make_handler(model, event):
            def _handler(*args, **kwargs):
                output.append('%s %s save' % (model, event))
            return _handler
        h1 = make_handler('MyPerson', 'pre')
        h2 = make_handler('MyPerson', 'post')
        h3 = make_handler('Person', 'pre')
        h4 = make_handler('Person', 'post')
        signals.pre_save.connect(h1, sender=MyPerson)
        signals.post_save.connect(h2, sender=MyPerson)
        signals.pre_save.connect(h3, sender=Person)
        signals.post_save.connect(h4, sender=Person)
        MyPerson.objects.create(name="dino")
        self.assertEqual(output, [
            'MyPerson pre save',
            'MyPerson post save'
        ])
        output = []
        h5 = make_handler('MyPersonProxy', 'pre')
        h6 = make_handler('MyPersonProxy', 'post')
        signals.pre_save.connect(h5, sender=MyPersonProxy)
        signals.post_save.connect(h6, sender=MyPersonProxy)
        MyPersonProxy.objects.create(name="pebbles")
        self.assertEqual(output, [
            'MyPersonProxy pre save',
            'MyPersonProxy post save'
        ])
        # Disconnect everything so the handlers don't leak into other tests.
        signals.pre_save.disconnect(h1, sender=MyPerson)
        signals.post_save.disconnect(h2, sender=MyPerson)
        signals.pre_save.disconnect(h3, sender=Person)
        signals.post_save.disconnect(h4, sender=Person)
        signals.pre_save.disconnect(h5, sender=MyPersonProxy)
        signals.post_save.disconnect(h6, sender=MyPersonProxy)
    def test_content_type(self):
        ctype = ContentType.objects.get_for_model
        self.assertIs(ctype(Person), ctype(OtherPerson))
    def test_user_proxy_models(self):
        User.objects.create(name='Bruce')
        resp = [u.name for u in User.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        resp = [u.name for u in UserProxyProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        self.assertEqual([u.name for u in MultiUserProxy.objects.all()], ['Bruce'])
    def test_proxy_for_model(self):
        self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)
    def test_concrete_model(self):
        self.assertEqual(User, UserProxyProxy._meta.concrete_model)
    def test_proxy_delete(self):
        """
        Proxy objects can be deleted
        """
        User.objects.create(name='Bruce')
        u2 = UserProxy.objects.create(name='George')
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce', 'George'])
        u2.delete()
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
    def test_select_related(self):
        """
        We can still use `select_related()` to include related models in our
        querysets.
        """
        country = Country.objects.create(name='Australia')
        State.objects.create(name='New South Wales', country=country)
        resp = [s.name for s in State.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])
        resp = [s.name for s in StateProxy.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])
        self.assertEqual(StateProxy.objects.get(name='New South Wales').name, 'New South Wales')
        resp = StateProxy.objects.select_related().get(name='New South Wales')
        self.assertEqual(resp.name, 'New South Wales')
    def test_filter_proxy_relation_reverse(self):
        tu = TrackerUser.objects.create(name='Contributor', status='contrib')
        ptu = ProxyTrackerUser.objects.get()
        issue = Issue.objects.create(assignee=tu)
        self.assertEqual(tu.issues.get(), issue)
        self.assertEqual(ptu.issues.get(), issue)
        self.assertSequenceEqual(TrackerUser.objects.filter(issues=issue), [tu])
        self.assertSequenceEqual(ProxyTrackerUser.objects.filter(issues=issue), [ptu])
    def test_proxy_bug(self):
        # End-to-end check of filtering/select_related through proxies,
        # proxies of proxies, and relations that point at proxy models.
        contributor = ProxyTrackerUser.objects.create(name='Contributor', status='contrib')
        someone = BaseUser.objects.create(name='Someone')
        Bug.objects.create(summary='fix this', version='1.1beta', assignee=contributor, reporter=someone)
        pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor', status='proxy')
        Improvement.objects.create(
            summary='improve that', version='1.1beta',
            assignee=contributor, reporter=pcontributor,
            associated_bug=ProxyProxyBug.objects.all()[0],
        )
        # Related field filter on proxy
        resp = ProxyBug.objects.get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
        # Select related + filter on proxy
        resp = ProxyBug.objects.select_related().get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
        # Proxy of proxy, select_related + filter
        resp = ProxyProxyBug.objects.select_related().get(
            version__icontains='beta'
        )
        self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
        # Select related + filter on a related proxy field
        resp = ProxyImprovement.objects.select_related().get(
            reporter__name__icontains='butor'
        )
        self.assertEqual(
            repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )
        # Select related + filter on a related proxy of proxy field
        resp = ProxyImprovement.objects.select_related().get(
            associated_bug__summary__icontains='fix'
        )
        self.assertEqual(
            repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )
    def test_proxy_load_from_fixture(self):
        management.call_command('loaddata', 'mypeople.json', verbosity=0)
        p = MyPerson.objects.get(pk=100)
        self.assertEqual(p.name, 'Elvis Presley')
    def test_eq(self):
        self.assertEqual(MyPerson(id=100), Person(id=100))
@override_settings(ROOT_URLCONF='proxy_models.urls')
class ProxyModelAdminTests(TestCase):
    """Admin integration: cascade-delete collection and delete-confirmation
    strings when the registered model is a proxy."""
    @classmethod
    def setUpTestData(cls):
        cls.superuser = AuthUser.objects.create(is_superuser=True, is_staff=True)
        cls.tu1 = ProxyTrackerUser.objects.create(name='Django Pony', status='emperor')
        cls.i1 = Issue.objects.create(summary="Pony's Issue", assignee=cls.tu1)
    def test_cascade_delete_proxy_model_admin_warning(self):
        """
        Test if admin gives warning about cascade deleting models referenced
        to concrete model by deleting proxy object.
        """
        tracker_user = TrackerUser.objects.all()[0]
        base_user = BaseUser.objects.all()[0]
        issue = Issue.objects.all()[0]
        with self.assertNumQueries(6):
            collector = admin.utils.NestedObjects('default')
            collector.collect(ProxyTrackerUser.objects.all())
        self.assertIn(tracker_user, collector.edges.get(None, ()))
        self.assertIn(base_user, collector.edges.get(None, ()))
        self.assertIn(issue, collector.edges.get(tracker_user, ()))
    def test_delete_str_in_model_admin(self):
        """
        Test if the admin delete page shows the correct string representation
        for a proxy model.
        """
        user = TrackerUser.objects.get(name='Django Pony')
        proxy = ProxyTrackerUser.objects.get(name='Django Pony')
        user_str = 'Tracker user: <a href="%s">%s</a>' % (
            reverse('admin_proxy:proxy_models_trackeruser_change', args=(user.pk,)), user
        )
        proxy_str = 'Proxy tracker user: <a href="%s">%s</a>' % (
            reverse('admin_proxy:proxy_models_proxytrackeruser_change', args=(proxy.pk,)), proxy
        )
        self.client.force_login(self.superuser)
        response = self.client.get(reverse('admin_proxy:proxy_models_trackeruser_delete', args=(user.pk,)))
        delete_str = response.context['deleted_objects'][0]
        self.assertEqual(delete_str, user_str)
        response = self.client.get(reverse('admin_proxy:proxy_models_proxytrackeruser_delete', args=(proxy.pk,)))
        delete_str = response.context['deleted_objects'][0]
        self.assertEqual(delete_str, proxy_str)
| bsd-3-clause |
iansf/sky_engine | sky/tools/webkitpy/layout_tests/port/mock_drt.py | 10 | 11787 | # Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is an implementation of the Port interface that overrides other
ports and changes the Driver binary to "MockDRT".
The MockDRT objects emulate what a real DRT would do. In particular, they
return the output a real DRT would return for a given test, assuming that
test actually passes (except for reftests, which currently cause the
MockDRT to crash).
"""
import base64
import logging
import optparse
import os
import sys
import types
# Since we execute this script directly as part of the unit tests, we need to ensure
# that tools is in sys.path for the next imports to work correctly.
script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
if script_dir not in sys.path:
sys.path.append(script_dir)
from webkitpy.common import read_checksum_from_png
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.port.factory import PortFactory
_log = logging.getLogger(__name__)
class MockDRTPort(object):
    """Port implementation that swaps a real port's driver for MockDRT.

    All attribute access is forwarded to a delegate port for the real
    platform; only driver creation (plus the no-op server/helper lifecycle
    methods) is overridden so that layout tests run against the emulated
    DRT implemented in this file instead of a real binary.
    """
    port_name = 'mock'
    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        return port_name
    def __init__(self, host, port_name, **kwargs):
        # The delegate is the real port named after the 'mock-' prefix; its
        # _driver_class is patched so drivers come from this class instead.
        self.__delegate = PortFactory(host).get(port_name.replace('mock-', ''), **kwargs)
        self.__delegate_driver_class = self.__delegate._driver_class
        self.__delegate._driver_class = types.MethodType(self._driver_class, self.__delegate)
    def __getattr__(self, name):
        # Anything not defined here is served by the delegate port.
        return getattr(self.__delegate, name)
    def check_build(self, needs_http, printer):
        # No build artifacts are required to run the mock driver.
        return True
    def check_sys_deps(self, needs_http):
        return True
    def _driver_class(self, delegate):
        return self._mocked_driver_maker
    def _mocked_driver_maker(self, port, worker_number, pixel_tests, no_timeout=False):
        # Build a real driver object, then rewrite its command line so the
        # delegate's DRT binary is replaced by "python <this file>".
        path_to_this_file = self.host.filesystem.abspath(__file__.replace('.pyc', '.py'))
        driver = self.__delegate_driver_class()(self, worker_number, pixel_tests, no_timeout)
        driver.cmd_line = self._overriding_cmd_line(driver.cmd_line,
                                                    self.__delegate._path_to_driver(),
                                                    sys.executable,
                                                    path_to_this_file,
                                                    self.__delegate.name())
        return driver
    @staticmethod
    def _overriding_cmd_line(original_cmd_line, driver_path, python_exe, this_file, port_name):
        # Returns a cmd_line function that splices the mock invocation into
        # the original command line at the position of the driver binary.
        def new_cmd_line(pixel_tests, per_test_args):
            cmd_line = original_cmd_line(pixel_tests, per_test_args)
            index = cmd_line.index(driver_path)
            cmd_line[index:index + 1] = [python_exe, this_file, '--platform', port_name]
            return cmd_line
        return new_cmd_line
    def start_helper(self):
        # Server and helper management are deliberate no-ops for the mock
        # port: there is nothing external to start or lock.
        pass
    def start_sky_server(self, additional_dirs, number_of_servers):
        pass
    def start_websocket_server(self):
        pass
    def acquire_http_lock(self):
        pass
    def stop_helper(self):
        pass
    def stop_sky_server(self):
        pass
    def stop_websocket_server(self):
        pass
    def release_http_lock(self):
        pass
    def _make_wdiff_available(self):
        self.__delegate._wdiff_available = True
    def setup_environ_for_server(self, server_name):
        # NOTE(review): server_name is accepted but not forwarded to the
        # delegate -- confirm this is intentional.
        env = self.__delegate.setup_environ_for_server()
        # We need to propagate PATH down so the python code can find the checkout.
        env['PATH'] = os.environ['PATH']
        return env
    def lookup_virtual_test_args(self, test_name):
        suite = self.__delegate.lookup_virtual_suite(test_name)
        return suite.args + ['--virtual-test-suite-name', suite.name, '--virtual-test-suite-base', suite.base]
def main(argv, host, stdin, stdout, stderr):
    """Parse the command line, then run the mock DRT until stdin closes.

    Returns the process exit status produced by MockDRT.run().
    """
    options, args = parse_options(argv)
    mock_drt = MockDRT(options, args, host, stdin, stdout, stderr)
    return mock_drt.run()
def parse_options(argv):
    """Pull the mock-DRT-specific flags out of a DRT command line.

    Custom parsing is used instead of the optparse module because DRT
    accepts many flags we do not want to enumerate, and optparse complains
    about unrecognized ones. Returns (options, args) where args is the
    original argv, untouched.
    """
    def get_arg(flag):
        # Value of the first occurrence of `flag`, or None if absent.
        try:
            position = argv.index(flag)
        except ValueError:
            return None
        return argv[position + 1]
    options = optparse.Values({
        'actual_directory': get_arg('--actual-directory'),
        'platform': get_arg('--platform'),
        'virtual_test_suite_base': get_arg('--virtual-test-suite-base'),
        'virtual_test_suite_name': get_arg('--virtual-test-suite-name'),
    })
    return (options, argv)
class MockDRT(object):
    """Emulates a real DRT process.

    Reads one test description per line from stdin and writes to stdout the
    output a real DRT would produce for that test, assuming the test passes
    (reftest output is fabricated).
    """
    def __init__(self, options, args, host, stdin, stdout, stderr):
        self._options = options
        self._args = args
        self._host = host
        self._stdout = stdout
        self._stdin = stdin
        self._stderr = stderr
        port_name = None
        if options.platform:
            port_name = options.platform
        self._port = PortFactory(host).get(port_name=port_name, options=options)
        self._driver = self._port.create_driver(0)
    def run(self):
        """Main loop: one stdin line per test; returns 0 at EOF."""
        while True:
            line = self._stdin.readline()
            if not line:
                return 0
            driver_input = self.input_from_line(line)
            dirname, basename = self._port.split_test(driver_input.test_name)
            is_reftest = (self._port.reference_files(driver_input.test_name) or
                          self._port.is_reference_html_file(self._port._filesystem, dirname, basename))
            output = self.output_for_test(driver_input, is_reftest)
            self.write_test_output(driver_input, output, is_reftest)
    def input_from_line(self, line):
        # DRT input lines look like: uri['--pixel-test['checksum]]
        # (fields separated by single quotes).
        vals = line.strip().split("'")
        uri = vals[0]
        checksum = None
        should_run_pixel_tests = False
        if len(vals) == 2 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
        elif len(vals) == 3 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
            checksum = vals[2]
        elif len(vals) != 1:
            raise NotImplementedError
        if uri.startswith('http://') or uri.startswith('https://'):
            test_name = self._driver.uri_to_test(uri)
        else:
            test_name = self._port.relative_test_filename(uri)
        return DriverInput(test_name, 0, checksum, should_run_pixel_tests, args=[])
    def output_for_test(self, test_input, is_reftest):
        """Builds the DriverOutput a passing run of this test would yield.

        Expected results come from the port; -actual.* files under
        --actual-directory, when present, override them.
        """
        port = self._port
        if self._options.virtual_test_suite_name:
            test_input.test_name = test_input.test_name.replace(self._options.virtual_test_suite_base, self._options.virtual_test_suite_name)
        actual_text = port.expected_text(test_input.test_name)
        actual_audio = port.expected_audio(test_input.test_name)
        actual_image = None
        actual_checksum = None
        if is_reftest:
            # Make up some output for reftests.
            actual_text = 'reference text\n'
            actual_checksum = 'mock-checksum'
            actual_image = 'blank'
            if test_input.test_name.endswith('-mismatch.html'):
                actual_text = 'not reference text\n'
                actual_checksum = 'not-mock-checksum'
                actual_image = 'not blank'
        elif test_input.should_run_pixel_test and test_input.image_hash:
            actual_checksum = port.expected_checksum(test_input.test_name)
            actual_image = port.expected_image(test_input.test_name)
        if self._options.actual_directory:
            actual_path = port._filesystem.join(self._options.actual_directory, test_input.test_name)
            root, _ = port._filesystem.splitext(actual_path)
            text_path = root + '-actual.txt'
            if port._filesystem.exists(text_path):
                actual_text = port._filesystem.read_binary_file(text_path)
            audio_path = root + '-actual.wav'
            if port._filesystem.exists(audio_path):
                actual_audio = port._filesystem.read_binary_file(audio_path)
            image_path = root + '-actual.png'
            if port._filesystem.exists(image_path):
                actual_image = port._filesystem.read_binary_file(image_path)
                with port._filesystem.open_binary_file_for_reading(image_path) as filehandle:
                    actual_checksum = read_checksum_from_png.read_checksum(filehandle)
        return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)
    def write_test_output(self, test_input, output, is_reftest):
        """Serializes `output` to stdout in DRT's wire format (#EOF-framed)."""
        if output.audio:
            self._stdout.write('Content-Type: audio/wav\n')
            self._stdout.write('Content-Transfer-Encoding: base64\n')
            self._stdout.write(base64.b64encode(output.audio))
            self._stdout.write('\n')
        else:
            self._stdout.write('Content-Type: text/plain\n')
            # FIXME: Note that we don't ensure there is a trailing newline!
            # This mirrors actual (Mac) DRT behavior but is a bug.
            if output.text:
                self._stdout.write(output.text)
        self._stdout.write('#EOF\n')
        if test_input.should_run_pixel_test and output.image_hash:
            self._stdout.write('\n')
            self._stdout.write('ActualHash: %s\n' % output.image_hash)
            self._stdout.write('ExpectedHash: %s\n' % test_input.image_hash)
            # The image payload is only emitted on a hash mismatch.
            if output.image_hash != test_input.image_hash:
                self._stdout.write('Content-Type: image/png\n')
                self._stdout.write('Content-Length: %s\n' % len(output.image))
                self._stdout.write(output.image)
        self._stdout.write('#EOF\n')
        self._stdout.flush()
        self._stderr.write('#EOF\n')
        self._stderr.flush()
if __name__ == '__main__':
    # Note that the Mock in MockDRT refers to the fact that it is emulating a
    # real DRT, and as such, it needs access to a real SystemHost, not a MockSystemHost.
    # Exit with main()'s return value so callers can detect failure.
    sys.exit(main(sys.argv[1:], SystemHost(), sys.stdin, sys.stdout, sys.stderr))
| bsd-3-clause |
pechatny/basic-flask-app | src/app/flask/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.py | 167 | 6810 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
# Only instantiate the Windows terminal driver when the win32 DLLs could be
# loaded (i.e. when actually running on Windows); elsewhere winterm stays
# undefined and is never used.
if windll is not None:
    winterm = WinTerm()
def is_a_tty(stream):
    '''Return whether *stream* provides isatty() and it reports a terminal.'''
    if not hasattr(stream, 'isatty'):
        return False
    return stream.isatty()
class StreamWrapper(object):
    '''
    Acts as a transparent proxy for a stream (such as stdout).  Every
    attribute lookup is forwarded to the wrapped stream; only 'write()'
    behaves differently, being handed to the converter instance instead.
    '''
    def __init__(self, wrapped, converter):
        # Double-underscore names are mangled by Python, so they cannot
        # collide with attributes of the wrapped stream object.
        self.__stream = wrapped
        self.__converter = converter

    def __getattr__(self, name):
        return getattr(self.__stream, name)

    def write(self, text):
        self.__converter.write(text)
class AnsiToWin32(object):
    '''
    Implements a 'write()' method which, on Windows, will strip ANSI character
    sequences from the text, and if outputting to a tty, will convert them into
    win32 function calls.
    '''
    # Matches one ANSI escape: ESC [ <params> <single command letter>.
    ANSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
        # The wrapped stream (normally sys.stdout or sys.stderr)
        self.wrapped = wrapped
        # should we reset colors to defaults after every .write()
        self.autoreset = autoreset
        # create the proxy wrapping our output stream
        self.stream = StreamWrapper(wrapped, self)
        on_windows = sys.platform.startswith('win')
        # should we strip ANSI sequences from our output?
        if strip is None:
            strip = on_windows
        self.strip = strip
        # should we should convert ANSI sequences into win32 calls?
        if convert is None:
            convert = on_windows and is_a_tty(wrapped)
        self.convert = convert
        # dict of ansi codes to win32 functions and parameters
        self.win32_calls = self.get_win32_calls()
        # are we wrapping stderr?
        self.on_stderr = self.wrapped is sys.stderr
    def should_wrap(self):
        '''
        True if this class is actually needed. If false, then the output
        stream will not be affected, nor will win32 calls be issued, so
        wrapping stdout is not actually required. This will generally be
        False on non-Windows platforms, unless optional functionality like
        autoreset has been requested using kwargs to init()
        '''
        return self.convert or self.strip or self.autoreset
    def get_win32_calls(self):
        # Implicitly returns None when conversion is disabled or winterm
        # is unavailable.
        if self.convert and winterm:
            return {
                AnsiStyle.RESET_ALL: (winterm.reset_all, ),
                AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
                AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
                AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
                AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
                AnsiFore.RED: (winterm.fore, WinColor.RED),
                AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
                AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
                AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
                AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
                AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
                AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
                AnsiFore.RESET: (winterm.fore, ),
                AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
                AnsiBack.RED: (winterm.back, WinColor.RED),
                AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
                AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
                AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
                AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
                AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
                AnsiBack.WHITE: (winterm.back, WinColor.GREY),
                AnsiBack.RESET: (winterm.back, ),
            }
    def write(self, text):
        if self.strip or self.convert:
            self.write_and_convert(text)
        else:
            self.wrapped.write(text)
            self.wrapped.flush()
        if self.autoreset:
            self.reset_all()
    def reset_all(self):
        if self.convert:
            self.call_win32('m', (0,))
        elif is_a_tty(self.wrapped):
            self.wrapped.write(Style.RESET_ALL)
    def write_and_convert(self, text):
        '''
        Write the given text to our wrapped stream, stripping any ANSI
        sequences from the text, and optionally converting them into win32
        calls.
        '''
        cursor = 0
        for match in self.ANSI_RE.finditer(text):
            start, end = match.span()
            self.write_plain_text(text, cursor, start)
            self.convert_ansi(*match.groups())
            cursor = end
        # Flush whatever remains after the last escape sequence.
        self.write_plain_text(text, cursor, len(text))
    def write_plain_text(self, text, start, end):
        if start < end:
            self.wrapped.write(text[start:end])
            self.wrapped.flush()
    def convert_ansi(self, paramstring, command):
        if self.convert:
            params = self.extract_params(paramstring)
            self.call_win32(command, params)
    def extract_params(self, paramstring):
        # Returns a tuple of the semicolon-separated integer parameters,
        # e.g. '1;31' -> (1, 31); an empty paramstring yields ().
        def split(paramstring):
            for p in paramstring.split(';'):
                if p != '':
                    yield int(p)
        return tuple(split(paramstring))
    def call_win32(self, command, params):
        # NOTE(review): extract_params returns a tuple, so `params == []`
        # is never true for sequences arriving via convert_ansi (an empty
        # paramstring yields (), not []) -- confirm a falsy check was meant.
        if params == []:
            params = [0]
        if command == 'm':
            for param in params:
                if param in self.win32_calls:
                    func_args = self.win32_calls[param]
                    func = func_args[0]
                    args = func_args[1:]
                    kwargs = dict(on_stderr=self.on_stderr)
                    func(*args, **kwargs)
        elif command in ('H', 'f'): # set cursor position
            func = winterm.set_cursor_position
            func(params, on_stderr=self.on_stderr)
        # NOTE(review): ('J') is just the string 'J', not a one-tuple; `in`
        # therefore does substring matching. It works here because command
        # is a single letter, but ('J',) would state the intent clearly.
        elif command in ('J'):
            func = winterm.erase_data
            func(params, on_stderr=self.on_stderr)
        elif command == 'A':
            if params == () or params == None:
                num_rows = 1
            else:
                num_rows = params[0]
            func = winterm.cursor_up
            func(num_rows, on_stderr=self.on_stderr)
| mit |
juanyaw/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/cp1255.py | 93 | 13029 | """ Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1255 codec (machine-generated by gencodec.py)."""

    def encode(self,input,errors='strict'):
        # unicode -> cp1255 bytes via the generated encoding_table.
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        # cp1255 bytes -> unicode via the generated decoding_table.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding keeps no state, so `final` is irrelevant; [0]
        # drops the consumed-length part of the (output, length) tuple.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Stateless single-byte decode; see IncrementalEncoder above.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream API simply reuses the stateless Codec.encode.
    pass

class StreamReader(Codec,codecs.StreamReader):
    # Stream API simply reuses the stateless Codec.decode.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo the codecs registry uses for 'cp1255'."""
    return codecs.CodecInfo(
        name='cp1255',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\u20aa' # 0xA4 -> NEW SHEQEL SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xd7' # 0xAA -> MULTIPLICATION SIGN
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xf7' # 0xBA -> DIVISION SIGN
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\u05b0' # 0xC0 -> HEBREW POINT SHEVA
u'\u05b1' # 0xC1 -> HEBREW POINT HATAF SEGOL
u'\u05b2' # 0xC2 -> HEBREW POINT HATAF PATAH
u'\u05b3' # 0xC3 -> HEBREW POINT HATAF QAMATS
u'\u05b4' # 0xC4 -> HEBREW POINT HIRIQ
u'\u05b5' # 0xC5 -> HEBREW POINT TSERE
u'\u05b6' # 0xC6 -> HEBREW POINT SEGOL
u'\u05b7' # 0xC7 -> HEBREW POINT PATAH
u'\u05b8' # 0xC8 -> HEBREW POINT QAMATS
u'\u05b9' # 0xC9 -> HEBREW POINT HOLAM
u'\ufffe' # 0xCA -> UNDEFINED
u'\u05bb' # 0xCB -> HEBREW POINT QUBUTS
u'\u05bc' # 0xCC -> HEBREW POINT DAGESH OR MAPIQ
u'\u05bd' # 0xCD -> HEBREW POINT METEG
u'\u05be' # 0xCE -> HEBREW PUNCTUATION MAQAF
u'\u05bf' # 0xCF -> HEBREW POINT RAFE
u'\u05c0' # 0xD0 -> HEBREW PUNCTUATION PASEQ
u'\u05c1' # 0xD1 -> HEBREW POINT SHIN DOT
u'\u05c2' # 0xD2 -> HEBREW POINT SIN DOT
u'\u05c3' # 0xD3 -> HEBREW PUNCTUATION SOF PASUQ
u'\u05f0' # 0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV
u'\u05f1' # 0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD
u'\u05f2' # 0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD
u'\u05f3' # 0xD7 -> HEBREW PUNCTUATION GERESH
u'\u05f4' # 0xD8 -> HEBREW PUNCTUATION GERSHAYIM
u'\ufffe' # 0xD9 -> UNDEFINED
u'\ufffe' # 0xDA -> UNDEFINED
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\ufffe' # 0xDF -> UNDEFINED
u'\u05d0' # 0xE0 -> HEBREW LETTER ALEF
u'\u05d1' # 0xE1 -> HEBREW LETTER BET
u'\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
u'\u05d3' # 0xE3 -> HEBREW LETTER DALET
u'\u05d4' # 0xE4 -> HEBREW LETTER HE
u'\u05d5' # 0xE5 -> HEBREW LETTER VAV
u'\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0xE7 -> HEBREW LETTER HET
u'\u05d8' # 0xE8 -> HEBREW LETTER TET
u'\u05d9' # 0xE9 -> HEBREW LETTER YOD
u'\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
u'\u05db' # 0xEB -> HEBREW LETTER KAF
u'\u05dc' # 0xEC -> HEBREW LETTER LAMED
u'\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
u'\u05de' # 0xEE -> HEBREW LETTER MEM
u'\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0xF0 -> HEBREW LETTER NUN
u'\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0xF2 -> HEBREW LETTER AYIN
u'\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0xF4 -> HEBREW LETTER PE
u'\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0xF6 -> HEBREW LETTER TSADI
u'\u05e7' # 0xF7 -> HEBREW LETTER QOF
u'\u05e8' # 0xF8 -> HEBREW LETTER RESH
u'\u05e9' # 0xF9 -> HEBREW LETTER SHIN
u'\u05ea' # 0xFA -> HEBREW LETTER TAV
u'\ufffe' # 0xFB -> UNDEFINED
u'\ufffe' # 0xFC -> UNDEFINED
u'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
u'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
# Inverse mapping (unicode -> byte) derived from the decoding table above;
# charmap_build produces the fast lookup structure used by charmap_encode.
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
hanselke/erpnext-1 | erpnext/stock/doctype/delivery_note/delivery_note.py | 11 | 12802 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cint
from frappe import msgprint, _
import frappe.defaults
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.selling_controller import SellingController
# Custom desk-UI grid template for the Items child table of this doctype.
form_grid_templates = {
    "items": "templates/form_grid/item_grid.html"
}
class DeliveryNote(SellingController):
    """Controller for the Delivery Note doctype.

    Validates, submits and cancels outgoing stock deliveries and keeps
    delivered/returned quantities on linked Sales Orders and Sales
    Invoices in sync via frappe's generic ``status_updater`` machinery.
    """

    def __init__(self, arg1, arg2=None):
        super(DeliveryNote, self).__init__(arg1, arg2)
        # Each status_updater entry tells the framework how to roll up a
        # qty field from this document's child rows onto the matching row
        # of a referenced document (and optionally a percentage field on
        # that document's parent).
        self.status_updater = [{
            'source_dt': 'Delivery Note Item',
            'target_dt': 'Sales Order Item',
            'join_field': 'so_detail',
            'target_field': 'delivered_qty',
            'target_parent_dt': 'Sales Order',
            'target_parent_field': 'per_delivered',
            'target_ref_field': 'qty',
            'source_field': 'qty',
            'percent_join_field': 'against_sales_order',
            'status_field': 'delivery_status',
            'keyword': 'Delivered',
            # Qty billed through a stock-updating Sales Invoice also
            # counts towards the Sales Order's delivered qty.
            'second_source_dt': 'Sales Invoice Item',
            'second_source_field': 'qty',
            'second_join_field': 'so_detail',
            'overflow_type': 'delivery',
            'second_source_extra_cond': """ and exists(select name from `tabSales Invoice`
                where name=`tabSales Invoice Item`.parent and ifnull(update_stock, 0) = 1)"""
        },
        {
            'source_dt': 'Delivery Note Item',
            'target_dt': 'Sales Invoice Item',
            'join_field': 'si_detail',
            'target_field': 'delivered_qty',
            'target_parent_dt': 'Sales Invoice',
            'target_ref_field': 'qty',
            'source_field': 'qty',
            'percent_join_field': 'against_sales_invoice',
            'overflow_type': 'delivery'
        },
        {
            # Return notes (is_return=1) feed negative qty into the Sales
            # Order's returned_qty.
            'source_dt': 'Delivery Note Item',
            'target_dt': 'Sales Order Item',
            'join_field': 'so_detail',
            'target_field': 'returned_qty',
            'target_parent_dt': 'Sales Order',
            # 'target_parent_field': 'per_delivered',
            # 'target_ref_field': 'qty',
            'source_field': '-1 * qty',
            # 'percent_join_field': 'against_sales_order',
            # 'overflow_type': 'delivery',
            'extra_cond': """ and exists (select name from `tabDelivery Note` where name=`tabDelivery Note Item`.parent and is_return=1)"""
        }]

    def onload(self):
        """Expose whether this note is fully billed (used by the form UI)."""
        billed_qty = frappe.db.sql("""select sum(ifnull(qty, 0)) from `tabSales Invoice Item`
            where docstatus=1 and delivery_note=%s""", self.name)
        if billed_qty:
            total_qty = sum((item.qty for item in self.get("items")))
            # billing_complete is True only if invoiced qty equals total qty.
            self.get("__onload").billing_complete = (billed_qty[0][0] == total_qty)

    def before_print(self):
        """Hide amount fields in print output when 'print without amount' is set."""
        def toggle_print_hide(meta, fieldname):
            df = meta.get_field(fieldname)
            if self.get("print_without_amount"):
                df.set("__print_hide", 1)
            else:
                df.delete_key("__print_hide")

        item_meta = frappe.get_meta("Delivery Note Item")
        # "parent" keys live on the Delivery Note itself; "items" keys on rows.
        print_hide_fields = {
            "parent": ["grand_total", "rounded_total", "in_words", "currency", "total", "taxes"],
            "items": ["rate", "amount", "price_list_rate", "discount_percentage"]
        }

        for key, fieldname in print_hide_fields.items():
            for f in fieldname:
                toggle_print_hide(self.meta if key == "parent" else item_meta, f)

    def set_actual_qty(self):
        """Copy current warehouse stock (Bin.actual_qty) onto each item row."""
        for d in self.get('items'):
            if d.item_code and d.warehouse:
                actual_qty = frappe.db.sql("""select actual_qty from `tabBin`
                    where item_code = %s and warehouse = %s""", (d.item_code, d.warehouse))
                d.actual_qty = actual_qty and flt(actual_qty[0][0]) or 0

    def so_required(self):
        """check in manage account if sales order required or not"""
        if frappe.db.get_value("Selling Settings", None, 'so_required') == 'Yes':
            for d in self.get('items'):
                if not d.against_sales_order:
                    frappe.throw(_("Sales Order required for Item {0}").format(d.item_code))

    def validate(self):
        """Run all document-level validations and rebuild the packing list."""
        super(DeliveryNote, self).validate()
        self.set_status()
        self.so_required()
        self.validate_proj_cust()
        self.check_stop_sales_order("against_sales_order")
        self.validate_for_items()
        self.validate_warehouse()
        self.validate_uom_is_integer("stock_uom", "qty")
        self.validate_with_previous_doc()

        # Explode product bundles into the packed_items child table.
        from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
        make_packing_list(self, 'items')

        self.update_current_stock()

        if not self.installation_status: self.installation_status = 'Not Installed'

    def validate_with_previous_doc(self):
        """Check referenced Sales Orders / Invoices agree on key fields
        (customer, company, project, currency) and optionally on rates."""
        for fn in (("Sales Order", "against_sales_order", "so_detail"),
                ("Sales Invoice", "against_sales_invoice", "si_detail")):
            # Only validate against doctypes actually referenced by a row.
            if filter(None, [getattr(d, fn[1], None) for d in self.get("items")]):
                super(DeliveryNote, self).validate_with_previous_doc({
                    fn[0]: {
                        "ref_dn_field": fn[1],
                        "compare_fields": [["customer", "="], ["company", "="], ["project_name", "="],
                            ["currency", "="]],
                    },
                })

        if cint(frappe.db.get_single_value('Selling Settings', 'maintain_same_sales_rate')) and not self.is_return:
            self.validate_rate_with_reference_doc([["Sales Order", "sales_order", "so_detail"],
                ["Sales Invoice", "sales_invoice", "si_detail"]])

    def validate_proj_cust(self):
        """check for does customer belong to same project as entered.."""
        if self.project_name and self.customer:
            res = frappe.db.sql("""select name from `tabProject`
                where name = %s and (customer = %s or
                ifnull(customer,'')='')""", (self.project_name, self.customer))
            if not res:
                frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))

    def validate_for_items(self):
        """Warn when the same item/reference combination repeats, unless
        'allow multiple items' is enabled in Selling Settings."""
        check_list, chk_dupl_itm = [], []
        if cint(frappe.db.get_single_value("Selling Settings", "allow_multiple_items")):
            return

        for d in self.get('items'):
            # Stock items are keyed including warehouse and batch; non-stock
            # items only by item/description/reference document.
            e = [d.item_code, d.description, d.warehouse, d.against_sales_order or d.against_sales_invoice, d.batch_no or '']
            f = [d.item_code, d.description, d.against_sales_order or d.against_sales_invoice]

            if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 1:
                if e in check_list:
                    msgprint(_("Note: Item {0} entered multiple times").format(d.item_code))
                else:
                    check_list.append(e)
            else:
                if f in chk_dupl_itm:
                    msgprint(_("Note: Item {0} entered multiple times").format(d.item_code))
                else:
                    chk_dupl_itm.append(f)

    def validate_warehouse(self):
        """Every stock-item row must name a source warehouse."""
        for d in self.get_item_list():
            if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == 1:
                if not d['warehouse']:
                    frappe.throw(_("Warehouse required for stock Item {0}").format(d["item_code"]))

    def update_current_stock(self):
        """Refresh actual/projected qty snapshots on item and packed rows."""
        # Skip during update-after-submit so submitted snapshots stay frozen.
        if self.get("_action") and self._action != "update_after_submit":
            for d in self.get('items'):
                d.actual_qty = frappe.db.get_value("Bin", {"item_code": d.item_code,
                    "warehouse": d.warehouse}, "actual_qty")

            for d in self.get('packed_items'):
                bin_qty = frappe.db.get_value("Bin", {"item_code": d.item_code,
                    "warehouse": d.warehouse}, ["actual_qty", "projected_qty"], as_dict=True)
                if bin_qty:
                    d.actual_qty = flt(bin_qty.actual_qty)
                    d.projected_qty = flt(bin_qty.projected_qty)

    def on_submit(self):
        """Post stock-ledger and GL entries and roll up delivered qty."""
        self.validate_packed_qty()

        # Check for Approving Authority
        frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.base_grand_total, self)

        # update delivered qty in sales order
        self.update_prevdoc_status()

        if not self.is_return:
            self.check_credit_limit()

        self.update_stock_ledger()
        self.make_gl_entries()
        frappe.db.set(self, 'status', 'Submitted')

    def on_cancel(self):
        """Reverse stock/GL postings and cancel dependent packing slips."""
        self.check_stop_sales_order("against_sales_order")
        self.check_next_docstatus()

        self.update_prevdoc_status()
        self.update_stock_ledger()

        frappe.db.set(self, 'status', 'Cancelled')
        self.cancel_packing_slips()

        self.make_gl_entries_on_cancel()

    def check_credit_limit(self):
        """Enforce the customer credit limit, but only when at least one row
        is not already backed by a Sales Order / Sales Invoice."""
        from erpnext.selling.doctype.customer.customer import check_credit_limit

        validate_against_credit_limit = False
        for d in self.get("items"):
            if not (d.against_sales_order or d.against_sales_invoice):
                validate_against_credit_limit = True
                break
        if validate_against_credit_limit:
            check_credit_limit(self.customer, self.company)

    def validate_packed_qty(self):
        """
        Validate that if packed qty exists, it should be equal to qty
        """
        # No packing data at all -> nothing to validate.
        if not any([flt(d.get('packed_qty')) for d in self.get("items")]):
            return
        has_error = False
        for d in self.get("items"):
            if flt(d.get('qty')) != flt(d.get('packed_qty')):
                frappe.msgprint(_("Packed quantity must equal quantity for Item {0} in row {1}").format(d.item_code, d.idx))
                has_error = True
        if has_error:
            raise frappe.ValidationError

    def check_next_docstatus(self):
        """Block cancellation while submitted downstream documents exist."""
        submit_rv = frappe.db.sql("""select t1.name
            from `tabSales Invoice` t1,`tabSales Invoice Item` t2
            where t1.name = t2.parent and t2.delivery_note = %s and t1.docstatus = 1""",
            (self.name))
        if submit_rv:
            frappe.throw(_("Sales Invoice {0} has already been submitted").format(submit_rv[0][0]))

        submit_in = frappe.db.sql("""select t1.name
            from `tabInstallation Note` t1, `tabInstallation Note Item` t2
            where t1.name = t2.parent and t2.prevdoc_docname = %s and t1.docstatus = 1""",
            (self.name))
        if submit_in:
            frappe.throw(_("Installation Note {0} has already been submitted").format(submit_in[0][0]))

    def cancel_packing_slips(self):
        """
        Cancel submitted packing slips related to this delivery note
        """
        res = frappe.db.sql("""SELECT name FROM `tabPacking Slip` WHERE delivery_note = %s
            AND docstatus = 1""", self.name)

        if res:
            for r in res:
                ps = frappe.get_doc('Packing Slip', r[0])
                ps.cancel()
            frappe.msgprint(_("Packing Slip(s) cancelled"))
def get_list_context(context=None):
    """Portal list-view context for Delivery Notes ('My Shipments' page)."""
    from erpnext.controllers.website_list_for_contact import get_list_context as _base_list_context
    ctx = _base_list_context(context)
    ctx["title"] = _("My Shipments")
    return ctx
def get_invoiced_qty_map(delivery_note):
    """returns a map: {dn_detail: invoiced_qty}"""
    invoiced = {}
    rows = frappe.db.sql("""select dn_detail, qty from `tabSales Invoice Item`
        where delivery_note=%s and docstatus=1""", delivery_note)
    for dn_detail, qty in rows:
        # Sum qty across all submitted invoice rows against the same
        # Delivery Note row.
        invoiced[dn_detail] = invoiced.get(dn_detail, 0) + qty
    return invoiced
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
    """Map a submitted Delivery Note into a draft Sales Invoice, carrying
    over only the not-yet-invoiced quantity of each item row."""
    invoiced_qty_map = get_invoiced_qty_map(source_name)

    def update_accounts(source, target):
        # Post-process the mapped invoice: recompute defaults and totals.
        target.is_pos = 0
        target.ignore_pricing_rule = 1
        target.run_method("set_missing_values")

        if len(target.get("items")) == 0:
            frappe.throw(_("All these items have already been invoiced"))

        target.run_method("calculate_taxes_and_totals")

    def update_item(source_doc, target_doc, source_parent):
        # Invoice only the remaining (uninvoiced) quantity of this row.
        target_doc.qty = source_doc.qty - invoiced_qty_map.get(source_doc.name, 0)

    doc = get_mapped_doc("Delivery Note", source_name, {
        "Delivery Note": {
            "doctype": "Sales Invoice",
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Delivery Note Item": {
            "doctype": "Sales Invoice Item",
            "field_map": {
                "name": "dn_detail",
                "parent": "delivery_note",
                "so_detail": "so_detail",
                "against_sales_order": "sales_order",
                "serial_no": "serial_no"
            },
            "postprocess": update_item,
            # NOTE(review): "filter" appears to exclude rows whose remaining
            # qty is <= 0 (i.e. fully invoiced) -- confirm against the
            # get_mapped_doc API of this frappe version.
            "filter": lambda d: d.qty - invoiced_qty_map.get(d.name, 0)<=0
        },
        "Sales Taxes and Charges": {
            "doctype": "Sales Taxes and Charges",
            "add_if_empty": True
        },
        "Sales Team": {
            "doctype": "Sales Team",
            "field_map": {
                "incentives": "incentives"
            },
            "add_if_empty": True
        }
    }, target_doc, update_accounts)

    return doc
@frappe.whitelist()
def make_installation_note(source_name, target_doc=None):
    """Map a submitted Delivery Note into an Installation Note covering
    the quantity not yet installed."""
    def update_item(obj, target, source_parent):
        # Remaining qty to install, and carry the serial numbers along.
        target.qty = flt(obj.qty) - flt(obj.installed_qty)
        target.serial_no = obj.serial_no

    doclist = get_mapped_doc("Delivery Note", source_name, {
        "Delivery Note": {
            "doctype": "Installation Note",
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Delivery Note Item": {
            "doctype": "Installation Note Item",
            "field_map": {
                "name": "prevdoc_detail_docname",
                "parent": "prevdoc_docname",
                "parenttype": "prevdoc_doctype",
            },
            "postprocess": update_item,
            # Only rows that still have something left to install.
            "condition": lambda doc: doc.installed_qty < doc.qty
        }
    }, target_doc)

    return doclist
@frappe.whitelist()
def make_packing_slip(source_name, target_doc=None):
    """Create a draft Packing Slip from a *draft* Delivery Note."""
    mapping = {
        "Delivery Note": {
            "doctype": "Packing Slip",
            "field_map": {
                "name": "delivery_note",
                "letter_head": "letter_head"
            },
            # Packing slips are prepared while the note is still a draft.
            "validation": {
                "docstatus": ["=", 0]
            }
        }
    }
    return get_mapped_doc("Delivery Note", source_name, mapping, target_doc)
@frappe.whitelist()
def make_sales_return(source_name, target_doc=None):
    """Create a return (negative-qty) Delivery Note against a submitted one."""
    from erpnext.controllers.sales_and_purchase_return import make_return_doc
    return make_return_doc("Delivery Note", source_name, target_doc)
| agpl-3.0 |
hychen/boliau | boliau/plugins/lp_cli/actionlib.py | 1 | 6379 | #!/usr/bin/env python
# -*- coding: utf-8 -*
#
# File: lp_cli.py
#
# Copyright (C) 2012 Hsin-Yi Chen (hychen)
# Author(s): Hsin-Yi Chen (hychen) <ossug.hychen@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import logging
from boliau import actionlib
from launchpadlib.launchpad import Launchpad
# -----------------------------------------------------------------------
# Global Variables
# -----------------------------------------------------------------------
# Bug-task statuses accepted by Launchpad, ranked so that higher values mean
# "more open / still needs work" (used by FindBugTasks to expand 'Todo').
LP_VALIDATE_BUGTASK_STATUS={
    'In Progress': 100,
    'Triaged': 90,
    'Confirmed': 80,
    'New': 70,
    'Incomplete (with response)': 60,
    'Incomplete (without response)': 50,
    'Incomplete': 40,
    'Fix Committed': 30,
    'Fix Released': 20,
    'Won\'t Fix': 10,
    'Invalid': 0,
    'Opinion': 0}

# Bug-task importance levels, ranked from most to least important.
LP_VALIDATE_BUGTASK_IMPORTANCE={
    'Critical': 5,
    'High': 4,
    'Medium': 3,
    'Low': 2,
    'Wishlist': 1,
    'Undecided': 0}

# Lifecycle statuses a Launchpad branch may carry.
LP_VALIDATE_BRANCH_STATUS=(
    'Experimental',
    'Development',
    'Mature',
    'Merged',
    'Abandoned')
class LaunchpadDatabase(object):
    """Thin, lazily-connecting wrapper around a launchpadlib Launchpad root."""

    lp = None  # class-level default; each instance caches its connection here
    LP_VALIDATE_BUGTASK_STATUS = LP_VALIDATE_BUGTASK_STATUS
    LP_VALIDATE_BUGTASK_IMPORTANCE = LP_VALIDATE_BUGTASK_IMPORTANCE

    def connect(self):
        """Log in to Launchpad on first use and cache the connection.

        The target system comes from $LPSYSTEM (default: 'production').
        """
        if not self.lp:
            system = os.getenv('LPSYSTEM') or 'production'
            cachedir = os.path.expanduser("~/.launchpadlib/cache")
            self.lp = Launchpad.login_with('lp-cli', system, cachedir)
        return self.lp

    def get(self, entry_type, entry_id):
        """Fetch a top-level Launchpad entry, or None if the id is unknown."""
        self.connect()
        # Launchpad top-level collections are plural attributes ('projects',
        # 'bugs', ...) except 'people'.  NOTE(review): the naive "+'s'"
        # pluralisation would yield e.g. 'branchs' for 'branch' -- confirm
        # which entry types callers actually pass.
        if entry_type != 'people':
            entry_type = entry_type+'s'
        try:
            return getattr(self.lp, entry_type)[entry_id]
        except KeyError as e:
            # Unknown id: log and signal "not found" rather than raising.
            logging.debug(e)
            return None

    def load_lp_objects(self, opts):
        """Replace string option values with the real Launchpad objects."""
        if opts.get('assignee'):
            opts['assignee'] = self.get('people', opts['assignee'])
        return opts
class _StartAction(object):
    """Common base for pipeline-starting actions: owns the Launchpad
    database handle and a Mission accumulator seeded with it."""

    def __init__(self):
        self.db = LaunchpadDatabase()
        self.acc = actionlib.Mission(self.db)
# -----------------------------------------------------------------------
# Action Classes
# -----------------------------------------------------------------------
class Get(_StartAction):
    """Fetch a single Launchpad entry and seed a Mission with it."""

    desc = """
    Get a Launchpad Entry.
    """

    link_type = 'None -> Mission'
    data_type = 'Any -> Any'

    def __call__(self, **opts):
        # Queue maintask with the remaining CLI options; the Mission runs
        # it later against the shared LaunchpadDatabase.
        entry_type = opts.pop('entry_type')
        entry_id = opts.pop('entry_id')
        self.acc.add_task(repr(self.__class__),
                          self.maintask,
                          entry_type, entry_id,
                          **opts)
        return self.acc

    def maintask(db, entry_type, entry_id, **opts):
        # NOTE(review): defined without `self` -- presumably stored and
        # later invoked by actionlib with the Mission's db as the first
        # argument; confirm against boliau.actionlib.Mission.add_task.
        return db.get(entry_type, entry_id)
class FindBugTasks(_StartAction):
    """Search the bug tasks of a Launchpad entry (project, people, ...)."""

    desc = """
    Search Bug Tasks of the entry.
    """

    link_type = 'None -> Mission'
    data_type = 'Any -> Any'

    def __call__(self, **opts):
        """Queue the search as a Mission task and return the Mission."""
        entry_type = opts.pop('entry_type')
        entry_id = opts.pop('entry_id')
        self.acc.add_task(repr(self.__class__),
                          self.maintask,
                          entry_type, entry_id,
                          **opts)
        return self.acc

    def maintask(db, entry_type, entry_id, **opts):
        # NOTE(review): defined without `self`; presumably invoked by
        # actionlib with the Mission's db as first argument -- confirm.
        entry = db.get(entry_type, entry_id)

        # A milestone name is only meaningful for projects; resolve the
        # string into the real milestone object.
        if entry and entry_type == 'project' and opts.get('milestone'):
            opts['milestone'] = entry.getMilestone(name=opts['milestone'])

        # Expand the 'Todo'/'All' pseudo-statuses into concrete statuses.
        if 'Todo' in opts['status'] and 'All' in opts['status']:
            # Fixed garbled message (was: "Todo and All are confilict.").
            raise Exception("Todo and All are conflicting.")
        if 'All' in opts['status']:
            # list() so the value is a real sequence on Python 3 as well.
            opts['status'] = list(db.LP_VALIDATE_BUGTASK_STATUS.keys())
        elif 'Todo' in opts['status']:
            # 'Todo' means every status that still needs work, i.e. all
            # statuses except the closed/terminal ones.
            closed = ('Invalid',
                      'Won\'t Fix',
                      'Fix Committed',
                      'Fix Released',
                      'Opinion',)
            opts['status'] = [s for s in db.LP_VALIDATE_BUGTASK_STATUS.keys()
                              if s not in closed]
        opts = db.load_lp_objects(opts)
        return entry.searchTasks(**opts)
class FindPackages(_StartAction):
    """Find published source packages in a Launchpad PPA."""

    desc = 'Find packages'
    link_type = 'None -> Mission'
    data_type = 'Any -> Any'

    def __call__(self, **opts):
        # Accept both "ppa:owner/name" and bare "owner/name" forms.
        ppa = opts.pop('ppa').replace('ppa:', '')
        ppa_owner, ppa_name = ppa.split('/')
        self.acc.add_task(repr(self.__class__),
                          self.maintask,
                          ppa_owner, ppa_name,
                          **opts)
        return self.acc

    def maintask(db, ppa_onwer, ppa_name, **opts):
        # NOTE(review): defined without `self`; presumably invoked by
        # actionlib with the Mission's db as first argument -- confirm.
        # The PPA owner may be a person or a team; try both in turn.
        people = db.get('people', ppa_onwer)
        if not people:
            people = db.get('team', ppa_onwer)
        archive = people.getPPAByName(name=ppa_name)
        return archive.getPublishedSources(status='Published')
| mit |
sudheesh001/oh-mainline | vendor/packages/Django/django/contrib/formtools/wizard/legacy.py | 115 | 11275 | """
FormWizard class -- implements a multi-page form, validating between each
step and storing the form's state as HTML hidden fields so that no state is
stored on the server side.
"""
from django.forms import HiddenInput
from django.http import Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.crypto import constant_time_compare
from django.utils.translation import ugettext_lazy as _
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from django.contrib.formtools.utils import form_hmac
class FormWizard(object):
# The HTML (and POST data) field name for the "step" variable.
step_field_name="wizard_step"
# METHODS SUBCLASSES SHOULDN'T OVERRIDE ###################################
def __init__(self, form_list, initial=None):
    """
    Start a new wizard with a list of forms.

    form_list should be a list of Form classes (not instances).
    """
    # Copy the list so hook methods may mutate it per-request safely.
    self.form_list = form_list[:]
    self.initial = initial or {}

    # Dictionary of extra template context variables.
    self.extra_context = {}

    # A zero-based counter keeping track of which step we're in.
    self.step = 0

    import warnings
    warnings.warn(
        'Old-style form wizards have been deprecated; use the class-based '
        'views in django.contrib.formtools.wizard.views instead.',
        DeprecationWarning)
def __repr__(self):
    # Debug representation: current step plus configured forms/initial data.
    return "step: %d\nform_list: %s\ninitial_data: %s" % (self.step, self.form_list, self.initial)
def get_form(self, step, data=None):
    "Helper method that returns the Form instance for the given step."
    # Sanity check.
    if step >= self.num_steps():
        raise Http404('Step %s does not exist' % step)
    # Each step's form gets its own field prefix and per-step initial data,
    # so multiple forms can coexist in a single POST payload.
    return self.form_list[step](data, prefix=self.prefix_for_step(step), initial=self.initial.get(step, None))
def num_steps(self):
    """Return how many steps the wizard currently has.

    Computed on every call rather than cached in __init__, because hook
    methods are allowed to mutate self.form_list between steps.
    """
    return len(self.form_list)
def _check_security_hash(self, token, request, form):
    # Compare the client-supplied hash against a freshly computed one.
    # constant_time_compare avoids leaking information via timing.
    expected = self.security_hash(request, form)
    return constant_time_compare(token, expected)
@method_decorator(csrf_protect)
def __call__(self, request, *args, **kwargs):
    """
    Main method that does all the hard work, conforming to the Django view
    interface.
    """
    if 'extra_context' in kwargs:
        self.extra_context.update(kwargs['extra_context'])
    current_step = self.get_current_or_first_step(request, *args, **kwargs)
    self.parse_params(request, *args, **kwargs)

    # Validate and process all the previous forms before instantiating the
    # current step's form in case self.process_step makes changes to
    # self.form_list.

    # If any of them fails validation, that must mean the validator relied
    # on some other input, such as an external Web site.

    # It is also possible that validation might fail under certain attack
    # situations: an attacker might be able to bypass previous stages, and
    # generate correct security hashes for all the skipped stages by virtue
    # of:
    #  1) having filled out an identical form which doesn't have the
    #     validation (and does something different at the end),
    #  2) or having filled out a previous version of the same form which
    #     had some validation missing,
    #  3) or previously having filled out the form when they had more
    #     privileges than they do now.
    #
    # Since the hashes only take into account values, and not other
    # validation the form might do, we must re-do validation now for
    # security reasons.
    previous_form_list = []
    for i in range(current_step):
        f = self.get_form(i, request.POST)
        if not self._check_security_hash(request.POST.get("hash_%d" % i, ''),
                                         request, f):
            return self.render_hash_failure(request, i)

        if not f.is_valid():
            return self.render_revalidation_failure(request, i, f)
        else:
            self.process_step(request, f, i)
            previous_form_list.append(f)

    # Process the current step. If it's valid, go to the next step or call
    # done(), depending on whether any steps remain.
    if request.method == 'POST':
        form = self.get_form(current_step, request.POST)
    else:
        form = self.get_form(current_step)
    if form.is_valid():
        self.process_step(request, form, current_step)
        next_step = current_step + 1

        if next_step == self.num_steps():
            return self.done(request, previous_form_list + [form])
        else:
            form = self.get_form(next_step)
            self.step = current_step = next_step

    return self.render(form, request, current_step)
def render(self, form, request, step, context=None):
"Renders the given Form object, returning an HttpResponse."
old_data = request.POST
prev_fields = []
if old_data:
hidden = HiddenInput()
# Collect all data from previous steps and render it as HTML hidden fields.
for i in range(step):
old_form = self.get_form(i, old_data)
hash_name = 'hash_%s' % i
prev_fields.extend([bf.as_hidden() for bf in old_form])
prev_fields.append(hidden.render(hash_name, old_data.get(hash_name, self.security_hash(request, old_form))))
return self.render_template(request, form, ''.join(prev_fields), step, context)
# METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ########################
def prefix_for_step(self, step):
"Given the step, returns a Form prefix to use."
return str(step)
def render_hash_failure(self, request, step):
"""
Hook for rendering a template if a hash check failed.
step is the step that failed. Any previous step is guaranteed to be
valid.
This default implementation simply renders the form for the given step,
but subclasses may want to display an error message, etc.
"""
return self.render(self.get_form(step), request, step, context={'wizard_error': _('We apologize, but your form has expired. Please continue filling out the form from this page.')})
    def render_revalidation_failure(self, request, step, form):
        """
        Hook for rendering a template if final revalidation failed.

        It is highly unlikely that this point would ever be reached, but see
        the comment in __call__() for an explanation. The default simply
        re-renders the failed form for the given step.
        """
        return self.render(form, request, step)
    def security_hash(self, request, form):
        """
        Calculates the security hash for the given HttpRequest and Form instances.

        Subclasses may want to take into account request-specific information,
        such as the IP address. The default implementation hashes only the
        form data; the request argument is unused here.
        """
        return form_hmac(form)
def get_current_or_first_step(self, request, *args, **kwargs):
"""
Given the request object and whatever *args and **kwargs were passed to
__call__(), returns the current step (which is zero-based).
Note that the result should not be trusted. It may even be a completely
invalid number. It's not the job of this method to validate it.
"""
if not request.POST:
return 0
try:
step = int(request.POST.get(self.step_field_name, 0))
except ValueError:
return 0
return step
    def parse_params(self, request, *args, **kwargs):
        """
        Hook for setting some state at the beginning of __call__().

        Receives the request object and whatever *args and **kwargs were
        passed to __call__(). The default implementation does nothing;
        subclasses may override it to record per-request state.
        """
        pass
    def get_template(self, step):
        """
        Hook for specifying the name of the template to use for a given step.

        Note that this can return a tuple of template names if you'd like to
        use the template system's select_template() hook.

        The default implementation uses the same template for every step.
        """
        return 'forms/wizard.html'
def render_template(self, request, form, previous_fields, step, context=None):
"""
Renders the template for the given step, returning an HttpResponse object.
Override this method if you want to add a custom context, return a
different MIME type, etc. If you only need to override the template
name, use get_template() instead.
The template will be rendered with the following context:
step_field -- The name of the hidden field containing the step.
step0 -- The current step (zero-based).
step -- The current step (one-based).
step_count -- The total number of steps.
form -- The Form instance for the current step (either empty
or with errors).
previous_fields -- A string representing every previous data field,
plus hashes for completed forms, all in the form of
hidden fields. Note that you'll need to run this
through the "safe" template filter, to prevent
auto-escaping, because it's raw HTML.
"""
context = context or {}
context.update(self.extra_context)
return render_to_response(self.get_template(step), dict(context,
step_field=self.step_field_name,
step0=step,
step=step + 1,
step_count=self.num_steps(),
form=form,
previous_fields=previous_fields
), context_instance=RequestContext(request))
    def process_step(self, request, form, step):
        """
        Hook for modifying the FormWizard's internal state, given a fully
        validated Form object. The Form is guaranteed to have clean, valid
        data.

        This method should *not* modify any of that data. Rather, it might
        want to set self.extra_context or dynamically alter self.form_list,
        based on previously submitted forms.

        Note that this method is called every time a page is rendered for
        *all* submitted steps, so it must be safe to run repeatedly.
        """
        pass
# METHODS SUBCLASSES MUST OVERRIDE ########################################
def done(self, request, form_list):
"""
Hook for doing something with the validated data. This is responsible
for the final processing.
form_list is a list of Form instances, each containing clean, valid
data.
"""
raise NotImplementedError("Your %s class has not defined a done() method, which is required." % self.__class__.__name__)
| agpl-3.0 |
Curious72/sympy | sympy/ntheory/tests/test_bbp_pi.py | 45 | 9425 | from random import randint
from sympy.ntheory.bbp_pi import pi_hex_digits
from sympy.utilities.pytest import raises
# http://www.herongyang.com/Cryptography/Blowfish-First-8366-Hex-Digits-of-PI.html
# There are actually 8336 listed there; with the preppended 3 there are 8337
# below
dig=''.join('''
3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa98ec4e6c89452821e638d013
77be5466cf34e90c6cc0ac29b7c97c50dd3f84d5b5b54709179216d5d98979fb1bd1310ba698dfb5
ac2ffd72dbd01adfb7b8e1afed6a267e96ba7c9045f12c7f9924a19947b3916cf70801f2e2858efc
16636920d871574e69a458fea3f4933d7e0d95748f728eb658718bcd5882154aee7b54a41dc25a59
b59c30d5392af26013c5d1b023286085f0ca417918b8db38ef8e79dcb0603a180e6c9e0e8bb01e8a
3ed71577c1bd314b2778af2fda55605c60e65525f3aa55ab945748986263e8144055ca396a2aab10
b6b4cc5c341141e8cea15486af7c72e993b3ee1411636fbc2a2ba9c55d741831f6ce5c3e169b8793
1eafd6ba336c24cf5c7a325381289586773b8f48986b4bb9afc4bfe81b6628219361d809ccfb21a9
91487cac605dec8032ef845d5de98575b1dc262302eb651b8823893e81d396acc50f6d6ff383f442
392e0b4482a484200469c8f04a9e1f9b5e21c66842f6e96c9a670c9c61abd388f06a51a0d2d8542f
68960fa728ab5133a36eef0b6c137a3be4ba3bf0507efb2a98a1f1651d39af017666ca593e82430e
888cee8619456f9fb47d84a5c33b8b5ebee06f75d885c12073401a449f56c16aa64ed3aa62363f77
061bfedf72429b023d37d0d724d00a1248db0fead349f1c09b075372c980991b7b25d479d8f6e8de
f7e3fe501ab6794c3b976ce0bd04c006bac1a94fb6409f60c45e5c9ec2196a246368fb6faf3e6c53
b51339b2eb3b52ec6f6dfc511f9b30952ccc814544af5ebd09bee3d004de334afd660f2807192e4b
b3c0cba85745c8740fd20b5f39b9d3fbdb5579c0bd1a60320ad6a100c6402c7279679f25fefb1fa3
cc8ea5e9f8db3222f83c7516dffd616b152f501ec8ad0552ab323db5fafd23876053317b483e00df
829e5c57bbca6f8ca01a87562edf1769dbd542a8f6287effc3ac6732c68c4f5573695b27b0bbca58
c8e1ffa35db8f011a010fa3d98fd2183b84afcb56c2dd1d35b9a53e479b6f84565d28e49bc4bfb97
90e1ddf2daa4cb7e3362fb1341cee4c6e8ef20cada36774c01d07e9efe2bf11fb495dbda4dae9091
98eaad8e716b93d5a0d08ed1d0afc725e08e3c5b2f8e7594b78ff6e2fbf2122b648888b812900df0
1c4fad5ea0688fc31cd1cff191b3a8c1ad2f2f2218be0e1777ea752dfe8b021fa1e5a0cc0fb56f74
e818acf3d6ce89e299b4a84fe0fd13e0b77cc43b81d2ada8d9165fa2668095770593cc7314211a14
77e6ad206577b5fa86c75442f5fb9d35cfebcdaf0c7b3e89a0d6411bd3ae1e7e4900250e2d2071b3
5e226800bb57b8e0af2464369bf009b91e5563911d59dfa6aa78c14389d95a537f207d5ba202e5b9
c5832603766295cfa911c819684e734a41b3472dca7b14a94a1b5100529a532915d60f573fbc9bc6
e42b60a47681e6740008ba6fb5571be91ff296ec6b2a0dd915b6636521e7b9f9b6ff34052ec58556
6453b02d5da99f8fa108ba47996e85076a4b7a70e9b5b32944db75092ec4192623ad6ea6b049a7df
7d9cee60b88fedb266ecaa8c71699a17ff5664526cc2b19ee1193602a575094c29a0591340e4183a
3e3f54989a5b429d656b8fe4d699f73fd6a1d29c07efe830f54d2d38e6f0255dc14cdd20868470eb
266382e9c6021ecc5e09686b3f3ebaefc93c9718146b6a70a1687f358452a0e286b79c5305aa5007
373e07841c7fdeae5c8e7d44ec5716f2b8b03ada37f0500c0df01c1f040200b3ffae0cf51a3cb574
b225837a58dc0921bdd19113f97ca92ff69432477322f547013ae5e58137c2dadcc8b576349af3dd
a7a94461460fd0030eecc8c73ea4751e41e238cd993bea0e2f3280bba1183eb3314e548b384f6db9
086f420d03f60a04bf2cb8129024977c795679b072bcaf89afde9a771fd9930810b38bae12dccf3f
2e5512721f2e6b7124501adde69f84cd877a5847187408da17bc9f9abce94b7d8cec7aec3adb851d
fa63094366c464c3d2ef1c18473215d908dd433b3724c2ba1612a14d432a65c45150940002133ae4
dd71dff89e10314e5581ac77d65f11199b043556f1d7a3c76b3c11183b5924a509f28fe6ed97f1fb
fa9ebabf2c1e153c6e86e34570eae96fb1860e5e0a5a3e2ab3771fe71c4e3d06fa2965dcb999e71d
0f803e89d65266c8252e4cc9789c10b36ac6150eba94e2ea78a5fc3c531e0a2df4f2f74ea7361d2b
3d1939260f19c279605223a708f71312b6ebadfe6eeac31f66e3bc4595a67bc883b17f37d1018cff
28c332ddefbe6c5aa56558218568ab9802eecea50fdb2f953b2aef7dad5b6e2f841521b628290761
70ecdd4775619f151013cca830eb61bd960334fe1eaa0363cfb5735c904c70a239d59e9e0bcbaade
14eecc86bc60622ca79cab5cabb2f3846e648b1eaf19bdf0caa02369b9655abb5040685a323c2ab4
b3319ee9d5c021b8f79b540b19875fa09995f7997e623d7da8f837889a97e32d7711ed935f166812
810e358829c7e61fd696dedfa17858ba9957f584a51b2272639b83c3ff1ac24696cdb30aeb532e30
548fd948e46dbc312858ebf2ef34c6ffeafe28ed61ee7c3c735d4a14d9e864b7e342105d14203e13
e045eee2b6a3aaabeadb6c4f15facb4fd0c742f442ef6abbb5654f3b1d41cd2105d81e799e86854d
c7e44b476a3d816250cf62a1f25b8d2646fc8883a0c1c7b6a37f1524c369cb749247848a0b5692b2
85095bbf00ad19489d1462b17423820e0058428d2a0c55f5ea1dadf43e233f70613372f0928d937e
41d65fecf16c223bdb7cde3759cbee74604085f2a7ce77326ea607808419f8509ee8efd85561d997
35a969a7aac50c06c25a04abfc800bcadc9e447a2ec3453484fdd567050e1e9ec9db73dbd3105588
cd675fda79e3674340c5c43465713e38d83d28f89ef16dff20153e21e78fb03d4ae6e39f2bdb83ad
f7e93d5a68948140f7f64c261c94692934411520f77602d4f7bcf46b2ed4a20068d40824713320f4
6a43b7d4b7500061af1e39f62e9724454614214f74bf8b88404d95fc1d96b591af70f4ddd366a02f
45bfbc09ec03bd97857fac6dd031cb850496eb27b355fd3941da2547e6abca0a9a28507825530429
f40a2c86dae9b66dfb68dc1462d7486900680ec0a427a18dee4f3ffea2e887ad8cb58ce0067af4d6
b6aace1e7cd3375fecce78a399406b2a4220fe9e35d9f385b9ee39d7ab3b124e8b1dc9faf74b6d18
5626a36631eae397b23a6efa74dd5b43326841e7f7ca7820fbfb0af54ed8feb397454056acba4895
2755533a3a20838d87fe6ba9b7d096954b55a867bca1159a58cca9296399e1db33a62a4a563f3125
f95ef47e1c9029317cfdf8e80204272f7080bb155c05282ce395c11548e4c66d2248c1133fc70f86
dc07f9c9ee41041f0f404779a45d886e17325f51ebd59bc0d1f2bcc18f41113564257b7834602a9c
60dff8e8a31f636c1b0e12b4c202e1329eaf664fd1cad181156b2395e0333e92e13b240b62eebeb9
2285b2a20ee6ba0d99de720c8c2da2f728d012784595b794fd647d0862e7ccf5f05449a36f877d48
fac39dfd27f33e8d1e0a476341992eff743a6f6eabf4f8fd37a812dc60a1ebddf8991be14cdb6e6b
0dc67b55106d672c372765d43bdcd0e804f1290dc7cc00ffa3b5390f92690fed0b667b9ffbcedb7d
9ca091cf0bd9155ea3bb132f88515bad247b9479bf763bd6eb37392eb3cc1159798026e297f42e31
2d6842ada7c66a2b3b12754ccc782ef11c6a124237b79251e706a1bbe64bfb63501a6b101811caed
fa3d25bdd8e2e1c3c9444216590a121386d90cec6ed5abea2a64af674eda86a85fbebfe98864e4c3
fe9dbc8057f0f7c08660787bf86003604dd1fd8346f6381fb07745ae04d736fccc83426b33f01eab
71b08041873c005e5f77a057bebde8ae2455464299bf582e614e58f48ff2ddfda2f474ef388789bd
c25366f9c3c8b38e74b475f25546fcd9b97aeb26618b1ddf84846a0e79915f95e2466e598e20b457
708cd55591c902de4cb90bace1bb8205d011a862487574a99eb77f19b6e0a9dc09662d09a1c43246
33e85a1f0209f0be8c4a99a0251d6efe101ab93d1d0ba5a4dfa186f20f2868f169dcb7da83573906
fea1e2ce9b4fcd7f5250115e01a70683faa002b5c40de6d0279af88c27773f8641c3604c0661a806
b5f0177a28c0f586e0006058aa30dc7d6211e69ed72338ea6353c2dd94c2c21634bbcbee5690bcb6
deebfc7da1ce591d766f05e4094b7c018839720a3d7c927c2486e3725f724d9db91ac15bb4d39eb8
fced54557808fca5b5d83d7cd34dad0fc41e50ef5eb161e6f8a28514d96c51133c6fd5c7e756e14e
c4362abfceddc6c837d79a323492638212670efa8e406000e03a39ce37d3faf5cfabc277375ac52d
1b5cb0679e4fa33742d382274099bc9bbed5118e9dbf0f7315d62d1c7ec700c47bb78c1b6b21a190
45b26eb1be6a366eb45748ab2fbc946e79c6a376d26549c2c8530ff8ee468dde7dd5730a1d4cd04d
c62939bbdba9ba4650ac9526e8be5ee304a1fad5f06a2d519a63ef8ce29a86ee22c089c2b843242e
f6a51e03aa9cf2d0a483c061ba9be96a4d8fe51550ba645bd62826a2f9a73a3ae14ba99586ef5562
e9c72fefd3f752f7da3f046f6977fa0a5980e4a91587b086019b09e6ad3b3ee593e990fd5a9e34d7
972cf0b7d9022b8b5196d5ac3a017da67dd1cf3ed67c7d2d281f9f25cfadf2b89b5ad6b4725a88f5
4ce029ac71e019a5e647b0acfded93fa9be8d3c48d283b57ccf8d5662979132e28785f0191ed7560
55f7960e44e3d35e8c15056dd488f46dba03a161250564f0bdc3eb9e153c9057a297271aeca93a07
2a1b3f6d9b1e6321f5f59c66fb26dcf3197533d928b155fdf5035634828aba3cbb28517711c20ad9
f8abcc5167ccad925f4de817513830dc8e379d58629320f991ea7a90c2fb3e7bce5121ce64774fbe
32a8b6e37ec3293d4648de53696413e680a2ae0810dd6db22469852dfd09072166b39a460a6445c0
dd586cdecf1c20c8ae5bbef7dd1b588d40ccd2017f6bb4e3bbdda26a7e3a59ff453e350a44bcb4cd
d572eacea8fa6484bb8d6612aebf3c6f47d29be463542f5d9eaec2771bf64e6370740e0d8de75b13
57f8721671af537d5d4040cb084eb4e2cc34d2466a0115af84e1b0042895983a1d06b89fb4ce6ea0
486f3f3b823520ab82011a1d4b277227f8611560b1e7933fdcbb3a792b344525bda08839e151ce79
4b2f32c9b7a01fbac9e01cc87ebcc7d1f6cf0111c3a1e8aac71a908749d44fbd9ad0dadecbd50ada
380339c32ac69136678df9317ce0b12b4ff79e59b743f5bb3af2d519ff27d9459cbf97222c15e6fc
2a0f91fc719b941525fae59361ceb69cebc2a8645912baa8d1b6c1075ee3056a0c10d25065cb03a4
42e0ec6e0e1698db3b4c98a0be3278e9649f1f9532e0d392dfd3a0342b8971f21e1b0a74414ba334
8cc5be7120c37632d8df359f8d9b992f2ee60b6f470fe3f11de54cda541edad891ce6279cfcd3e7e
6f1618b166fd2c1d05848fd2c5f6fb2299f523f357a632762393a8353156cccd02acf081625a75eb
b56e16369788d273ccde96629281b949d04c50901b71c65614e6c6c7bd327a140a45e1d006c3f27b
9ac9aa53fd62a80f00bb25bfe235bdd2f671126905b2040222b6cbcf7ccd769c2b53113ec01640e3
d338abbd602547adf0ba38209cf746ce7677afa1c52075606085cbfe4e8ae88dd87aaaf9b04cf9aa
7e1948c25c02fb8a8c01c36ae4d6ebe1f990d4f869a65cdea03f09252dc208e69fb74e6132ce77e2
5b578fdfe33ac372e6'''.split())
def test_hex_pi_nth_digits():
    # Known digit windows: (start position, expected 14-hex-digit string).
    known_values = [
        (0, '3243f6a8885a30'),
        (1, '243f6a8885a308'),
        (10000, '68ac8fcfb8016c'),
        (13, '08d313198a2e03'),
    ]
    for start, expected in known_values:
        assert pi_hex_digits(start) == expected
    # An explicit precision limits (or empties) the returned digits.
    assert pi_hex_digits(0, 3) == '324'
    assert pi_hex_digits(0, 0) == ''
    # Negative and non-integer positions are rejected.
    raises(ValueError, lambda: pi_hex_digits(-1))
    raises(ValueError, lambda: pi_hex_digits(3.14))
    # This will pick a random segment to compute every time it is run.
    # If it ever fails, there is an error in the computation.
    start = randint(0, len(dig))
    prec = randint(0, len(dig) - start)
    assert pi_hex_digits(start, prec) == dig[start: start + prec]
| bsd-3-clause |
hustcc/iOS-private-api-checker | utils/lib/xlsxwriter/styles.py | 13 | 20948 | ###############################################################################
#
# Styles - A class for writing the Excel XLSX Worksheet file.
#
# Copyright 2013-2015, John McNamara, jmcnamara@cpan.org
#
# Package imports.
from . import xmlwriter
class Styles(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Styles file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
    def __init__(self):
        """
        Constructor.

        All style data is initialized empty here and filled in later via
        _set_style_properties().
        """
        super(Styles, self).__init__()
        self.xf_formats = []          # Cell XF Format objects.
        self.palette = []             # Colour palette.
        self.font_count = 0           # Count of unique fonts.
        self.num_format_count = 0     # Count of unique number formats.
        self.border_count = 0         # Count of unique borders.
        self.fill_count = 0           # Count of unique fills.
        self.custom_colors = []       # User-defined custom colours.
        self.dxf_formats = []         # Conditional-format (dxf) formats.
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self):
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
# Add the style sheet.
self._write_style_sheet()
# Write the number formats.
self._write_num_fmts()
# Write the fonts.
self._write_fonts()
# Write the fills.
self._write_fills()
# Write the borders element.
self._write_borders()
# Write the cellStyleXfs element.
self._write_cell_style_xfs()
# Write the cellXfs element.
self._write_cell_xfs()
# Write the cellStyles element.
self._write_cell_styles()
# Write the dxfs element.
self._write_dxfs()
# Write the tableStyles element.
self._write_table_styles()
# Write the colors element.
self._write_colors()
# Close the style sheet tag.
self._xml_end_tag('styleSheet')
# Close the file.
self._xml_close()
    def _set_style_properties(self, properties):
        # Pass in the Format objects and other properties used in the styles.
        #
        # `properties` is a fixed-order sequence; the indexes below must
        # match the order in which the caller packs it.
        self.xf_formats = properties[0]
        self.palette = properties[1]
        self.font_count = properties[2]
        self.num_format_count = properties[3]
        self.border_count = properties[4]
        self.fill_count = properties[5]
        self.custom_colors = properties[6]
        self.dxf_formats = properties[7]
def _get_palette_color(self, color):
# Convert the RGB color.
if color[0] == '#':
color = color[1:]
return "FF" + color.upper()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_style_sheet(self):
# Write the <styleSheet> element.
xmlns = 'http://schemas.openxmlformats.org/spreadsheetml/2006/main'
attributes = [('xmlns', xmlns)]
self._xml_start_tag('styleSheet', attributes)
def _write_num_fmts(self):
# Write the <numFmts> element.
if not self.num_format_count:
return
attributes = [('count', self.num_format_count)]
self._xml_start_tag('numFmts', attributes)
# Write the numFmts elements.
for xf_format in self.xf_formats:
# Ignore built-in number formats, i.e., < 164.
if xf_format.num_format_index >= 164:
self._write_num_fmt(xf_format.num_format_index,
xf_format.num_format)
self._xml_end_tag('numFmts')
    def _write_num_fmt(self, num_fmt_id, format_code):
        # Write a single <numFmt> element.
        #
        # num_fmt_id:  the numeric format index.
        # format_code: the format string (only used for ids >= 164; for
        #              built-in ids the fixed table below takes precedence).
        #
        # Table of Excel's built-in number format strings, keyed by id.
        format_codes = {
            0: 'General',
            1: '0',
            2: '0.00',
            3: '#,##0',
            4: '#,##0.00',
            5: '($#,##0_);($#,##0)',
            6: '($#,##0_);[Red]($#,##0)',
            7: '($#,##0.00_);($#,##0.00)',
            8: '($#,##0.00_);[Red]($#,##0.00)',
            9: '0%',
            10: '0.00%',
            11: '0.00E+00',
            12: '# ?/?',
            13: '# ??/??',
            14: 'm/d/yy',
            15: 'd-mmm-yy',
            16: 'd-mmm',
            17: 'mmm-yy',
            18: 'h:mm AM/PM',
            19: 'h:mm:ss AM/PM',
            20: 'h:mm',
            21: 'h:mm:ss',
            22: 'm/d/yy h:mm',
            37: '(#,##0_);(#,##0)',
            38: '(#,##0_);[Red](#,##0)',
            39: '(#,##0.00_);(#,##0.00)',
            40: '(#,##0.00_);[Red](#,##0.00)',
            41: '_(* #,##0_);_(* (#,##0);_(* "-"_);_(_)',
            42: '_($* #,##0_);_($* (#,##0);_($* "-"_);_(_)',
            43: '_(* #,##0.00_);_(* (#,##0.00);_(* "-"??_);_(_)',
            44: '_($* #,##0.00_);_($* (#,##0.00);_($* "-"??_);_(_)',
            45: 'mm:ss',
            46: '[h]:mm:ss',
            47: 'mm:ss.0',
            48: '##0.0E+0',
            49: '@'}
        # Set the format code for built-in number formats. Ids below 164
        # always use the table above (falling back to 'General'), ignoring
        # any caller-supplied format_code.
        if num_fmt_id < 164:
            if num_fmt_id in format_codes:
                format_code = format_codes[num_fmt_id]
            else:
                format_code = 'General'
        attributes = [
            ('numFmtId', num_fmt_id),
            ('formatCode', format_code),
        ]
        self._xml_empty_tag('numFmt', attributes)
def _write_fonts(self):
# Write the <fonts> element.
attributes = [('count', self.font_count)]
self._xml_start_tag('fonts', attributes)
# Write the font elements for xf_format objects that have them.
for xf_format in self.xf_formats:
if xf_format.has_font:
self._write_font(xf_format)
self._xml_end_tag('fonts')
    def _write_font(self, xf_format, is_dxf_format=False):
        # Write the <font> element for a format.
        #
        # When is_dxf_format is True the font belongs to a conditional
        # format (dxf) definition, which omits the size, name, family and
        # scheme sub-elements and the default colour.
        self._xml_start_tag('font')
        # The condense and extend elements are mainly used in dxf formats.
        if xf_format.font_condense:
            self._write_condense()
        if xf_format.font_extend:
            self._write_extend()
        if xf_format.bold:
            self._xml_empty_tag('b')
        if xf_format.italic:
            self._xml_empty_tag('i')
        if xf_format.font_strikeout:
            self._xml_empty_tag('strike')
        if xf_format.font_outline:
            self._xml_empty_tag('outline')
        if xf_format.font_shadow:
            self._xml_empty_tag('shadow')
        # Handle the underline variants.
        if xf_format.underline:
            self._write_underline(xf_format.underline)
        # font_script: 1 = superscript, 2 = subscript.
        if xf_format.font_script == 1:
            self._write_vert_align('superscript')
        if xf_format.font_script == 2:
            self._write_vert_align('subscript')
        if not is_dxf_format:
            self._xml_empty_tag('sz', [('val', xf_format.font_size)])
        # Font colour priority: theme, then indexed, then explicit rgb;
        # non-dxf fonts fall back to theme 1.
        if xf_format.theme == -1:
            # Ignore for excel2003_style.
            pass
        elif xf_format.theme:
            self._write_color('theme', xf_format.theme)
        elif xf_format.color_indexed:
            self._write_color('indexed', xf_format.color_indexed)
        elif xf_format.font_color:
            color = self._get_palette_color(xf_format.font_color)
            self._write_color('rgb', color)
        elif not is_dxf_format:
            self._write_color('theme', 1)
        if not is_dxf_format:
            self._xml_empty_tag('name', [('val', xf_format.font_name)])
            if xf_format.font_family:
                self._xml_empty_tag('family', [('val', xf_format.font_family)])
            # Calibri gets a scheme element unless used for a hyperlink.
            if xf_format.font_name == 'Calibri' and not xf_format.hyperlink:
                self._xml_empty_tag(
                    'scheme',
                    [('val', xf_format.font_scheme)])
        self._xml_end_tag('font')
def _write_underline(self, underline):
# Write the underline font element.
if underline == 2:
attributes = [('val', 'double')]
elif underline == 33:
attributes = [('val', 'singleAccounting')]
elif underline == 34:
attributes = [('val', 'doubleAccounting')]
else:
# Default to single underline.
attributes = []
self._xml_empty_tag('u', attributes)
def _write_vert_align(self, val):
# Write the <vertAlign> font sub-element.
attributes = [('val', val)]
self._xml_empty_tag('vertAlign', attributes)
def _write_color(self, name, value):
# Write the <color> element.
attributes = [(name, value)]
self._xml_empty_tag('color', attributes)
def _write_fills(self):
# Write the <fills> element.
attributes = [('count', self.fill_count)]
self._xml_start_tag('fills', attributes)
# Write the default fill element.
self._write_default_fill('none')
self._write_default_fill('gray125')
# Write the fill elements for xf_format objects that have them.
for xf_format in self.xf_formats:
if xf_format.has_fill:
self._write_fill(xf_format)
self._xml_end_tag('fills')
def _write_default_fill(self, pattern_type):
# Write the <fill> element for the default fills.
self._xml_start_tag('fill')
self._xml_empty_tag('patternFill', [('patternType', pattern_type)])
self._xml_end_tag('fill')
    def _write_fill(self, xf_format, is_dxf_format=False):
        # Write the <fill> element for a format.
        #
        # `pattern` is an index into the `patterns` tuple below.
        pattern = xf_format.pattern
        bg_color = xf_format.bg_color
        fg_color = xf_format.fg_color
        # Colors for dxf formats are handled differently from normal formats
        # since the normal xf_format reverses the meaning of BG and FG for
        # solid fills.
        if is_dxf_format:
            bg_color = xf_format.dxf_bg_color
            fg_color = xf_format.dxf_fg_color
        # Excel pattern names, in index order.
        patterns = (
            'none',
            'solid',
            'mediumGray',
            'darkGray',
            'lightGray',
            'darkHorizontal',
            'darkVertical',
            'darkDown',
            'darkUp',
            'darkGrid',
            'darkTrellis',
            'lightHorizontal',
            'lightVertical',
            'lightDown',
            'lightUp',
            'lightGrid',
            'lightTrellis',
            'gray125',
            'gray0625',
        )
        self._xml_start_tag('fill')
        # The "none" pattern is handled differently for dxf formats:
        # pattern 0/1 gets a <patternFill> with no patternType attribute.
        if is_dxf_format and pattern <= 1:
            self._xml_start_tag('patternFill')
        else:
            self._xml_start_tag(
                'patternFill',
                [('patternType', patterns[pattern])])
        if fg_color:
            fg_color = self._get_palette_color(fg_color)
            self._xml_empty_tag('fgColor', [('rgb', fg_color)])
        if bg_color:
            bg_color = self._get_palette_color(bg_color)
            self._xml_empty_tag('bgColor', [('rgb', bg_color)])
        else:
            # No background set: non-dxf fills get the indexed 64 default.
            if not is_dxf_format:
                self._xml_empty_tag('bgColor', [('indexed', 64)])
        self._xml_end_tag('patternFill')
        self._xml_end_tag('fill')
def _write_borders(self):
# Write the <borders> element.
attributes = [('count', self.border_count)]
self._xml_start_tag('borders', attributes)
# Write the border elements for xf_format objects that have them.
for xf_format in self.xf_formats:
if xf_format.has_border:
self._write_border(xf_format)
self._xml_end_tag('borders')
    def _write_border(self, xf_format, is_dxf_format=False):
        # Write the <border> element for a format.
        attributes = []
        # Diagonal borders add attributes to the <border> element:
        # diag_type 1 = up, 2 = down, 3 = both.
        if xf_format.diag_type == 1:
            attributes.append(('diagonalUp', 1))
        elif xf_format.diag_type == 2:
            attributes.append(('diagonalDown', 1))
        elif xf_format.diag_type == 3:
            attributes.append(('diagonalUp', 1))
            attributes.append(('diagonalDown', 1))
        # Ensure that a default diag border is set if the diag type is set.
        # Note: this mutates the passed-in xf_format.
        if xf_format.diag_type and not xf_format.diag_border:
            xf_format.diag_border = 1
        # Write the start border tag.
        self._xml_start_tag('border', attributes)
        # Write the <border> sub elements, in the order left, right, top,
        # bottom, then diagonal.
        self._write_sub_border(
            'left',
            xf_format.left,
            xf_format.left_color)
        self._write_sub_border(
            'right',
            xf_format.right,
            xf_format.right_color)
        self._write_sub_border(
            'top',
            xf_format.top,
            xf_format.top_color)
        self._write_sub_border(
            'bottom',
            xf_format.bottom,
            xf_format.bottom_color)
        # Condition DXF formats don't allow diagonal borders.
        if not is_dxf_format:
            self._write_sub_border(
                'diagonal',
                xf_format.diag_border,
                xf_format.diag_color)
        # DXF borders additionally carry empty vertical/horizontal elements.
        if is_dxf_format:
            self._write_sub_border('vertical', None, None)
            self._write_sub_border('horizontal', None, None)
        self._xml_end_tag('border')
    def _write_sub_border(self, border_type, style, color):
        # Write the <border> sub elements such as <right>, <top>, etc.
        #
        # `style` is an index into the border_styles tuple below; a false
        # value (None or 0) produces an empty element with no style/colour.
        attributes = []
        if not style:
            self._xml_empty_tag(border_type)
            return
        # Excel border style names, in index order.
        border_styles = (
            'none',
            'thin',
            'medium',
            'dashed',
            'dotted',
            'thick',
            'double',
            'hair',
            'mediumDashed',
            'dashDot',
            'mediumDashDot',
            'dashDotDot',
            'mediumDashDotDot',
            'slantDashDot',
        )
        attributes.append(('style', border_styles[style]))
        self._xml_start_tag(border_type, attributes)
        if color:
            # Explicit colour, converted to Excel's ARGB form.
            color = self._get_palette_color(color)
            self._xml_empty_tag('color', [('rgb', color)])
        else:
            # No colour given: mark it as automatic.
            self._xml_empty_tag('color', [('auto', 1)])
        self._xml_end_tag(border_type)
def _write_cell_style_xfs(self):
# Write the <cellStyleXfs> element.
attributes = [('count', 1)]
self._xml_start_tag('cellStyleXfs', attributes)
self._write_style_xf()
self._xml_end_tag('cellStyleXfs')
    def _write_cell_xfs(self):
        # Write the <cellXfs> element.
        formats = self.xf_formats
        # Workaround for when the last xf_format is used for the comment font
        # and shouldn't be used for cellXfs.
        # NOTE(review): `formats` aliases self.xf_formats, so the pop() below
        # also removes the entry from the shared list; also assumes at least
        # one format is present — TODO confirm the caller guarantees this.
        last_format = formats[-1]
        if last_format.font_only:
            formats.pop()
        attributes = [('count', len(formats))]
        self._xml_start_tag('cellXfs', attributes)
        # Write the xf elements.
        for xf_format in formats:
            self._write_xf(xf_format)
        self._xml_end_tag('cellXfs')
def _write_style_xf(self):
# Write the style <xf> element.
num_fmt_id = 0
font_id = 0
fill_id = 0
border_id = 0
attributes = [
('numFmtId', num_fmt_id),
('fontId', font_id),
('fillId', fill_id),
('borderId', border_id),
]
self._xml_empty_tag('xf', attributes)
    def _write_xf(self, xf_format):
        # Write a <xf> element for a cell format.
        #
        # The numeric ids are indexes into the numFmts/fonts/fills/borders
        # collections written earlier in the file.
        num_fmt_id = xf_format.num_format_index
        font_id = xf_format.font_index
        fill_id = xf_format.fill_index
        border_id = xf_format.border_index
        xf_id = 0
        has_align = 0
        has_protect = 0
        attributes = [
            ('numFmtId', num_fmt_id),
            ('fontId', font_id),
            ('fillId', fill_id),
            ('borderId', border_id),
            ('xfId', xf_id),
        ]
        # Add applyNumberFormat attribute if XF format uses a number format.
        if xf_format.num_format_index > 0:
            attributes.append(('applyNumberFormat', 1))
        # Add applyFont attribute if XF format uses a font element.
        if xf_format.font_index > 0:
            attributes.append(('applyFont', 1))
        # Add applyFill attribute if XF format uses a fill element.
        if xf_format.fill_index > 0:
            attributes.append(('applyFill', 1))
        # Add applyBorder attribute if XF format uses a border element.
        if xf_format.border_index > 0:
            attributes.append(('applyBorder', 1))
        # Check if XF format has alignment properties set.
        (apply_align, align) = xf_format._get_align_properties()
        # Check if an alignment sub-element should be written.
        if apply_align and align:
            has_align = 1
        # We can also have applyAlignment without a sub-element.
        if apply_align:
            attributes.append(('applyAlignment', 1))
        # Check for cell protection properties.
        protection = xf_format._get_protection_properties()
        if protection:
            attributes.append(('applyProtection', 1))
            has_protect = 1
        # Write XF with <alignment>/<protection> sub-elements if required,
        # otherwise as a single empty tag.
        if has_align or has_protect:
            self._xml_start_tag('xf', attributes)
            if has_align:
                self._xml_empty_tag('alignment', align)
            if has_protect:
                self._xml_empty_tag('protection', protection)
            self._xml_end_tag('xf')
        else:
            self._xml_empty_tag('xf', attributes)
def _write_cell_styles(self):
# Write the <cellStyles> element.
attributes = [('count', 1)]
self._xml_start_tag('cellStyles', attributes)
self._write_cell_style()
self._xml_end_tag('cellStyles')
def _write_cell_style(self):
# Write the <cellStyle> element.
name = 'Normal'
xf_id = 0
builtin_id = 0
attributes = [
('name', name),
('xfId', xf_id),
('builtinId', builtin_id),
]
self._xml_empty_tag('cellStyle', attributes)
def _write_dxfs(self):
# Write the <dxfs> element.
formats = self.dxf_formats
count = len(formats)
attributes = [('count', len(formats))]
if count:
self._xml_start_tag('dxfs', attributes)
# Write the font elements for xf_format objects that have them.
for xf_format in self.dxf_formats:
self._xml_start_tag('dxf')
if xf_format.has_dxf_font:
self._write_font(xf_format, True)
if xf_format.num_format_index:
self._write_num_fmt(xf_format.num_format_index,
xf_format.num_format)
if xf_format.has_dxf_fill:
self._write_fill(xf_format, True)
if xf_format.has_dxf_border:
self._write_border(xf_format, True)
self._xml_end_tag('dxf')
self._xml_end_tag('dxfs')
else:
self._xml_empty_tag('dxfs', attributes)
def _write_table_styles(self):
# Write the <tableStyles> element.
count = 0
default_table_style = 'TableStyleMedium9'
default_pivot_style = 'PivotStyleLight16'
attributes = [
('count', count),
('defaultTableStyle', default_table_style),
('defaultPivotStyle', default_pivot_style),
]
self._xml_empty_tag('tableStyles', attributes)
def _write_colors(self):
# Write the <colors> element.
custom_colors = self.custom_colors
if not custom_colors:
return
self._xml_start_tag('colors')
self._write_mru_colors(custom_colors)
self._xml_end_tag('colors')
def _write_mru_colors(self, custom_colors):
# Write the <mruColors> element for the most recently used colours.
# Write the custom custom_colors in reverse order.
custom_colors.reverse()
# Limit the mruColors to the last 10.
if len(custom_colors) > 10:
custom_colors = custom_colors[0:10]
self._xml_start_tag('mruColors')
# Write the custom custom_colors in reverse order.
for color in custom_colors:
self._write_color('rgb', color)
self._xml_end_tag('mruColors')
def _write_condense(self):
# Write the <condense> element.
attributes = [('val', 0)]
self._xml_empty_tag('condense', attributes)
def _write_extend(self):
# Write the <extend> element.
attributes = [('val', 0)]
self._xml_empty_tag('extend', attributes)
| gpl-2.0 |
abhattad4/Digi-Menu | digimenu2/django/core/management/commands/sqlmigrate.py | 463 | 2715 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
class Command(BaseCommand):
    """Management command that prints the SQL for a single named migration."""
    help = "Prints the SQL statements for the named migration."
    # Wrap the emitted SQL in BEGIN/COMMIT statements.
    output_transaction = True
    def add_arguments(self, parser):
        # Positional arguments identify the migration; --database selects
        # the connection whose SQL dialect is used; --backwards emits the
        # SQL for unapplying the migration instead of applying it.
        parser.add_argument('app_label',
            help='App label of the application containing the migration.')
        parser.add_argument('migration_name',
            help='Migration name to print the SQL for.')
        parser.add_argument('--database', default=DEFAULT_DB_ALIAS,
            help='Nominates a database to create SQL for. Defaults to the '
            '"default" database.')
        parser.add_argument('--backwards', action='store_true', dest='backwards',
            default=False, help='Creates SQL to unapply the migration, rather than to apply it')
    def execute(self, *args, **options):
        # sqlmigrate doesn't support coloring its output but we need to force
        # no_color=True so that the BEGIN/COMMIT statements added by
        # output_transaction don't get colored either.
        options['no_color'] = True
        return super(Command, self).execute(*args, **options)
    def handle(self, *args, **options):
        # Get the database we're operating from
        connection = connections[options['database']]
        # Load up an executor to get all the migration data
        executor = MigrationExecutor(connection)
        # Resolve command-line arguments into a migration
        app_label, migration_name = options['app_label'], options['migration_name']
        if app_label not in executor.loader.migrated_apps:
            raise CommandError("App '%s' does not have migrations" % app_label)
        try:
            # The name may be any unique prefix of the full migration name.
            migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
        except AmbiguityError:
            raise CommandError("More than one migration matches '%s' in app '%s'. Please be more specific." % (
                migration_name, app_label))
        except KeyError:
            raise CommandError("Cannot find a migration matching '%s' from app '%s'. Is it in INSTALLED_APPS?" % (
                migration_name, app_label))
        targets = [(app_label, migration.name)]
        # Make a plan that represents just the requested migrations and show SQL
        # for it
        plan = [(executor.loader.graph.nodes[targets[0]], options['backwards'])]
        sql_statements = executor.collect_sql(plan)
        return '\n'.join(sql_statements)
| bsd-3-clause |
Glottotopia/aagd | moin/local/moin/MoinMoin/support/pygments/styles/manni.py | 3 | 2449 | # -*- coding: utf-8 -*-
"""
pygments.styles.manni
~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
This is a port of the style used in the `php port`_ of pygments
by Manni. The style is called 'default' there.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class ManniStyle(Style):
    """
    A colorful style, inspired by the terminal highlighting style.
    """

    # Page background used by formatters that honour it (e.g. HTML).
    background_color = '#f0f3f3'

    # Token -> style-definition map. Values use Pygments' style shorthand:
    # colors are '#RRGGBB', flags are 'bold'/'italic' (negated with the
    # 'no' prefix), and 'border:'/'bg:' set border and background colors.
    styles = {
        Whitespace: '#bbbbbb',
        Comment: 'italic #0099FF',
        Comment.Preproc: 'noitalic #009999',
        Comment.Special: 'bold',

        Keyword: 'bold #006699',
        Keyword.Pseudo: 'nobold',
        Keyword.Type: '#007788',

        Operator: '#555555',
        Operator.Word: 'bold #000000',

        Name.Builtin: '#336666',
        Name.Function: '#CC00FF',
        Name.Class: 'bold #00AA88',
        Name.Namespace: 'bold #00CCFF',
        Name.Exception: 'bold #CC0000',
        Name.Variable: '#003333',
        Name.Constant: '#336600',
        Name.Label: '#9999FF',
        Name.Entity: 'bold #999999',
        Name.Attribute: '#330099',
        Name.Tag: 'bold #330099',
        Name.Decorator: '#9999FF',

        String: '#CC3300',
        String.Doc: 'italic',
        String.Interpol: '#AA0000',
        String.Escape: 'bold #CC3300',
        String.Regex: '#33AAAA',
        String.Symbol: '#FFCC33',
        String.Other: '#CC3300',

        Number: '#FF6600',

        Generic.Heading: 'bold #003300',
        Generic.Subheading: 'bold #003300',
        Generic.Deleted: 'border:#CC0000 bg:#FFCCCC',
        Generic.Inserted: 'border:#00CC00 bg:#CCFFCC',
        Generic.Error: '#FF0000',
        Generic.Emph: 'italic',
        Generic.Strong: 'bold',
        Generic.Prompt: 'bold #000099',
        Generic.Output: '#AAAAAA',
        Generic.Traceback: '#99CC66',

        Error: 'bg:#FFAAAA #AA0000'
    }
| mit |
ericlink/adms-server | playframework-dist/play-1.1/python/Lib/csv.py | 2 | 15523 |
"""
csv.py - read/write/investigate CSV files
"""
import re
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Explicit public API of the csv module.
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
            "Error", "Dialect", "excel", "excel_tab", "reader", "writer",
            "register_dialect", "get_dialect", "list_dialects", "Sniffer",
            "unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
    """Describe an Excel dialect.

    This must be subclassed (see csv.excel).  Valid attributes are:
    delimiter, quotechar, escapechar, doublequote, skipinitialspace,
    lineterminator, quoting.
    """
    _name = ""      # registry name; set by subclasses
    _valid = False  # abstract base stays invalid; see __init__
    # placeholders
    delimiter = None
    quotechar = None
    escapechar = None
    doublequote = None
    skipinitialspace = None
    lineterminator = None
    quoting = None

    def __init__(self):
        # Only concrete subclasses become valid; instantiating Dialect
        # itself leaves _valid False.
        if self.__class__ != Dialect:
            self._valid = True
        self._validate()

    def _validate(self):
        # Delegate attribute checking to the C-level _csv.Dialect and
        # re-raise its TypeError as csv.Error.
        try:
            _Dialect(self)
        except TypeError, e:
            # We do this for compatibility with py2.3
            raise Error(str(e))
class excel(Dialect):
    """Describe the usual properties of Excel-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\r\n'
    quoting = QUOTE_MINIMAL
# Make the dialect available by name to reader()/writer().
register_dialect("excel", excel)
class excel_tab(excel):
    """Describe the usual properties of Excel-generated TAB-delimited files."""
    delimiter = '\t'
# Registered under a hyphenated name, matching the stdlib convention.
register_dialect("excel-tab", excel_tab)
class DictReader:
    """Iterate over CSV rows as dicts keyed by ``fieldnames``.

    If ``fieldnames`` is omitted, the first non-blank row of the file is
    consumed and used as the field names.  Long rows dump their extra
    values into a list under ``restkey``; short rows pad the missing
    fields with ``restval``.
    """
    def __init__(self, f, fieldnames=None, restkey=None, restval=None,
                 dialect="excel", *args, **kwds):
        self.fieldnames = fieldnames    # list of keys for the dict
        self.restkey = restkey          # key to catch long rows
        self.restval = restval          # default value for short rows
        self.reader = reader(f, dialect, *args, **kwds)

    def __iter__(self):
        return self

    def next(self):
        row = self.reader.next()
        if self.fieldnames is None:
            # First row becomes the header; fetch the first data row.
            self.fieldnames = row
            row = self.reader.next()

        # unlike the basic reader, we prefer not to return blanks,
        # because we will typically wind up with a dict full of None
        # values
        while row == []:
            row = self.reader.next()
        d = dict(zip(self.fieldnames, row))
        lf = len(self.fieldnames)
        lr = len(row)
        if lf < lr:
            # Extra values beyond the known fields go under restkey.
            d[self.restkey] = row[lf:]
        elif lf > lr:
            # Missing trailing fields get the restval placeholder.
            for key in self.fieldnames[lr:]:
                d[key] = self.restval
        return d
class DictWriter:
    """Write dicts to a CSV file in the column order given by ``fieldnames``.

    ``restval`` fills fields missing from a row dict; ``extrasaction``
    controls whether unexpected keys raise ValueError ("raise") or are
    silently dropped ("ignore").
    """
    def __init__(self, f, fieldnames, restval="", extrasaction="raise",
                 dialect="excel", *args, **kwds):
        self.fieldnames = fieldnames    # list of keys for the dict
        self.restval = restval          # for writing short dicts
        if extrasaction.lower() not in ("raise", "ignore"):
            raise ValueError, \
                  ("extrasaction (%s) must be 'raise' or 'ignore'" %
                   extrasaction)
        self.extrasaction = extrasaction
        self.writer = writer(f, dialect, *args, **kwds)

    def _dict_to_list(self, rowdict):
        # Convert one row dict into the list the underlying writer expects,
        # optionally rejecting keys that are not in fieldnames.
        if self.extrasaction == "raise":
            for k in rowdict.keys():
                if k not in self.fieldnames:
                    raise ValueError, "dict contains fields not in fieldnames"
        return [rowdict.get(key, self.restval) for key in self.fieldnames]

    def writerow(self, rowdict):
        return self.writer.writerow(self._dict_to_list(rowdict))

    def writerows(self, rowdicts):
        rows = []
        for rowdict in rowdicts:
            rows.append(self._dict_to_list(rowdict))
        return self.writer.writerows(rows)
# Guard Sniffer's type checking against builds that exclude complex()
# (Sniffer.has_header tries complex() among its typecasts; fall back to
# float so the name always exists).
try:
    complex
except NameError:
    complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error, "Could not determine delimiter"
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
doublequote = False
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
'(?P<delim>>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
return ('', None, 0) # (quotechar, delimiter, skipinitialspace)
quotes = {}
delims = {}
spaces = 0
for m in matches:
n = regexp.groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = regexp.groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = regexp.groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = reduce(lambda a, b, quotes = quotes:
(quotes[a] > quotes[b]) and a or b, quotes.keys())
if delims:
delim = reduce(lambda a, b, delims = delims:
(delims[a] > delims[b]) and a or b, delims.keys())
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
return (quotechar, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of freqencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = filter(None, data.split('\n'))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = charFrequency[char].items()
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
items)
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- reduce(lambda a, b: (0, a[1] + b[1]),
items)[1])
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = delims.keys()[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = rdr.next() # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in columnTypes.keys():
for thisType in [int, long, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
# treat longs as ints
if thisType == long:
thisType = int
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
| mit |
muffinresearch/olympia | apps/api/views.py | 13 | 18404 | """
API views
"""
import hashlib
import itertools
import json
import random
import urllib
from datetime import date, timedelta
from django.core.cache import cache
from django.http import HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import render
from django.template.context import get_standard_processors
from django.utils import encoding, translation
from django.utils.encoding import smart_str
from django.views.decorators.csrf import csrf_exempt
import commonware.log
import jingo
import waffle
from caching.base import cached_with
from piston.utils import rc
from tower import ugettext as _, ugettext_lazy
import amo
import api
from addons.models import Addon, CompatOverride
from amo.decorators import post_required, allow_cross_site_request, json_view
from amo.models import manual_order
from amo.urlresolvers import get_url_prefix
from amo.utils import JSONEncoder
from api.authentication import AMOOAuthAuthentication
from api.forms import PerformanceForm
from api.utils import addon_to_dict, extract_filters
from perf.models import (Performance, PerformanceAppVersions,
PerformanceOSVersion)
from search.views import (AddonSuggestionsAjax, PersonaSuggestionsAjax,
name_query)
from versions.compare import version_int
# Error level used in the XML <message> template context.
ERROR = 'error'
OUT_OF_DATE = ugettext_lazy(
    u"The API version, {0:.1f}, you are using is not valid. "
    u"Please upgrade to the current version {1:.1f} API.")
# Add-on statuses that are exposed through the search API.
SEARCHABLE_STATUSES = (amo.STATUS_PUBLIC, amo.STATUS_LITE,
                       amo.STATUS_LITE_AND_NOMINATED)

# Jinja environment whose output has control characters stripped, so the
# rendered XML is always well-formed.
xml_env = jingo.env.overlay()
old_finalize = xml_env.finalize
xml_env.finalize = lambda x: amo.helpers.strip_controls(old_finalize(x))


# Hard limit of 30.  The buffer is to try for locale-specific add-ons.
MAX_LIMIT, BUFFER = 30, 10

# "New" is arbitrarily defined as 10 days old.
NEW_DAYS = 10

log = commonware.log.getLogger('z.api')
def partition(seq, key):
    """Group *seq* into buckets by key(x), yielding (key, members) pairs."""
    ordered = sorted(seq, key=key)
    return ((bucket, list(members))
            for bucket, members in itertools.groupby(ordered, key=key))
def render_xml_to_string(request, template, context=None):
    """Render *template* with the control-character-stripping XML env.

    Standard Django context processors are applied on top of *context*
    before rendering.
    """
    # BUG FIX: the default used to be the shared mutable `context={}`;
    # the update() loop below mutated it, leaking context-processor state
    # between calls. Create a fresh dict per call instead.
    if context is None:
        context = {}
    if not jingo._helpers_loaded:
        jingo.load_helpers()

    for processor in get_standard_processors():
        context.update(processor(request))

    template = xml_env.get_template(template)
    return template.render(context)
def render_xml(request, template, context=None, **kwargs):
    """Safely renders xml, stripping out nasty control characters."""
    # BUG FIX: the default used to be the shared mutable `context={}`,
    # which render_xml_to_string mutates — leaking state across calls.
    if context is None:
        context = {}
    rendered = render_xml_to_string(request, template, context)

    # Default to text/xml unless the caller overrode the content type.
    if 'content_type' not in kwargs:
        kwargs['content_type'] = 'text/xml'

    return HttpResponse(rendered, **kwargs)
def handler403(request):
    """Render the standard XML error document with a 403 status."""
    return render_xml(request, 'api/message.xml',
                      {'error_level': ERROR, 'msg': 'Not allowed'},
                      status=403)
def handler404(request):
    """Render the standard XML error document with a 404 status."""
    return render_xml(request, 'api/message.xml',
                      {'error_level': ERROR, 'msg': 'Not Found'},
                      status=404)
def handler500(request):
    """Render the standard XML error document with a 500 status."""
    return render_xml(request, 'api/message.xml',
                      {'error_level': ERROR, 'msg': 'Server Error'},
                      status=500)
def validate_api_version(version):
    """
    We want to be able to deprecate old versions of the API, therefore we check
    for a minimum API version before continuing.
    """
    # Valid iff the version falls inside the supported [MIN, MAX] range.
    return api.MIN_VERSION <= float(version) <= api.MAX_VERSION
def addon_filter(addons, addon_type, limit, app, platform, version,
                 compat_mode='strict', shuffle=True):
    """
    Filter addons by type, application, app version, and platform.

    Add-ons that support the current locale will be sorted to front of list.
    Shuffling will be applied to the add-ons supporting the locale and the
    others separately.

    Doing this in the database takes too long, so we in code and wrap it in
    generous caching.
    """
    # Keep the original application object: `app` is reused as a loop
    # variable for per-app compatibility records below.
    APP = app
    if addon_type.upper() != 'ALL':
        try:
            addon_type = int(addon_type)
            if addon_type:
                addons = [a for a in addons if a.type == addon_type]
        except ValueError:
            # `addon_type` is ALL or a type id. Otherwise we ignore it.
            pass

    # Take out personas since they don't have versions.
    groups = dict(partition(addons,
                            lambda x: x.type == amo.ADDON_PERSONA))
    personas, addons = groups.get(True, []), groups.get(False, [])

    platform = platform.lower()
    if platform != 'all' and platform in amo.PLATFORM_DICT:
        def f(ps):
            # Keep add-ons built for this platform or for all platforms.
            return pid in ps or amo.PLATFORM_ALL in ps

        pid = amo.PLATFORM_DICT[platform]
        addons = [a for a in addons
                  if f(a.current_version.supported_platforms)]

    if version is not None:
        vint = version_int(version)

        # strict: app version must fall inside the add-on's [min, max].
        def f_strict(app):
            return app.min.version_int <= vint <= app.max.version_int

        # ignore: only the minimum bound is enforced.
        def f_ignore(app):
            return app.min.version_int <= vint

        xs = [(a, a.compatible_apps) for a in addons]

        # Iterate over addons, checking compatibility depending on compat_mode.
        addons = []
        for addon, apps in xs:
            app = apps.get(APP)
            if compat_mode == 'strict':
                if app and f_strict(app):
                    addons.append(addon)
            elif compat_mode == 'ignore':
                if app and f_ignore(app):
                    addons.append(addon)
            elif compat_mode == 'normal':
                # This does a db hit but it's cached. This handles the cases
                # for strict opt-in, binary components, and compat overrides.
                v = addon.compatible_version(APP.id, version, platform,
                                             compat_mode)
                if v:  # There's a compatible version.
                    addons.append(addon)

    # Put personas back in.
    addons.extend(personas)

    # We prefer add-ons that support the current locale.
    lang = translation.get_language()

    def partitioner(x):
        return x.description is not None and (x.description.locale == lang)

    groups = dict(partition(addons, partitioner))
    good, others = groups.get(True, []), groups.get(False, [])

    # Shuffle within each bucket so locale-matching add-ons stay first.
    if shuffle:
        random.shuffle(good)
        random.shuffle(others)

    # If limit=0, we return all addons with `good` coming before `others`.
    # Otherwise pad `good` if less than the limit and return the limit.
    if limit > 0:
        if len(good) < limit:
            good.extend(others[:limit - len(good)])
        return good[:limit]
    else:
        good.extend(others)
        return good
class APIView(object):
    """
    Base view class for all API views.

    Subclasses implement process_request(); __call__ handles version
    validation and format (xml/json) negotiation.
    """

    def __call__(self, request, api_version, *args, **kwargs):
        # Negotiate output format from the ?format= parameter (default xml).
        self.version = float(api_version)
        self.format = request.REQUEST.get('format', 'xml')
        self.content_type = ('text/xml' if self.format == 'xml'
                             else 'application/json')
        self.request = request
        # Reject API versions outside the supported range with a 403.
        if not validate_api_version(api_version):
            msg = OUT_OF_DATE.format(self.version, api.CURRENT_VERSION)
            return self.render_msg(msg, ERROR, status=403,
                                   content_type=self.content_type)

        return self.process_request(*args, **kwargs)

    def render_msg(self, msg, error_level=None, *args, **kwargs):
        """
        Renders a simple message.
        """
        if self.format == 'xml':
            return render_xml(
                self.request, 'api/message.xml',
                {'error_level': error_level, 'msg': msg}, *args, **kwargs)
        else:
            return HttpResponse(json.dumps({'msg': _(msg)}), *args, **kwargs)

    def render(self, template, context):
        """Render *template* as XML, or delegate to render_json()."""
        context['api_version'] = self.version
        context['api'] = api

        if self.format == 'xml':
            return render_xml(self.request, template, context,
                              content_type=self.content_type)
        else:
            return HttpResponse(self.render_json(context),
                                content_type=self.content_type)

    def render_json(self, context):
        # Default JSON rendering; subclasses override with real output.
        return json.dumps({'msg': _('Not implemented yet.')})
class AddonDetailView(APIView):
    """Serve the detail document (XML or JSON) for a single add-on."""

    @allow_cross_site_request
    def process_request(self, addon_id):
        """Look up the add-on by id or slug and render it, or a 404 message."""
        try:
            addon = Addon.objects.id_or_slug(addon_id).get()
        except Addon.DoesNotExist:
            return self.render_msg('Add-on not found!', ERROR, status=404,
                                   content_type=self.content_type)
        if addon.is_disabled:
            return self.render_msg('Add-on disabled.', ERROR, status=404,
                                   content_type=self.content_type)
        return self.render_addon(addon)

    def render_addon(self, addon):
        """Render the XML detail template for *addon*."""
        return self.render('api/addon_detail.xml', {'addon': addon})

    def render_json(self, context):
        """Serialize the add-on into the API's JSON dict format."""
        return json.dumps(addon_to_dict(context['addon']), cls=JSONEncoder)
def guid_search(request, api_version, guids):
    """Render search results for a comma-separated list of add-on GUIDs.

    Per-GUID XML fragments are cached (keyed by api_version + locale +
    guid); only misses hit the database and are written back to cache.
    """
    lang = request.LANG

    def guid_search_cache_key(guid):
        # md5 keeps the key short and memcached-safe regardless of guid
        # contents.
        key = 'guid_search:%s:%s:%s' % (api_version, lang, guid)
        return hashlib.md5(smart_str(key)).hexdigest()

    guids = [g.strip() for g in guids.split(',')] if guids else []

    addons_xml = cache.get_many([guid_search_cache_key(g) for g in guids])
    dirty_keys = set()

    for g in guids:
        key = guid_search_cache_key(g)
        if key not in addons_xml:
            dirty_keys.add(key)
            try:
                addon = Addon.objects.get(guid=g, disabled_by_user=False,
                                          status__in=SEARCHABLE_STATUSES)
            except Addon.DoesNotExist:
                # Cache the miss too (empty string) so unknown guids don't
                # hit the database on every request.
                addons_xml[key] = ''
            else:
                addon_xml = render_xml_to_string(request,
                                                 'api/includes/addon.xml',
                                                 {'addon': addon,
                                                  'api_version': api_version,
                                                  'api': api})
                addons_xml[key] = addon_xml

    # Only write back the entries we just computed.
    cache.set_many(dict((k, v) for k, v in addons_xml.iteritems()
                        if k in dirty_keys))

    compat = (CompatOverride.objects.filter(guid__in=guids)
              .transform(CompatOverride.transformer))

    # Drop the cached misses (empty strings) from the rendered output.
    addons_xml = [v for v in addons_xml.values() if v]
    return render_xml(request, 'api/search.xml',
                      {'addons_xml': addons_xml,
                       'total': len(addons_xml),
                       'compat': compat,
                       'api_version': api_version, 'api': api})
class SearchView(APIView):
    """Full-text add-on search endpoint backed by the search index."""

    def process_request(self, query, addon_type='ALL', limit=10,
                        platform='ALL', version=None, compat_mode='strict'):
        """
        Query the search backend and serve up the XML.
        """
        limit = min(MAX_LIMIT, int(limit))
        app_id = self.request.APP.id

        # We currently filter for status=PUBLIC for all versions. If
        # that changes, the contract for API version 1.5 requires
        # that we continue filtering for it there.
        filters = {
            'app': app_id,
            'status': amo.STATUS_PUBLIC,
            'is_disabled': False,
            'has_version': True,
        }

        # Opts may get overridden by query string filters.
        opts = {
            'addon_type': addon_type,
            'version': version,
        }
        # Specific case for Personas (bug 990768): if we search providing the
        # Persona addon type (9), don't filter on the platform as Personas
        # don't have compatible platforms to filter on.
        if addon_type != '9':
            opts['platform'] = platform

        if self.version < 1.5:
            # Fix doubly encoded query strings.
            try:
                query = urllib.unquote(query.encode('ascii'))
            except UnicodeEncodeError:
                # This fails if the string is already UTF-8.
                pass

        query, qs_filters, params = extract_filters(query, opts)

        qs = Addon.search().query(or_=name_query(query))
        filters.update(qs_filters)
        if 'type' not in filters:
            # Filter by ALL types, which is really all types except for apps.
            filters['type__in'] = list(amo.ADDON_SEARCH_TYPES)

        qs = qs.filter(**filters)

        qs = qs[:limit]
        total = qs.count()

        results = []
        for addon in qs:
            compat_version = addon.compatible_version(app_id,
                                                      params['version'],
                                                      params['platform'],
                                                      compat_mode)
            # Specific case for Personas (bug 990768): if we search providing
            # the Persona addon type (9), then don't look for a compatible
            # version.
            if compat_version or addon_type == '9':
                addon.compat_version = compat_version
                results.append(addon)
                if len(results) == limit:
                    break
            else:
                # We're excluding this addon because there are no
                # compatible versions. Decrement the total.
                total -= 1

        return self.render('api/search.xml', {
            'results': results,
            'total': total,
            # For caching
            'version': version,
            'compat_mode': compat_mode,
        })
@json_view
def search_suggestions(request):
    """Return JSON autocomplete suggestions for add-ons or themes."""
    # Shed load when the throttle sample is active.
    if waffle.sample_is_active('autosuggest-throttle'):
        return HttpResponse(status=503)
    category = request.GET.get('cat', 'all')
    if category == 'themes':
        suggester_class = PersonaSuggestionsAjax
    else:
        suggester_class = AddonSuggestionsAjax
    items = suggester_class(request, ratings=True).items
    # Ratings arrive as Decimal-ish values; JSON wants plain floats.
    for item in items:
        item['rating'] = float(item['rating'])
    return {'suggestions': items}
class ListView(APIView):
    """List endpoint: featured/new/popular/hot add-ons for the current app."""

    def process_request(self, list_type='recommended', addon_type='ALL',
                        limit=10, platform='ALL', version=None,
                        compat_mode='strict'):
        """
        Find a list of new or featured add-ons.  Filtering is done in Python
        for cache-friendliness and to avoid heavy queries.
        """
        limit = min(MAX_LIMIT, int(limit))
        APP, platform = self.request.APP, platform.lower()
        qs = Addon.objects.listed(APP)
        shuffle = True
        if list_type in ('by_adu', 'featured'):
            # Personas have no usage/feature data in these orderings.
            qs = qs.exclude(type=amo.ADDON_PERSONA)

        if list_type == 'newest':
            new = date.today() - timedelta(days=NEW_DAYS)
            addons = (qs.filter(created__gte=new)
                      .order_by('-created'))[:limit + BUFFER]
        elif list_type == 'by_adu':
            addons = qs.order_by('-average_daily_users')[:limit + BUFFER]
            shuffle = False  # By_adu is an ordered list.
        elif list_type == 'hotness':
            # Filter to type=1 so we hit visible_idx. Only extensions have a
            # hotness index right now so this is not incorrect.
            addons = (qs.filter(type=amo.ADDON_EXTENSION)
                      .order_by('-hotness'))[:limit + BUFFER]
            shuffle = False
        else:
            # Default ('recommended'/'featured'): randomized featured set.
            ids = Addon.featured_random(APP, self.request.LANG)
            addons = manual_order(qs, ids[:limit + BUFFER], 'addons.id')
            shuffle = False

        args = (addon_type, limit, APP, platform, version, compat_mode,
                shuffle)

        def f():
            return self._process(addons, *args)

        # Cache the rendered response keyed on the queryset + args.
        return cached_with(addons, f, map(encoding.smart_str, args))

    def _process(self, addons, *args):
        return self.render('api/list.xml',
                           {'addons': addon_filter(addons, *args)})

    def render_json(self, context):
        return json.dumps([addon_to_dict(a) for a in context['addons']],
                          cls=JSONEncoder)
class LanguageView(APIView):
    """List every public language pack for the current application."""

    def process_request(self):
        language_packs = (
            Addon.objects
            .filter(status=amo.STATUS_PUBLIC,
                    type=amo.ADDON_LPAPP,
                    appsupport__app=self.request.APP.id,
                    disabled_by_user=False)
            .order_by('pk'))
        return self.render('api/list.xml',
                           {'addons': language_packs,
                            'show_localepicker': True})
# pylint: disable-msg=W0613
def redirect_view(request, url):
    """
    Redirect all requests that come here to an API call with a view parameter.
    """
    quoted_url = urllib.quote(url.encode('utf-8'))
    destination = get_url_prefix().fix(
        '/api/%.1f/%s' % (api.CURRENT_VERSION, quoted_url))
    return HttpResponsePermanentRedirect(destination)
def request_token_ready(request, token):
    """Render the OAuth 'request token ready' confirmation page."""
    context = {
        'error': request.GET.get('error', ''),
        'token': token,
    }
    return render(request, 'piston/request_token_ready.html', context)
@csrf_exempt
@post_required
def performance_add(request):
    """
    A wrapper around adding in performance data that is easier than
    using the piston API.
    """
    # Trigger OAuth.
    if not AMOOAuthAuthentication(two_legged=True).is_authenticated(request):
        return rc.FORBIDDEN

    form = PerformanceForm(request.POST)
    if not form.is_valid():
        return form.show_error()

    # Get-or-create the OS and app version rows this measurement refers to.
    os, created = (PerformanceOSVersion
                   .objects.safer_get_or_create(**form.os_version))
    app, created = (PerformanceAppVersions
                    .objects.safer_get_or_create(**form.app_version))

    data = form.performance
    data.update({'osversion': os, 'appversion': app})

    # Look up on everything except the average time.
    result, created = Performance.objects.safer_get_or_create(**data)
    # New average always overwrites any previously stored value.
    result.average = form.cleaned_data['average']
    result.save()

    log.info('Performance created for add-on: %s, %s' %
             (form.cleaned_data['addon_id'], form.cleaned_data['average']))
    return rc.ALL_OK
| bsd-3-clause |
XiaosongWei/chromium-crosswalk | native_client_sdk/src/build_tools/sdk_tools/command/list.py | 152 | 1736 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def List(remote_manifest, local_manifest, display_revisions):
  """Print the status of every bundle: installed, updatable, or local-only.

  Args:
    remote_manifest: manifest of bundles available from the server.
    local_manifest: manifest of bundles installed locally.
    display_revisions: if True, include revision numbers in the listing.
  """
  any_bundles_need_update = False
  print 'Bundles:'
  print ' I: installed\n *: update available\n'
  for bundle in remote_manifest.GetBundles():
    local_bundle = local_manifest.GetBundle(bundle.name)
    # Only an installed bundle can need an update.
    needs_update = local_bundle and local_manifest.BundleNeedsUpdate(bundle)
    if needs_update:
      any_bundles_need_update = True
    _PrintBundle(local_bundle, bundle, needs_update, display_revisions)

  if not any_bundles_need_update:
    print '\nAll installed bundles are up-to-date.'

  # Bundles present locally but no longer offered remotely.
  local_only_bundles = set([b.name for b in local_manifest.GetBundles()])
  local_only_bundles -= set([b.name for b in remote_manifest.GetBundles()])
  if local_only_bundles:
    print '\nBundles installed locally that are not available remotely:'
    for bundle_name in local_only_bundles:
      local_bundle = local_manifest.GetBundle(bundle_name)
      _PrintBundle(local_bundle, None, False, display_revisions)
def _PrintBundle(local_bundle, bundle, needs_update, display_revisions):
  """Print one formatted listing line for a bundle.

  Args:
    local_bundle: the locally installed bundle, or None if not installed.
    bundle: the remote bundle, or None if it only exists locally.
    needs_update: whether the installed bundle is out of date.
    display_revisions: if True, append revision information.
  """
  installed = local_bundle is not None
  # If bundle is None, there is no longer a remote bundle with this name.
  if bundle is None:
    bundle = local_bundle
  if display_revisions:
    if needs_update:
      # Show the transition from the installed to the available revision.
      revision = ' (r%s -> r%s)' % (local_bundle.revision, bundle.revision)
    else:
      revision = ' (r%s)' % (bundle.revision,)
  else:
    revision = ''

  # Two leading flag columns: 'I' = installed, '*' = update available.
  print (' %s%s %s (%s)%s' % (
      'I' if installed else ' ',
      '*' if needs_update else ' ',
      bundle.name,
      bundle.stability,
      revision))
| bsd-3-clause |
JimCircadian/ansible | lib/ansible/modules/network/system/net_banner.py | 40 | 2076 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_banner
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage multiline banners on network devices
description:
- This will configure both login and motd banners on network devices.
It allows playbooks to add or remove
banner text from the active running configuration.
options:
banner:
description:
- Specifies which banner that should be
configured on the remote device.
required: true
choices: ['login', 'motd']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string, with no empty lines. Requires I(state=present).
state:
description:
- Specifies whether or not the configuration is
present in the current devices active running configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure the login banner
net_banner:
banner: login
text: |
this is my login banner
that contains a multiline
string
state: present
- name: remove the motd banner
net_banner:
banner: motd
state: absent
- name: Configure banner from file
net_banner:
banner: motd
text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}"
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- banner login
- this is my login banner
- that contains a multiline
- string
"""
| gpl-3.0 |
naslanidis/ansible | lib/ansible/utils/module_docs_fragments/cloudengine.py | 28 | 2959 | #
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    # Standard files documentation fragment
    #
    # Shared connection-option documentation for CloudEngine modules; pulled
    # in via `extends_documentation_fragment` and merged into each module's
    # own DOCUMENTATION block.  The value is consumed as YAML at doc-build
    # time, so its content (including indentation) is runtime data.
    DOCUMENTATION = """
options:
  host:
    description:
      - Specifies the DNS host name or address for connecting to the remote
        device over the specified transport.  The value of host is used as
        the destination address for the transport.
    required: true
  port:
    description:
      - Specifies the port to use when building the connection to the remote
        device.  This value applies to either I(cli) or I(netconf).  The port
        value will default to the appropriate transport common port if
        none is provided in the task.  (cli=22, netconf=22).
    required: false
    default: 0 (use common port)
  username:
    description:
      - Configures the username to use to authenticate the connection to
        the remote device.  This value is used to authenticate the CLI login.
        If the value is not specified in the task, the value of environment
        variable C(ANSIBLE_NET_USERNAME) will be used instead.
    required: false
  password:
    description:
      - Specifies the password to use to authenticate the connection to
        the remote device.  This is a common argument used for cli
        transports.  If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
    required: false
    default: null
  ssh_keyfile:
    description:
      - Specifies the SSH key to use to authenticate the connection to
        the remote device.  This argument is used for the I(cli)
        transport.  If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
    required: false
  transport:
    description:
      - Configures the transport connection to use when connecting to the
        remote device.  The transport argument supports connectivity to the
        device over cli (ssh).
    required: true
    default: cli
  provider:
    description:
      - Convenience method that allows all I(cloudengine) arguments to be passed as
        a dict object.  All constraints (required, choices, etc) must be
        met either by individual arguments or values in this dict.
    required: false
    default: null
"""
| gpl-3.0 |
naokimiyasaka/sublime-text | Backup/20140106101521/ConvertToUTF8/chardet/euctwprober.py | 2994 | 1676 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
    """Charset prober for the EUC-TW (traditional Chinese) encoding.

    Combines the shared multi-byte prober machinery with the EUC-TW state
    machine model and the EUC-TW character-distribution analyser.
    """

    def __init__(self):
        super(EUCTWProber, self).__init__()
        self._mCodingSM = CodingStateMachine(EUCTWSMModel)
        self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
        self.reset()

    def get_charset_name(self):
        """Return the canonical name of the charset this prober detects."""
        return "EUC-TW"
| mit |
stutivarshney/Bal-Aveksha | WebServer/BalAvekshaEnv/lib/python3.5/site-packages/rest_framework/filters.py | 6 | 13069 | """
Provides generic filtering backends that can be used to filter the results
returned by list views.
"""
from __future__ import unicode_literals
import operator
import warnings
from functools import reduce
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.template import loader
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import (
coreapi, coreschema, distinct, django_filters, guardian, template_render
)
from rest_framework.settings import api_settings
class BaseFilterBackend(object):
    """
    A base class from which all filter backend classes should inherit.
    """

    def filter_queryset(self, request, queryset, view):
        """
        Return a filtered queryset.
        """
        # Subclasses must override this hook; the base deliberately raises
        # instead of silently returning the queryset unchanged.
        raise NotImplementedError(".filter_queryset() must be overridden.")

    def get_schema_fields(self, view):
        # Schema-generation hook: the coreapi.Field instances this backend
        # contributes to the view's schema.  The base contributes none.
        assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
        assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
        return []
# `FilterSet` is a deprecated alias.  When django-filter is installed it
# subclasses the real django-filter FilterSet and warns on use; otherwise a
# stub is provided that fails loudly if anyone tries to use it.
if django_filters:
    from django_filters.rest_framework.filterset import FilterSet as DFFilterSet

    class FilterSet(DFFilterSet):
        def __init__(self, *args, **kwargs):
            warnings.warn(
                "The built in 'rest_framework.filters.FilterSet' is deprecated. "
                "You should use 'django_filters.rest_framework.FilterSet' instead.",
                DeprecationWarning
            )
            return super(FilterSet, self).__init__(*args, **kwargs)
else:
    def FilterSet():
        # Stub used only to produce a clear failure when django-filter is
        # missing; never meant to be callable successfully.
        assert False, 'django-filter must be installed to use the `FilterSet` class'
class DjangoFilterBackend(BaseFilterBackend):
    """
    A filter backend that uses django-filter.

    Deprecated shim: instantiating this class constructs and returns
    django-filter's own DjangoFilterBackend.  Because __new__ returns an
    instance of a *different* class, __init__ on this class never runs.
    """

    def __new__(cls, *args, **kwargs):
        assert django_filters, 'Using DjangoFilterBackend, but django-filter is not installed'
        assert django_filters.VERSION >= (0, 15, 3), 'django-filter 0.15.3 and above is required'
        warnings.warn(
            "The built in 'rest_framework.filters.DjangoFilterBackend' is deprecated. "
            "You should use 'django_filters.rest_framework.DjangoFilterBackend' instead.",
            DeprecationWarning
        )
        # Deferred import: django-filter is an optional dependency.
        from django_filters.rest_framework import DjangoFilterBackend
        return DjangoFilterBackend(*args, **kwargs)
class SearchFilter(BaseFilterBackend):
    """Filter backend implementing ``?search=...`` term filtering."""

    # The URL query parameter used for the search.
    search_param = api_settings.SEARCH_PARAM
    template = 'rest_framework/filters/search.html'
    # Optional leading character on a search_field selecting the ORM lookup.
    lookup_prefixes = {
        '^': 'istartswith',
        '=': 'iexact',
        '@': 'search',
        '$': 'iregex',
    }
    search_title = _('Search')
    search_description = _('A search term.')

    def get_search_terms(self, request):
        """
        Search terms are set by a ?search=... query parameter,
        and may be comma and/or whitespace delimited.
        """
        raw = request.query_params.get(self.search_param, '')
        return raw.replace(',', ' ').split()

    def construct_search(self, field_name):
        """Translate a search_field entry into an ORM lookup string."""
        prefix_lookup = self.lookup_prefixes.get(field_name[0])
        if prefix_lookup is None:
            lookup = 'icontains'
        else:
            lookup = prefix_lookup
            field_name = field_name[1:]
        return LOOKUP_SEP.join([field_name, lookup])

    def must_call_distinct(self, queryset, search_fields):
        """
        Return True if 'distinct()' should be used to query the given lookups.
        """
        for search_field in search_fields:
            opts = queryset.model._meta
            if search_field[0] in self.lookup_prefixes:
                search_field = search_field[1:]
            for part in search_field.split(LOOKUP_SEP):
                field = opts.get_field(part)
                if not hasattr(field, 'get_path_info'):
                    continue
                # The field is a relation: follow it so subsequent parts are
                # resolved against the related model's options.
                path_info = field.get_path_info()
                opts = path_info[-1].to_opts
                if any(path.m2m for path in path_info):
                    # Crossing a many-to-many relation can duplicate rows.
                    return True
        return False

    def filter_queryset(self, request, queryset, view):
        fields = getattr(view, 'search_fields', None)
        terms = self.get_search_terms(request)
        if not fields or not terms:
            return queryset

        lookups = [
            self.construct_search(six.text_type(field))
            for field in fields
        ]

        unfiltered = queryset
        for term in terms:
            # Each term must match at least one field (OR across fields,
            # AND across terms).
            queryset = queryset.filter(
                reduce(operator.or_,
                       (models.Q(**{lookup: term}) for lookup in lookups))
            )

        if self.must_call_distinct(queryset, fields):
            # Filtering against a many-to-many field requires distinct() to
            # avoid duplicate rows; avoided otherwise for performance.
            queryset = distinct(queryset, unfiltered)
        return queryset

    def to_html(self, request, queryset, view):
        if not getattr(view, 'search_fields', None):
            return ''

        terms = self.get_search_terms(request)
        context = {
            'param': self.search_param,
            'term': terms[0] if terms else ''
        }
        return template_render(loader.get_template(self.template), context)

    def get_schema_fields(self, view):
        assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
        assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
        schema = coreschema.String(
            title=force_text(self.search_title),
            description=force_text(self.search_description)
        )
        return [
            coreapi.Field(
                name=self.search_param,
                required=False,
                location='query',
                schema=schema
            )
        ]
class OrderingFilter(BaseFilterBackend):
    """Filter backend implementing ``?ordering=...`` result ordering."""

    # The URL query parameter used for the ordering.
    ordering_param = api_settings.ORDERING_PARAM
    # Whitelist of orderable fields.  None: derive from the serializer;
    # '__all__': allow any model field (see get_valid_fields).
    ordering_fields = None
    ordering_title = _('Ordering')
    ordering_description = _('Which field to use when ordering the results.')
    template = 'rest_framework/filters/ordering.html'

    def get_ordering(self, request, queryset, view):
        """
        Ordering is set by a comma delimited ?ordering=... query parameter.

        The `ordering` query parameter can be overridden by setting
        the `ordering_param` value on the OrderingFilter or by
        specifying an `ORDERING_PARAM` value in the API settings.
        """
        params = request.query_params.get(self.ordering_param)
        if params:
            fields = [param.strip() for param in params.split(',')]
            ordering = self.remove_invalid_fields(queryset, fields, view, request)
            if ordering:
                return ordering

        # No ordering was included, or all the ordering fields were invalid
        return self.get_default_ordering(view)

    def get_default_ordering(self, view):
        # The view-level `ordering` attribute may be a single field name or
        # a sequence of names; normalise a bare string to a 1-tuple.
        ordering = getattr(view, 'ordering', None)
        if isinstance(ordering, six.string_types):
            return (ordering,)
        return ordering

    def get_default_valid_fields(self, queryset, view, context={}):
        # NOTE(review): mutable default `context={}` is a known smell but is
        # never mutated here, so behavior is unaffected.
        # If `ordering_fields` is not specified, then we determine a default
        # based on the serializer class, if one exists on the view.
        if hasattr(view, 'get_serializer_class'):
            try:
                serializer_class = view.get_serializer_class()
            except AssertionError:
                # Raised by the default implementation if
                # no serializer_class was found
                serializer_class = None
        else:
            serializer_class = getattr(view, 'serializer_class', None)

        if serializer_class is None:
            msg = (
                "Cannot use %s on a view which does not have either a "
                "'serializer_class', an overriding 'get_serializer_class' "
                "or 'ordering_fields' attribute."
            )
            raise ImproperlyConfigured(msg % self.__class__.__name__)

        # (field source, label) pairs for every readable, non-wildcard field.
        return [
            (field.source or field_name, field.label)
            for field_name, field in serializer_class(context=context).fields.items()
            if not getattr(field, 'write_only', False) and not field.source == '*'
        ]

    def get_valid_fields(self, queryset, view, context={}):
        valid_fields = getattr(view, 'ordering_fields', self.ordering_fields)

        if valid_fields is None:
            # Default to allowing filtering on serializer fields
            return self.get_default_valid_fields(queryset, view, context)

        elif valid_fields == '__all__':
            # View explicitly allows filtering on any model field
            valid_fields = [
                (field.name, field.verbose_name) for field in queryset.model._meta.fields
            ]
            # NOTE(review): the "label" here is a *list* (split() result),
            # unlike the string labels elsewhere; to_html formats it with
            # %s, producing list-style text for annotated fields -- confirm
            # whether this is intended.
            valid_fields += [
                (key, key.title().split('__'))
                for key in queryset.query.annotations.keys()
            ]
        else:
            valid_fields = [
                (item, item) if isinstance(item, six.string_types) else item
                for item in valid_fields
            ]

        return valid_fields

    def remove_invalid_fields(self, queryset, fields, view, request):
        # Keep only requested fields (ignoring a leading '-') that appear in
        # the whitelist of valid fields.
        valid_fields = [item[0] for item in self.get_valid_fields(queryset, view, {'request': request})]
        return [term for term in fields if term.lstrip('-') in valid_fields]

    def filter_queryset(self, request, queryset, view):
        ordering = self.get_ordering(request, queryset, view)

        if ordering:
            return queryset.order_by(*ordering)

        return queryset

    def get_template_context(self, request, queryset, view):
        # Context for the browsable-API ordering control: the current choice
        # plus an ascending and a descending option per valid field.
        current = self.get_ordering(request, queryset, view)
        current = None if current is None else current[0]
        options = []
        context = {
            'request': request,
            'current': current,
            'param': self.ordering_param,
        }
        for key, label in self.get_valid_fields(queryset, view, context):
            options.append((key, '%s - %s' % (label, _('ascending'))))
            options.append(('-' + key, '%s - %s' % (label, _('descending'))))
        context['options'] = options
        return context

    def to_html(self, request, queryset, view):
        template = loader.get_template(self.template)
        context = self.get_template_context(request, queryset, view)
        return template_render(template, context)

    def get_schema_fields(self, view):
        assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
        assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
        return [
            coreapi.Field(
                name=self.ordering_param,
                required=False,
                location='query',
                schema=coreschema.String(
                    title=force_text(self.ordering_title),
                    description=force_text(self.ordering_description)
                )
            )
        ]
class DjangoObjectPermissionsFilter(BaseFilterBackend):
    """
    A filter backend that limits results to those where the requesting user
    has read object level permissions.

    Requires django-guardian: the queryset is restricted to the objects for
    which ``request.user`` holds the per-object "view" permission.
    """

    def __init__(self):
        assert guardian, 'Using DjangoObjectPermissionsFilter, but django-guardian is not installed'

    # Template for the permission codename checked per object.
    perm_format = '%(app_label)s.view_%(model_name)s'

    def filter_queryset(self, request, queryset, view):
        # We want to defer this import until run-time, rather than import-time.
        # See https://github.com/tomchristie/django-rest-framework/issues/4608
        # (Also see #1624 for why we need to make this import explicitly)
        from guardian.shortcuts import get_objects_for_user
        user = request.user
        model_cls = queryset.model
        kwargs = {
            'app_label': model_cls._meta.app_label,
            'model_name': model_cls._meta.model_name
        }
        permission = self.perm_format % kwargs
        # (Removed a dead `extra = {}` initialization that was unconditionally
        # overwritten by both branches below.)
        if tuple(guardian.VERSION) >= (1, 3):
            # Maintain behavior compatibility with versions prior to 1.3
            extra = {'accept_global_perms': False}
        else:
            extra = {}
        return get_objects_for_user(user, permission, queryset, **extra)
| gpl-3.0 |
Glasgow2015/team-10 | env/lib/python2.7/site-packages/django/contrib/formtools/tests/wizard/test_forms.py | 65 | 8956 | from __future__ import unicode_literals
from importlib import import_module
from django import forms, http
from django.conf import settings
from django.db import models
from django.test import TestCase
from django.template.response import TemplateResponse
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import (WizardView,
SessionWizardView,
CookieWizardView)
class DummyRequest(http.HttpRequest):
    """Minimal HttpRequest stand-in for driving a wizard without a test client."""

    def __init__(self, POST=None):
        super(DummyRequest, self).__init__()
        # Note the deliberate asymmetry: the method is chosen by the
        # *truthiness* of POST (an empty dict still means GET), while the
        # POST data is merged whenever POST is not None.
        self.method = "GET" if not POST else "POST"
        if POST is not None:
            self.POST.update(POST)
        self.session = {}
        self._dont_enforce_csrf_checks = True
def get_request(*args, **kwargs):
    # Build a DummyRequest and attach a real (anonymous) session store so
    # the wizard's session storage backend has somewhere to persist state.
    request = DummyRequest(*args, **kwargs)
    engine = import_module(settings.SESSION_ENGINE)
    request.session = engine.SessionStore(None)
    return request
class Step1(forms.Form):
    # First wizard step: a single required text field.
    name = forms.CharField()
class Step2(forms.Form):
    # Second wizard step: same shape as Step1.
    name = forms.CharField()
class Step3(forms.Form):
    # Third wizard step, with a differently named field.
    data = forms.CharField()
class CustomKwargsStep1(Step1):
    # Variant of Step1 accepting an extra `test` keyword; used to verify
    # that WizardView.get_form_kwargs() values reach the form constructor.
    def __init__(self, test=None, *args, **kwargs):
        self.test = test
        super(CustomKwargsStep1, self).__init__(*args, **kwargs)
class TestModel(models.Model):
    # Simple model used to exercise ModelForm/ModelFormSet wizard steps.
    name = models.CharField(max_length=100)

    class Meta:
        app_label = 'formtools'
class TestModelForm(forms.ModelForm):
    # ModelForm step backed by TestModel, exposing every model field.
    class Meta:
        model = TestModel
        fields = '__all__'
# Formset step: renders two extra blank TestModel forms per page.
TestModelFormSet = forms.models.modelformset_factory(TestModel, form=TestModelForm, extra=2,
                                                     fields='__all__')
class TestWizard(WizardView):
    # Wizard under test.  dispatch() is overridden to also return the view
    # instance itself, so tests can inspect wizard state after each request.
    storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'

    def dispatch(self, request, *args, **kwargs):
        response = super(TestWizard, self).dispatch(request, *args, **kwargs)
        return response, self

    def get_form_kwargs(self, step, *args, **kwargs):
        # Inject an extra constructor kwarg for the 'kwargs_test' step only.
        kwargs = super(TestWizard, self).get_form_kwargs(step, *args, **kwargs)
        if step == 'kwargs_test':
            kwargs['test'] = True
        return kwargs
class TestWizardWithInitAttrs(TestWizard):
    # Same wizard configured declaratively through class attributes instead
    # of arguments to as_view().
    form_list = [Step1, Step2]
    condition_dict = {'step2': True}
    initial_dict = {'start': {'name': 'value1'}}
    instance_dict = {'start': User()}
class FormTests(TestCase):
    # Unit tests for WizardView configuration and step handling, driven
    # directly with DummyRequest objects rather than the Django test client.

    def test_form_init(self):
        # get_initkwargs() normalises every supported form_list spelling
        # into a step-name -> form-class dict.
        testform = TestWizard.get_initkwargs([Step1, Step2])
        self.assertEqual(testform['form_list'], {'0': Step1, '1': Step2})

        testform = TestWizard.get_initkwargs([('start', Step1), ('step2', Step2)])
        self.assertEqual(
            testform['form_list'], {'start': Step1, 'step2': Step2})

        testform = TestWizard.get_initkwargs([Step1, Step2, ('finish', Step3)])
        self.assertEqual(
            testform['form_list'], {'0': Step1, '1': Step2, 'finish': Step3})

        testform = TestWizardWithInitAttrs.get_initkwargs()
        self.assertEqual(testform['form_list'], {'0': Step1, '1': Step2})

    def test_first_step(self):
        # A fresh GET lands on the first step, however it is named.
        request = get_request()

        testform = TestWizard.as_view([Step1, Step2])
        response, instance = testform(request)
        self.assertEqual(instance.steps.current, '0')

        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)
        self.assertEqual(instance.steps.current, 'start')

    def test_persistence(self):
        # The current step survives across requests via the storage backend.
        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        request = get_request({'test_wizard-current_step': 'start',
                               'name': 'data1'})
        response, instance = testform(request)
        self.assertEqual(instance.steps.current, 'start')

        instance.storage.current_step = 'step2'

        testform2 = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        request.POST = {'test_wizard-current_step': 'step2'}
        response, instance = testform2(request)
        self.assertEqual(instance.steps.current, 'step2')

    def test_form_condition(self):
        # condition_dict entries decide whether a step is visited or skipped.
        request = get_request()

        testform = TestWizard.as_view(
            [('start', Step1), ('step2', Step2), ('step3', Step3)],
            condition_dict={'step2': True})
        response, instance = testform(request)
        self.assertEqual(instance.get_next_step(), 'step2')

        testform = TestWizard.as_view(
            [('start', Step1), ('step2', Step2), ('step3', Step3)],
            condition_dict={'step2': False})
        response, instance = testform(request)
        self.assertEqual(instance.get_next_step(), 'step3')

        testform = TestWizardWithInitAttrs.as_view(
            [('start', Step1), ('step2', Step2), ('step3', Step3)])
        response, instance = testform(request)
        self.assertEqual(instance.get_next_step(), 'step2')

    def test_form_kwargs(self):
        # Values from get_form_kwargs() must reach the form constructor.
        request = get_request()

        testform = TestWizard.as_view([('start', Step1),
                                       ('kwargs_test', CustomKwargsStep1)])
        response, instance = testform(request)

        self.assertEqual(instance.get_form_kwargs('start'), {})
        self.assertEqual(instance.get_form_kwargs('kwargs_test'), {'test': True})
        self.assertEqual(instance.get_form('kwargs_test').test, True)

    def test_form_prefix(self):
        # The form prefix defaults to the current step name.
        request = get_request()

        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)

        self.assertEqual(instance.get_form_prefix(), 'start')
        self.assertEqual(instance.get_form_prefix('another'), 'another')

    def test_form_initial(self):
        # initial_dict supplies per-step initial data, whether passed to
        # as_view() or declared as a class attribute.
        request = get_request()

        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)],
                                      initial_dict={'start': {'name': 'value1'}})
        response, instance = testform(request)

        self.assertEqual(instance.get_form_initial('start'), {'name': 'value1'})
        self.assertEqual(instance.get_form_initial('step2'), {})

        testform = TestWizardWithInitAttrs.as_view(
            [('start', Step1), ('step2', Step2)])
        response, instance = testform(request)

        self.assertEqual(instance.get_form_initial('start'), {'name': 'value1'})
        self.assertEqual(instance.get_form_initial('step2'), {})

    def test_form_instance(self):
        # instance_dict supplies the model instance backing a ModelForm step.
        request = get_request()
        the_instance = TestModel()

        testform = TestWizard.as_view([('start', TestModelForm), ('step2', Step2)],
                                      instance_dict={'start': the_instance})
        response, instance = testform(request)

        self.assertEqual(
            instance.get_form_instance('start'),
            the_instance)
        self.assertEqual(
            instance.get_form_instance('non_exist_instance'),
            None)

        testform = TestWizardWithInitAttrs.as_view(
            [('start', TestModelForm), ('step2', Step2)])
        response, instance = testform(request)

        self.assertEqual(
            instance.get_form_instance('start'),
            TestWizardWithInitAttrs.instance_dict['start'])

    def test_formset_instance(self):
        # instance_dict may also hold a queryset for a ModelFormSet step.
        request = get_request()
        the_instance1, created = TestModel.objects.get_or_create(
            name='test object 1')
        the_instance2, created = TestModel.objects.get_or_create(
            name='test object 2')

        testform = TestWizard.as_view([('start', TestModelFormSet), ('step2', Step2)],
                                      instance_dict={'start': TestModel.objects.filter(name='test object 1')})
        response, instance = testform(request)

        self.assertEqual(list(instance.get_form_instance('start')), [the_instance1])
        self.assertEqual(instance.get_form_instance('non_exist_instance'), None)

        self.assertEqual(instance.get_form().initial_form_count(), 1)

    def test_done(self):
        # The base done() hook is abstract and must raise.
        request = get_request()

        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)

        self.assertRaises(NotImplementedError, instance.done, None)

    def test_revalidation(self):
        # render_done() revalidates every step; with no valid data it resets
        # the storage back to the first step.
        request = get_request()

        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)
        instance.render_done(None)
        self.assertEqual(instance.storage.current_step, 'start')
class SessionFormTests(TestCase):
    def test_init(self):
        """A session-backed wizard view should render a TemplateResponse."""
        request = get_request()
        testform = SessionWizardView.as_view([('start', Step1)])
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(testform(request), TemplateResponse)
class CookieFormTests(TestCase):
    def test_init(self):
        """A cookie-backed wizard view should render a TemplateResponse."""
        request = get_request()
        testform = CookieWizardView.as_view([('start', Step1)])
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(testform(request), TemplateResponse)
| apache-2.0 |
JT5D/scikit-learn | examples/plot_multilabel.py | 9 | 4299 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    """Draw the separating line of a fitted linear binary classifier.

    The decision boundary satisfies w0*x + w1*y + b = 0, i.e.
    y = -(w0/w1)*x - b/w1.
    """
    coef = clf.coef_[0]
    slope = -coef[0] / coef[1]
    xs = np.linspace(min_x - 5, max_x + 5)  # make sure the line is long enough
    ys = slope * xs - (clf.intercept_[0]) / coef[1]
    pl.plot(xs, ys, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
    # Project the data to 2-D with the requested technique, fit a
    # one-vs-rest linear SVM, and draw the samples together with both
    # per-class decision boundaries in the given slot of a 2x2 figure.
    if transform == "pca":
        # Unsupervised projection onto the first two principal components.
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        # Convert list of tuples to a class indicator matrix first
        Y_indicator = LabelBinarizer().fit(Y).transform(Y)
        # Supervised projection: CCA uses the label information.
        X = CCA(n_components=2).fit(X, Y_indicator).transform(X)
    else:
        raise ValueError

    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])

    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])

    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)

    pl.subplot(2, 2, subplot)
    pl.title(title)

    # Indices of samples carrying label 0 and label 1 respectively; a sample
    # may appear in both (multilabel) or in neither (unlabeled, gray dots).
    zero_class = np.where([0 in y for y in Y])
    one_class = np.where([1 in y for y in Y])
    pl.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    pl.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
               facecolors='none', linewidths=2, label='Class 1')
    pl.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
               facecolors='none', linewidths=2, label='Class 2')

    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    pl.xticks(())
    pl.yticks(())

    # Pad the axis limits so markers near the extremes are not clipped.
    pl.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    pl.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    if subplot == 2:
        pl.xlabel('First principal component')
        pl.ylabel('Second principal component')
        pl.legend(loc="upper left")
pl.figure(figsize=(8, 6))

# Top row: dataset that may contain unlabeled samples.
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      random_state=1)

plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")

# Bottom row: every sample carries at least one label.
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      random_state=1)

plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")

pl.subplots_adjust(.04, .02, .97, .94, .09, .2)
pl.show()
Spiderlover/Toontown | toontown/safezone/GolfKart.py | 4 | 7887 | from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.fsm import StateData
from toontown.toontowngui import TTDialog
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from direct.showbase import PythonUtil
class GolfKart(StateData.StateData):
def __init__(self, safeZone, parentFSM, doneEvent, golfCourse):
StateData.StateData.__init__(self, doneEvent)
self.golfCourse = golfCourse
self.fsm = ClassicFSM.ClassicFSM('GolfKart', [
State.State('start',
self.enterStart,
self.exitStart, [
'requestBoard',
'trolleyHFA',
'trolleyTFA']),
State.State('trolleyHFA',
self.enterTrolleyHFA,
self.exitTrolleyHFA, [
'final']),
State.State('trolleyTFA',
self.enterTrolleyTFA,
self.exitTrolleyTFA, [
'final']),
State.State('requestBoard',
self.enterRequestBoard,
self.exitRequestBoard, [
'boarding']),
State.State('boarding',
self.enterBoarding,
self.exitBoarding, [
'boarded']),
State.State('boarded',
self.enterBoarded,
self.exitBoarded, [
'requestExit',
'trolleyLeaving',
'final']),
State.State('requestExit',
self.enterRequestExit,
self.exitRequestExit, [
'exiting',
'trolleyLeaving']),
State.State('trolleyLeaving',
self.enterTrolleyLeaving,
self.exitTrolleyLeaving, [
'final']),
State.State('exiting',
self.enterExiting,
self.exitExiting, [
'final']),
State.State('final',
self.enterFinal,
self.exitFinal, [
'start'])],
'start', 'final')
self.parentFSM = parentFSM
return None
def load(self):
self.parentFSM.getStateNamed('golfKartBlock').addChild(self.fsm)
self.buttonModels = loader.loadModel('phase_3.5/models/gui/inventory_gui')
self.upButton = self.buttonModels.find('**//InventoryButtonUp')
self.downButton = self.buttonModels.find('**/InventoryButtonDown')
self.rolloverButton = self.buttonModels.find('**/InventoryButtonRollover')
def unload(self):
self.parentFSM.getStateNamed('trolley').removeChild(self.fsm)
del self.fsm
del self.parentFSM
self.buttonModels.removeNode()
del self.buttonModels
del self.upButton
del self.downButton
del self.rolloverButton
def enter(self):
self.fsm.enterInitialState()
if base.localAvatar.hp > 0:
messenger.send('enterGolfKartOK_%d' % self.golfCourse)
self.fsm.request('requestBoard')
else:
self.fsm.request('trolleyHFA')
return None
def exit(self):
self.ignoreAll()
return None
def enterStart(self):
return None
def exitStart(self):
return None
def enterTrolleyHFA(self):
self.noTrolleyBox = TTDialog.TTGlobalDialog(message=TTLocalizer.TrolleyHFAMessage, doneEvent='noTrolleyAck', style=TTDialog.Acknowledge)
self.noTrolleyBox.show()
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('noTrolleyAck', self.__handleNoTrolleyAck)
def exitTrolleyHFA(self):
self.ignore('noTrolleyAck')
self.noTrolleyBox.cleanup()
del self.noTrolleyBox
def enterTrolleyTFA(self):
self.noTrolleyBox = TTDialog.TTGlobalDialog(message=TTLocalizer.TrolleyTFAMessage, doneEvent='noTrolleyAck', style=TTDialog.Acknowledge)
self.noTrolleyBox.show()
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('noTrolleyAck', self.__handleNoTrolleyAck)
def exitTrolleyTFA(self):
self.ignore('noTrolleyAck')
self.noTrolleyBox.cleanup()
del self.noTrolleyBox
def __handleNoTrolleyAck(self):
ntbDoneStatus = self.noTrolleyBox.doneStatus
if ntbDoneStatus == 'ok':
doneStatus = {}
doneStatus['mode'] = 'reject'
messenger.send(self.doneEvent, [doneStatus])
else:
self.notify.error('Unrecognized doneStatus: ' + str(ntbDoneStatus))
def enterRequestBoard(self):
return None
def handleRejectBoard(self):
doneStatus = {}
doneStatus['mode'] = 'reject'
messenger.send(self.doneEvent, [doneStatus])
def exitRequestBoard(self):
return None
def enterBoarding(self, nodePath):
camera.wrtReparentTo(nodePath)
heading = PythonUtil.fitDestAngle2Src(camera.getH(nodePath), 180)
self.cameraBoardTrack = LerpPosHprInterval(camera, 1.5, Point3(0, 18, 8), Point3(heading, -10, 0))
self.cameraBoardTrack.start()
return None
def exitBoarding(self):
self.ignore('boardedTrolley')
return None
def enterBoarded(self):
self.enableExitButton()
return None
def exitBoarded(self):
self.cameraBoardTrack.finish()
self.disableExitButton()
return None
def enableExitButton(self):
self.exitButton = DirectButton(relief=None, text=TTLocalizer.TrolleyHopOff, text_fg=(1, 1, 0.65, 1), text_pos=(0, -0.23), text_scale=0.8, image=(self.upButton, self.downButton, self.rolloverButton), image_color=(1, 0, 0, 1), image_scale=(20, 1, 11), pos=(0, 0, 0.8), scale=0.15, command=lambda self = self: self.fsm.request('requestExit'))
return
def disableExitButton(self):
self.exitButton.destroy()
def enterRequestExit(self):
messenger.send('trolleyExitButton')
return None
def exitRequestExit(self):
return None
def enterTrolleyLeaving(self):
self.acceptOnce('playMinigame', self.handlePlayMinigame)
self.acceptOnce('playGolf', self.handlePlayGolf)
return None
def handlePlayMinigame(self, zoneId, minigameId):
base.localAvatar.b_setParent(ToontownGlobals.SPHidden)
doneStatus = {}
doneStatus['mode'] = 'minigame'
doneStatus['zoneId'] = zoneId
doneStatus['minigameId'] = minigameId
messenger.send(self.doneEvent, [doneStatus])
def handlePlayGolf(self, zoneId, courseId):
base.localAvatar.b_setParent(ToontownGlobals.SPHidden)
doneStatus = {}
doneStatus['mode'] = 'golfcourse'
doneStatus['zoneId'] = zoneId
doneStatus['courseId'] = courseId
messenger.send(self.doneEvent, [doneStatus])
def exitTrolleyLeaving(self):
self.ignore('playMinigame')
taskMgr.remove('leavingCamera')
return None
def enterExiting(self):
return None
def handleOffTrolley(self):
doneStatus = {}
doneStatus['mode'] = 'exit'
messenger.send(self.doneEvent, [doneStatus])
return None
def exitExiting(self):
return None
def enterFinal(self):
return None
def exitFinal(self):
    """Terminal state: no teardown required."""
    pass
| mit |
atiro/nikola | nikola/plugins/compile/ipynb.py | 1 | 6893 | # -*- coding: utf-8 -*-
# Copyright © 2013-2016 Damián Avila, Chris Warrick and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Implementation of compile_html based on nbconvert."""
from __future__ import unicode_literals, print_function
import io
import os
import sys
try:
from nbconvert.exporters import HTMLExporter
import nbformat
current_nbformat = nbformat.current_nbformat
from jupyter_client import kernelspec
from traitlets.config import Config
flag = True
ipy_modern = True
except ImportError:
try:
import IPython
from IPython.nbconvert.exporters import HTMLExporter
if IPython.version_info[0] >= 3: # API changed with 3.0.0
from IPython import nbformat
current_nbformat = nbformat.current_nbformat
from IPython.kernel import kernelspec
ipy_modern = True
else:
import IPython.nbformat.current as nbformat
current_nbformat = 'json'
kernelspec = None
ipy_modern = False
from IPython.config import Config
flag = True
except ImportError:
flag = None
ipy_modern = None
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, get_logger, STDERR_HANDLER
from nikola.shortcodes import apply_shortcodes
class CompileIPynb(PageCompiler):
    """Compile Jupyter/IPython notebooks (.ipynb) into HTML."""

    name = "ipynb"
    friendly_name = "Jupyter/IPython Notebook"
    demote_headers = True
    # Kernel used when the caller does not specify one explicitly.
    default_kernel = 'python2' if sys.version_info[0] == 2 else 'python3'

    def set_site(self, site):
        """Set Nikola site."""
        self.logger = get_logger('compile_ipynb', STDERR_HANDLER)
        super(CompileIPynb, self).set_site(site)

    def compile_html_string(self, source, is_two_file=True):
        """Export notebooks as HTML strings."""
        if flag is None:
            req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
        # 'basic' emits a bare fragment suitable for embedding in a Nikola page.
        HTMLExporter.default_template = 'basic'
        c = Config(self.site.config['IPYNB_CONFIG'])
        exportHtml = HTMLExporter(config=c)
        with io.open(source, "r", encoding="utf8") as in_file:
            nb_json = nbformat.read(in_file, current_nbformat)
        (body, resources) = exportHtml.from_notebook_node(nb_json)
        return body

    def compile_html(self, source, dest, is_two_file=True):
        """Compile source file into HTML and save as dest."""
        makedirs(os.path.dirname(dest))
        with io.open(dest, "w+", encoding="utf8") as out_file:
            output = self.compile_html_string(source, is_two_file)
            output = apply_shortcodes(output, self.site.shortcode_registry, self.site, source)
            out_file.write(output)

    def read_metadata(self, post, file_metadata_regexp=None, unslugify_titles=False, lang=None):
        """Read metadata directly from ipynb file.

        As ipynb files support arbitrary metadata as JSON, the metadata used by
        Nikola is assumed to live in the 'nikola' subfield.
        """
        if flag is None:
            req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
        source = post.source_path
        with io.open(source, "r", encoding="utf8") as in_file:
            nb_json = nbformat.read(in_file, current_nbformat)
        # Metadata might not exist in two-file posts or in hand-crafted
        # .ipynb files.
        return nb_json.get('metadata', {}).get('nikola', {})

    def create_post(self, path, **kw):
        """Create a new post, either from imported notebook JSON or from scratch."""
        if flag is None:
            req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
        content = kw.pop('content', None)
        onefile = kw.pop('onefile', False)
        kernel = kw.pop('ipython_kernel', None)
        # is_page is not needed to create the file
        kw.pop('is_page', False)
        metadata = {}
        metadata.update(self.default_metadata)
        metadata.update(kw)
        makedirs(os.path.dirname(path))
        if content.startswith("{"):
            # imported .ipynb file, guaranteed to start with "{" because it’s JSON.
            nb = nbformat.reads(content, current_nbformat)
        else:
            if ipy_modern:
                nb = nbformat.v4.new_notebook()
                nb["cells"] = [nbformat.v4.new_markdown_cell(content)]
            else:
                nb = nbformat.new_notebook()
                nb["worksheets"] = [nbformat.new_worksheet(cells=[nbformat.new_text_cell('markdown', [content])])]
            if kernelspec is not None:
                if kernel is None:
                    kernel = self.default_kernel
                    self.logger.notice('No kernel specified, assuming "{0}".'.format(kernel))
                # Build a name -> spec map of the kernels installed on this machine.
                IPYNB_KERNELS = {}
                ksm = kernelspec.KernelSpecManager()
                for k in ksm.find_kernel_specs():
                    IPYNB_KERNELS[k] = ksm.get_kernel_spec(k).to_dict()
                    IPYNB_KERNELS[k]['name'] = k
                    del IPYNB_KERNELS[k]['argv']
                if kernel not in IPYNB_KERNELS:
                    # Typo fix: message previously read "mispelled".
                    self.logger.error('Unknown kernel "{0}". Maybe you misspelled it?'.format(kernel))
                    self.logger.info("Available kernels: {0}".format(", ".join(sorted(IPYNB_KERNELS))))
                    raise Exception('Unknown kernel "{0}"'.format(kernel))
                nb["metadata"]["kernelspec"] = IPYNB_KERNELS[kernel]
            else:
                # Older IPython versions don’t need kernelspecs.
                pass
        if onefile:
            nb["metadata"]["nikola"] = metadata
        with io.open(path, "w+", encoding="utf8") as fd:
            if ipy_modern:
                nbformat.write(nb, fd, 4)
            else:
                nbformat.write(nb, fd, 'ipynb')
| mit |
rbarlow/pulp | server/pulp/server/webservices/views/plugins.py | 14 | 5901 | from django.views.generic import View
from pulp.server.auth import authorization
from pulp.server.exceptions import MissingResource
from pulp.server.managers import factory
from pulp.server.webservices.views.decorators import auth_required
from pulp.server.webservices.views.util import (generate_json_response,
generate_json_response_with_pulp_encoder)
class DistributorResourceView(View):
    """
    Views for a single distributor.
    """

    @auth_required(authorization.READ)
    def get(self, request, distributor_id):
        """
        Return a response containing serialized data for the specified distributor.

        :param request : WSGI request object
        :type request : django.core.handlers.wsgi.WSGIRequest
        :param distributor_id: id of distributor to match
        :type distributor_id: string

        :return : Response containing serialized data for the specified distributor
        :rtype : django.http.HttpResponse
        :raises : MissingResource if distributor_id is not found
        """
        # The plugin manager has no per-id lookup, so scan the full list.
        manager = factory.plugin_manager()
        all_distributors = manager.distributors()
        for distributor in all_distributors:
            if distributor['id'] == distributor_id:
                distributor['_href'] = request.get_full_path()
                return generate_json_response(distributor)
        raise MissingResource(distributor_type_id=distributor_id)
class DistributorsView(View):
    """
    Views for all distributors.
    """

    @auth_required(authorization.READ)
    def get(self, request):
        """
        Return response containing a serialized list of dicts, one for each distributor.

        :param request: WSGI Request object
        :type request: django.core.handlers.wsgi.WSGIRequest

        :return : Response containing a serialized list of dicts, one for each distributor
        :rtype : django.http.HttpResponse
        """
        base_path = request.get_full_path().rstrip('/')
        distributors = factory.plugin_manager().distributors()
        # Decorate every entry with its resource href before serializing.
        for entry in distributors:
            entry['_href'] = '%s/%s/' % (base_path, entry['id'])
        return generate_json_response(distributors)
class ImporterResourceView(View):
    """
    Views for an individual importer.
    """

    @auth_required(authorization.READ)
    def get(self, request, importer_id):
        """
        Return a response containing serialized data for the specified importer.

        :param request : WSGI request object
        :type request : django.core.handlers.wsgi.WSGIRequest
        :param importer_id : name of importer to return information for
        :type importer_id : string

        :return : Response containing serialized data for specified importer
        :rtype : django.http.HttpResponse
        :raises : MissingResource if importer_id cannot be found
        """
        all_importers = factory.plugin_manager().importers()
        # First importer whose id matches, or None if absent.
        match = next((imp for imp in all_importers if imp['id'] == importer_id), None)
        if match is None:
            raise MissingResource(importer_type_id=importer_id)
        match['_href'] = request.get_full_path()
        return generate_json_response(match)
class ImportersView(View):
    """
    Views for all importers.
    """

    @auth_required(authorization.READ)
    def get(self, request):
        """
        Return a response containing a serialized list of importers present in the server.

        :param request: WSGI request object
        :type request: django.core.handlers.wsgi.WSGIRequest

        :return : Response containing a serialized list of dicts containing importer data
        :rtype : django.http.HttpResponse
        """
        base_path = request.get_full_path().rstrip('/')
        importers = factory.plugin_manager().importers()
        # Decorate every entry with its resource href before serializing.
        for entry in importers:
            entry['_href'] = '%s/%s/' % (base_path, entry['id'])
        return generate_json_response(importers)
class TypeResourceView(View):
    """
    View for dealing with a specific plugin type.
    """

    @auth_required(authorization.READ)
    def get(self, request, type_id):
        """
        Return a single type definition.

        :param request: WSGI request object
        :type request: django.core.handlers.wsgi.WSGIRequest

        :return : Serialized response containing a type definition
        :rtype : HttpResponse
        :raises : MissingResource if type_id is not found
        """
        # The plugin manager has no per-id lookup, so scan the full list.
        manager = factory.plugin_manager()
        all_types = manager.types()
        for plugin_type in all_types:
            if plugin_type['id'] == type_id:
                plugin_type['_href'] = request.get_full_path()
                return generate_json_response_with_pulp_encoder(plugin_type)
        raise MissingResource(type=type_id)
class TypesView(View):
    """
    View for dealing with all plugin types.
    """

    @auth_required(authorization.READ)
    def get(self, request):
        """
        Get all type definitions.

        :param request: WSGI Request object
        :type request: django.core.handlers.wsgi.WSGIRequest

        :return : Response containing serialized list data for all available content types
        :rtype : django.http.HttpResponse
        """
        prefix = request.get_full_path().rstrip('/')
        type_defs = factory.plugin_manager().types()
        # Decorate every definition with its resource href before serializing.
        for type_definition in type_defs:
            type_definition['_href'] = '%s/%s/' % (prefix, type_definition['id'])
        return generate_json_response_with_pulp_encoder(type_defs)
| gpl-2.0 |
joaquimrocha/Rancho | rancho/south/management/commands/startmigration.py | 2 | 43745 | """
Startmigration command, version 2.
"""
import sys
import os
import re
import string
import random
import inspect
import parser
from optparse import make_option
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import models
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from django.contrib.contenttypes.generic import GenericRelation
from django.db.models.fields import FieldDoesNotExist
from django.conf import settings
try:
set
except NameError:
from sets import Set as set
from south import migration, modelsinspector
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--model', action='append', dest='added_model_list', type='string',
help='Generate a Create Table migration for the specified model. Add multiple models to this migration with subsequent --model parameters.'),
make_option('--add-field', action='append', dest='added_field_list', type='string',
help='Generate an Add Column migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'),
make_option('--add-index', action='append', dest='added_index_list', type='string',
help='Generate an Add Index migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'),
make_option('--initial', action='store_true', dest='initial', default=False,
help='Generate the initial schema for the app.'),
make_option('--auto', action='store_true', dest='auto', default=False,
help='Attempt to automatically detect differences from the last migration.'),
make_option('--freeze', action='append', dest='freeze_list', type='string',
help='Freeze the specified model(s). Pass in either an app name (to freeze the whole app) or a single model, as appname.modelname.'),
make_option('--stdout', action='store_true', dest='stdout', default=False,
help='Print the migration to stdout instead of writing it to a file.'),
)
help = "Creates a new template migration for the given app"
usage_str = "Usage: ./manage.py startmigration appname migrationname [--initial] [--auto] [--model ModelName] [--add-field ModelName.field_name] [--freeze] [--stdout]"
def handle(self, app=None, name="", added_model_list=None, added_field_list=None, initial=False, freeze_list=None, auto=False, stdout=False, added_index_list=None, **options):
# Any supposed lists that are None become empty lists
added_model_list = added_model_list or []
added_field_list = added_field_list or []
added_index_list = added_index_list or []
# --stdout means name = -
if stdout:
name = "-"
# Make sure options are compatable
if initial and (added_model_list or added_field_list or auto):
print "You cannot use --initial and other options together"
print self.usage_str
return
if auto and (added_model_list or added_field_list or initial):
print "You cannot use --auto and other options together"
print self.usage_str
return
# specify the default name 'initial' if a name wasn't specified and we're
# doing a migration for an entire app
if not name and initial:
name = 'initial'
# if not name, there's an error
if not name:
print "You must name this migration"
print self.usage_str
return
if not app:
print "Please provide an app in which to create the migration."
print self.usage_str
return
# Make sure the app is short form
app = app.split(".")[-1]
# See if the app exists
app_models_module = models.get_app(app)
if not app_models_module:
print "App '%s' doesn't seem to exist, isn't in INSTALLED_APPS, or has no models." % app
print self.usage_str
return
# If they've set SOUTH_AUTO_FREEZE_APP = True (or not set it - defaults to True)
if not hasattr(settings, 'SOUTH_AUTO_FREEZE_APP') or settings.SOUTH_AUTO_FREEZE_APP:
if freeze_list and app not in freeze_list:
freeze_list += [app]
else:
freeze_list = [app]
# Make the migrations directory if it's not there
app_module_path = app_models_module.__name__.split('.')[0:-1]
try:
app_module = __import__('.'.join(app_module_path), {}, {}, [''])
except ImportError:
print "Couldn't find path to App '%s'." % app
print self.usage_str
return
migrations_dir = os.path.join(
os.path.dirname(app_module.__file__),
"migrations",
)
# Make sure there's a migrations directory and __init__.py
if not os.path.isdir(migrations_dir):
print "Creating migrations directory at '%s'..." % migrations_dir
os.mkdir(migrations_dir)
init_path = os.path.join(migrations_dir, "__init__.py")
if not os.path.isfile(init_path):
# Touch the init py file
print "Creating __init__.py in '%s'..." % migrations_dir
open(init_path, "w").close()
# See what filename is next in line. We assume they use numbers.
migrations = migration.get_migration_names(migration.get_app(app))
highest_number = 0
for migration_name in migrations:
try:
number = int(migration_name.split("_")[0])
highest_number = max(highest_number, number)
except ValueError:
pass
# Make the new filename
new_filename = "%04i%s_%s.py" % (
highest_number + 1,
"".join([random.choice(string.letters.lower()) for i in range(0)]), # Possible random stuff insertion
name,
)
# Find the source file encoding, using PEP 0263's method
encoding = None
first_two_lines = inspect.getsourcelines(app_models_module)[0][:2]
for line in first_two_lines:
if re.search("coding[:=]\s*([-\w.]+)", line):
encoding = line
# Initialise forwards, backwards and models to blank things
forwards = ""
backwards = ""
frozen_models = {} # Frozen models, used by the Fake ORM
complete_apps = set() # Apps that are completely frozen - useable for diffing.
# Sets of actions
added_models = set()
deleted_models = [] # Special: contains instances _not_ string keys
added_fields = set()
deleted_fields = [] # Similar to deleted_models
changed_fields = [] # (mkey, fname, old_def, new_def)
added_uniques = set() # (mkey, field_names)
deleted_uniques = set() # (mkey, field_names)
added_indexes = set()
deleted_indexes = []
# --initial means 'add all models in this app'.
if initial:
for model in models.get_models(app_models_module):
added_models.add("%s.%s" % (app, model._meta.object_name))
# Added models might be 'model' or 'app.model'.
for modelname in added_model_list:
if "." in modelname:
added_models.add(modelname)
else:
added_models.add("%s.%s" % (app, modelname))
# Fields need translating from "model.field" to (app.model, field)
for fielddef in added_field_list:
try:
modelname, fieldname = fielddef.split(".", 1)
except ValueError:
print "The field specification '%s' is not in modelname.fieldname format." % fielddef
else:
added_fields.add(("%s.%s" % (app, modelname), fieldname))
# same thing as above, but for indexes
for fielddef in added_index_list:
try:
modelname, fieldname = fielddef.split(".", 1)
except ValueError:
print "The field specification '%s' is not in modelname.fieldname format." % fielddef
else:
added_indexes.add(("%s.%s" % (app, modelname), fieldname))
# Add anything frozen (I almost called the dict Iceland...)
if freeze_list:
for item in freeze_list:
if "." in item:
# It's a specific model
app_name, model_name = item.split(".", 1)
model = models.get_model(app_name, model_name)
if model is None:
print "Cannot find the model '%s' to freeze it." % item
print self.usage_str
return
frozen_models[model] = None
else:
# Get everything in an app!
frozen_models.update(dict([(x, None) for x in models.get_models(models.get_app(item))]))
complete_apps.add(item.split(".")[-1])
# For every model in the freeze list, add in frozen dependencies
for model in list(frozen_models):
frozen_models.update(model_dependencies(model))
### Automatic Detection ###
if auto:
# Get the last migration for this app
last_models = None
app_module = migration.get_app(app)
if app_module is None:
print "You cannot use automatic detection on the first migration of an app. Try --initial instead."
else:
migrations = list(migration.get_migration_classes(app_module))
if not migrations:
print "You cannot use automatic detection on the first migration of an app. Try --initial instead."
else:
if hasattr(migrations[-1], "complete_apps") and \
app in migrations[-1].complete_apps:
last_models = migrations[-1].models
last_orm = migrations[-1].orm
else:
print "You cannot use automatic detection, since the previous migration does not have this whole app frozen.\nEither make migrations using '--freeze %s' or set 'SOUTH_AUTO_FREEZE_APP = True' in your settings.py." % app
# Right, did we manage to get the last set of models?
if last_models is None:
print self.usage_str
return
new = dict([
(model_key(model), prep_for_freeze(model))
for model in models.get_models(app_models_module)
if (
not getattr(model._meta, "proxy", False) and \
getattr(model._meta, "managed", True) and \
not getattr(model._meta, "abstract", False)
)
])
# And filter other apps out of the old
old = dict([
(key, fields)
for key, fields in last_models.items()
if key.split(".", 1)[0] == app
])
am, dm, cm, af, df, cf, afu, dfu = models_diff(old, new)
# For models that were there before and after, do a meta diff
was_meta_change = False
for mkey in cm:
au, du = meta_diff(old[mkey].get("Meta", {}), new[mkey].get("Meta", {}))
for entry in au:
added_uniques.add((mkey, entry))
was_meta_change = True
for entry in du:
deleted_uniques.add((mkey, entry, last_orm[mkey]))
was_meta_change = True
if not (am or dm or af or df or cf or afu or dfu or was_meta_change):
print "Nothing seems to have changed."
return
# Add items to the todo lists
added_models.update(am)
added_fields.update(af)
changed_fields.extend([(m, fn, ot, nt, last_orm) for m, fn, ot, nt in cf])
# Deleted models are from the past, and so we use instances instead.
for mkey in dm:
model = last_orm[mkey]
fields = last_models[mkey]
if "Meta" in fields:
del fields['Meta']
deleted_models.append((model, fields, last_models))
# For deleted fields, we tag the instance on the end too
for mkey, fname in df:
deleted_fields.append((
mkey,
fname,
last_orm[mkey]._meta.get_field_by_name(fname)[0],
last_models[mkey][fname],
last_models,
))
# Uniques need merging
added_uniques = added_uniques.union(afu)
for mkey, entry in dfu:
deleted_uniques.add((mkey, entry, last_orm[mkey]))
### Added model ###
for mkey in added_models:
print " + Added model '%s'" % (mkey,)
model = model_unkey(mkey)
# Add the model's dependencies to the frozens
frozen_models.update(model_dependencies(model))
# Get the field definitions
fields = modelsinspector.get_model_fields(model)
# Turn the (class, args, kwargs) format into a string
fields = triples_to_defs(app, model, fields)
# Make the code
forwards += CREATE_TABLE_SNIPPET % (
model._meta.object_name,
model._meta.db_table,
"\n ".join(["('%s', orm[%r])," % (fname, mkey + ":" + fname) for fname, fdef in fields.items()]),
model._meta.app_label,
model._meta.object_name,
)
# And the backwards code
backwards += DELETE_TABLE_SNIPPET % (
model._meta.object_name,
model._meta.db_table
)
# Now add M2M fields to be done
for field in model._meta.local_many_to_many:
added_fields.add((mkey, field.attname))
# And unique_togethers to be added
for ut in model._meta.unique_together:
added_uniques.add((mkey, tuple(ut)))
### Added fields ###
for mkey, field_name in added_fields:
# Get the model
model = model_unkey(mkey)
# Get the field
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
print "Model '%s' doesn't have a field '%s'" % (mkey, field_name)
return
# ManyToMany fields need special attention.
if isinstance(field, models.ManyToManyField):
if not field.rel.through: # Bug #120
# Add a frozen model for each side
frozen_models[model] = None
frozen_models[field.rel.to] = None
# And a field defn, that's actually a table creation
forwards += CREATE_M2MFIELD_SNIPPET % (
model._meta.object_name,
field.name,
field.m2m_db_table(),
field.m2m_column_name()[:-3], # strip off the '_id' at the end
poss_ormise(app, model, model._meta.object_name),
field.m2m_reverse_name()[:-3], # strip off the '_id' at the ned
poss_ormise(app, field.rel.to, field.rel.to._meta.object_name)
)
backwards += DELETE_M2MFIELD_SNIPPET % (
model._meta.object_name,
field.name,
field.m2m_db_table()
)
print " + Added M2M '%s.%s'" % (mkey, field_name)
continue
# GenericRelations need ignoring
if isinstance(field, GenericRelation):
continue
print " + Added field '%s.%s'" % (mkey, field_name)
# Add any dependencies
frozen_models.update(field_dependencies(field))
# Work out the definition
triple = remove_useless_attributes(
modelsinspector.get_model_fields(model)[field_name])
field_definition = make_field_constructor(app, field, triple)
forwards += CREATE_FIELD_SNIPPET % (
model._meta.object_name,
field.name,
model._meta.db_table,
field.name,
"orm[%r]" % (mkey + ":" + field.name),
)
backwards += DELETE_FIELD_SNIPPET % (
model._meta.object_name,
field.name,
model._meta.db_table,
field.column,
)
### Deleted fields ###
for mkey, field_name, field, triple, last_models in deleted_fields:
print " - Deleted field '%s.%s'" % (mkey, field_name)
# Get the model
model = model_unkey(mkey)
# ManyToMany fields need special attention.
if isinstance(field, models.ManyToManyField):
# And a field defn, that's actually a table deletion
forwards += DELETE_M2MFIELD_SNIPPET % (
model._meta.object_name,
field.name,
field.m2m_db_table()
)
backwards += CREATE_M2MFIELD_SNIPPET % (
model._meta.object_name,
field.name,
field.m2m_db_table(),
field.m2m_column_name()[:-3], # strip off the '_id' at the end
poss_ormise(app, model, model._meta.object_name),
field.m2m_reverse_name()[:-3], # strip off the '_id' at the ned
poss_ormise(app, field.rel.to, field.rel.to._meta.object_name)
)
continue
# Work out the definition
triple = remove_useless_attributes(triple)
field_definition = make_field_constructor(app, field, triple)
forwards += DELETE_FIELD_SNIPPET % (
model._meta.object_name,
field.name,
model._meta.db_table,
field.column,
)
backwards += CREATE_FIELD_SNIPPET % (
model._meta.object_name,
field.name,
model._meta.db_table,
field.name,
"orm[%r]" % (mkey + ":" + field.name),
)
### Deleted model ###
for model, fields, last_models in deleted_models:
print " - Deleted model '%s.%s'" % (model._meta.app_label,model._meta.object_name)
# Turn the (class, args, kwargs) format into a string
fields = triples_to_defs(app, model, fields)
# Make the code
forwards += DELETE_TABLE_SNIPPET % (
model._meta.object_name,
model._meta.db_table
)
# And the backwards code
backwards += CREATE_TABLE_SNIPPET % (
model._meta.object_name,
model._meta.db_table,
"\n ".join(["('%s', orm[%r])," % (fname, mkey + ":" + fname) for fname, fdef in fields.items()]),
model._meta.app_label,
model._meta.object_name,
)
### Added indexes. going here, since it might add to added_uniques ###
for mkey, field_name in added_indexes:
# Get the model
model = model_unkey(mkey)
# Get the field
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
print "Model '%s' doesn't have a field '%s'" % (mkey, field_name)
return
if field.unique:
ut = (mkey, (field.name,))
added_uniques.add(ut)
elif field.db_index:
# Create migrations
forwards += CREATE_INDEX_SNIPPET % (
model._meta.object_name,
field.name,
model._meta.db_table,
field.name,
)
backwards += DELETE_INDEX_SNIPPET % (
model._meta.object_name,
field.name,
model._meta.db_table,
field.column,
)
print " + Added index for '%s.%s'" % (mkey, field_name)
else:
print "Field '%s.%s' does not have db_index or unique set to True" % (mkey, field_name)
return
### Changed fields ###
for mkey, field_name, old_triple, new_triple, last_orm in changed_fields:
model = model_unkey(mkey)
old_def = triples_to_defs(app, model, {
field_name: old_triple,
})[field_name]
new_def = triples_to_defs(app, model, {
field_name: new_triple,
})[field_name]
# We need to create the fields, to see if it needs _id, or if it's an M2M
field = model._meta.get_field_by_name(field_name)[0]
old_field = last_orm[mkey + ":" + field_name]
if field.column != old_field.column:
forwards += RENAME_COLUMN_SNIPPET % {
"field_name": field_name,
"old_column": old_field.column,
"new_column": field.column,
}
if hasattr(field, "m2m_db_table"):
# See if anything has ACTUALLY changed
if old_triple[1] != new_triple[1]:
print " ! Detected change to the target model of M2M field '%s.%s'. South can't handle this; leaving this change out." % (mkey, field_name)
continue
print " ~ Changed field '%s.%s'." % (mkey, field_name)
forwards += CHANGE_FIELD_SNIPPET % (
model._meta.object_name,
field_name,
new_def,
model._meta.db_table,
field.get_attname(),
"orm[%r]" % (mkey + ":" + field.name),
)
backwards += CHANGE_FIELD_SNIPPET % (
model._meta.object_name,
field_name,
old_def,
model._meta.db_table,
field.get_attname(),
"orm[%r]" % (mkey + ":" + field.name),
)
if field.column != old_field.column:
backwards += RENAME_COLUMN_SNIPPET % {
"field_name": field_name,
"old_column": field.column,
"new_column": old_field.column,
}
### Added unique_togethers ###
for mkey, ut in added_uniques:
model = model_unkey(mkey)
if len(ut) == 1:
print " + Added unique for %s on %s." % (", ".join(ut), model._meta.object_name)
else:
print " + Added unique_together for [%s] on %s." % (", ".join(ut), model._meta.object_name)
cols = [get_field_column(model, f) for f in ut]
forwards += CREATE_UNIQUE_SNIPPET % (
", ".join(ut),
model._meta.object_name,
model._meta.db_table,
cols,
)
backwards = DELETE_UNIQUE_SNIPPET % (
", ".join(ut),
model._meta.object_name,
model._meta.db_table,
cols,
) + backwards
### Deleted unique_togethers ###
for mkey, ut, model in deleted_uniques:
if len(ut) == 1:
print " - Deleted unique for %s on %s." % (", ".join(ut), model._meta.object_name)
else:
print " - Deleted unique_together for [%s] on %s." % (", ".join(ut), model._meta.object_name)
cols = [get_field_column(model, f) for f in ut]
forwards = DELETE_UNIQUE_SNIPPET % (
", ".join(ut),
model._meta.object_name,
model._meta.db_table,
cols,
) + forwards
backwards += CREATE_UNIQUE_SNIPPET % (
", ".join(ut),
model._meta.object_name,
model._meta.db_table,
cols,
)
# Default values for forwards/backwards
if (not forwards) and (not backwards):
forwards = '"Write your forwards migration here"'
backwards = '"Write your backwards migration here"'
all_models = {}
# Fill out frozen model definitions
for model, last_models in frozen_models.items():
if hasattr(model._meta, "proxy") and model._meta.proxy:
model = model._meta.proxy_for_model
all_models[model_key(model)] = prep_for_freeze(model, last_models)
# Do some model cleanup, and warnings
for modelname, model in all_models.items():
for fieldname, fielddef in model.items():
# Remove empty-after-cleaning Metas.
if fieldname == "Meta" and not fielddef:
del model['Meta']
# Warn about undefined fields
elif fielddef is None:
print "WARNING: Cannot get definition for '%s' on '%s'. Please edit the migration manually to define it, or add the south_field_triple method to it." % (
fieldname,
modelname,
)
model[fieldname] = FIELD_NEEDS_DEF_SNIPPET
# So, what's in this file, then?
file_contents = MIGRATION_SNIPPET % (
encoding or "", '.'.join(app_module_path),
forwards,
backwards,
pprint_frozen_models(all_models),
complete_apps and "complete_apps = [%s]" % (", ".join(map(repr, complete_apps))) or ""
)
# - is a special name which means 'print to stdout'
if name == "-":
print file_contents
# Write the migration file if the name isn't -
else:
fp = open(os.path.join(migrations_dir, new_filename), "w")
fp.write(file_contents)
fp.close()
print "Created %s." % new_filename
### Cleaning functions for freezing
def ormise_triple(field, triple):
    """Run poss_ormise over every positional arg of a field 'triple' definition."""
    # String-style definitions carry no args to rewrite.
    if not isinstance(triple, (list, tuple)):
        return triple
    # Whether this field points at another model is loop-invariant.
    relatable = hasattr(field, "rel") and hasattr(field.rel, "to") and field.rel.to
    new_args = [
        poss_ormise(None, field.rel.to, arg) if relatable else arg
        for arg in triple[1]
    ]
    return (triple[0], new_args, triple[2])
def prep_for_freeze(model, last_models=None):
    """
    Return the frozen field-definition dict for `model`.

    If `last_models` (a previously frozen model mapping) is given, definitions
    are taken from it; otherwise they are introspected from the live model.
    """
    # If we have a set of models to use, use them.
    if last_models:
        fields = last_models[model_key(model)]
    else:
        fields = modelsinspector.get_model_fields(model, m2m=True)
    # Remove _stub if it stuck in
    if "_stub" in fields:
        del fields["_stub"]
    # Remove useless attributes (like 'choices')
    for name, field in fields.items():
        if name == "Meta":
            continue
        real_field = model._meta.get_field_by_name(name)[0]
        fields[name] = ormise_triple(real_field, remove_useless_attributes(field))
    # See if there's a Meta
    if last_models:
        meta = last_models[model_key(model)].get("Meta", {})
    else:
        meta = modelsinspector.get_model_meta(model)
    if meta:
        fields['Meta'] = remove_useless_meta(meta)
    return fields
### Module handling functions
def model_key(model):
    "For a given model, return 'appname.modelname'."
    meta = model._meta
    return meta.app_label + "." + meta.object_name.lower()
def model_unkey(key):
    """For 'appname.modelname', return the model class.

    Exits the process (status 1) if the model cannot be found, since every
    caller needs a real model to continue.
    """
    app, modelname = key.split(".", 1)
    model = models.get_model(app, modelname)
    if not model:
        print "Couldn't find model '%s' in app '%s'" % (modelname, app)
        sys.exit(1)
    return model
### Dependency resolvers
def model_dependencies(model, last_models=None, checked_models=None):
    """
    Returns a set of models this one depends on to be defined; things like
    OneToOneFields as ID, ForeignKeys everywhere, etc.

    The result is a dict mapping dependency model -> last_models context.
    *checked_models* is shared across the recursion to avoid revisiting
    models (and to break relation cycles).
    """
    depends = {}
    checked_models = checked_models or set()
    # Get deps for each field
    for field in model._meta.fields + model._meta.many_to_many:
        depends.update(field_dependencies(field, last_models))
    # Now recurse
    new_to_check = set(depends.keys()) - checked_models
    while new_to_check:
        checked_model = new_to_check.pop()
        if checked_model == model or checked_model in checked_models:
            continue
        checked_models.add(checked_model)
        deps = model_dependencies(checked_model, last_models, checked_models)
        # Loop through dependencies...
        for dep, value in deps.items():
            # If the new dep is not already checked, add to the queue
            if (dep not in depends) and (dep not in new_to_check) and (dep not in checked_models):
                new_to_check.add(dep)
            depends[dep] = value
    return depends
def field_dependencies(field, last_models=None, checked_models=None):
    """Return the models *field* depends on (relation targets and their PKs)."""
    checked_models = checked_models or set()
    depends = {}
    if isinstance(field, (models.OneToOneField, models.ForeignKey, models.ManyToManyField, GenericRelation)):
        # Guard against relation cycles (e.g. self-referencing FKs).
        if field.rel.to in checked_models:
            return depends
        checked_models.add(field.rel.to)
        depends[field.rel.to] = last_models
        # The target's primary key may itself be a relation (OneToOne-as-pk),
        # so recurse into it too.
        depends.update(field_dependencies(field.rel.to._meta.pk, last_models, checked_models))
    return depends
### Prettyprinters
def pprint_frozen_models(models):
    "Render a frozen-models dict as an indented Python dict literal."
    rendered = []
    for name, fields in sorted(models.items()):
        rendered.append("%r: %s" % (name, pprint_fields(fields)))
    return "{\n %s\n }" % ",\n ".join(rendered)
def pprint_fields(fields):
    "Render a frozen-fields dict as an indented Python dict literal."
    rendered = []
    for name, defn in sorted(fields.items()):
        rendered.append("%r: %r" % (name, defn))
    return "{\n %s\n }" % ",\n ".join(rendered)
### Output sanitisers
# Keywords that matter to the ORM but never to the generated schema.
USELESS_KEYWORDS = ["choices", "help_text", "upload_to", "verbose_name"]
USELESS_DB_KEYWORDS = ["related_name", "default"] # Important for ORM, not for DB.

def remove_useless_attributes(field, db=False):
    "Removes useless (for database) attributes from the field's defn."
    # Database-level comparisons strip a different keyword set than freezes.
    keywords = USELESS_DB_KEYWORDS if db else USELESS_KEYWORDS
    if field:
        for keyword in keywords:
            field[2].pop(keyword, None)
    return field
# Meta options that are presentation-only and never affect the schema.
USELESS_META = ["verbose_name", "verbose_name_plural"]

def remove_useless_meta(meta):
    "Removes useless (for database) attributes from the table's meta."
    if meta:
        for keyword in USELESS_META:
            meta.pop(keyword, None)
    return meta
### Turns (class, args, kwargs) triples into function defs.
def make_field_constructor(default_app, field, triple):
    """
    Given the default app, the field class,
    and the defn triple (or string), make the definition string.
    """
    # It might be None; return a placeholder
    if triple is None:
        return FIELD_NEEDS_DEF_SNIPPET
    # It might be a defn string already...
    # NOTE(review): `unicode` makes this branch Python 2 only.
    if isinstance(triple, (str, unicode)):
        return triple
    # OK, do it the hard way
    if hasattr(field, "rel") and hasattr(field.rel, "to") and field.rel.to:
        rel_to = field.rel.to
    else:
        rel_to = None
    # ORMise positional and keyword args so related models resolve through
    # the frozen orm at migration time.
    args = [poss_ormise(default_app, rel_to, arg) for arg in triple[1]]
    kwds = ["%s=%s" % (k, poss_ormise(default_app, rel_to, v)) for k, v in triple[2].items()]
    return "%s(%s)" % (triple[0], ", ".join(args + kwds))
# Quote styles checked longest-first so triple quotes win over single ones.
QUOTES = ['"""', "'''", '"', "'"]

def poss_ormise(default_app, rel_to, arg):
    """
    Given the name of something that needs orm. stuck on the front and
    a python eval-able string, possibly add orm. to it.

    Returns the orm-qualified name when *arg* names the related model
    (bare, app-qualified, or 'self'); otherwise returns *arg* unchanged.
    """
    orig_arg = arg
    # If it's not a relative field, short-circuit out
    if not rel_to:
        return arg
    # Get the name of the other model
    rel_name = rel_to._meta.object_name
    # Is it in a different app? If so, use proper addressing.
    if rel_to._meta.app_label != default_app:
        real_name = "orm['%s.%s']" % (rel_to._meta.app_label, rel_name)
    else:
        real_name = "orm.%s" % rel_name
    # If it's surrounded by quotes, get rid of those
    for quote_type in QUOTES:
        l = len(quote_type)
        if arg[:l] == quote_type and arg[-l:] == quote_type:
            arg = arg[l:-l]
            break
    # Now see if we can replace it.
    if arg.lower() == rel_name.lower():
        return real_name
    # Or perhaps it's app.model?
    if arg.lower() == rel_to._meta.app_label.lower() + "." + rel_name.lower():
        return real_name
    # Or perhaps it's 'self'?
    if arg == RECURSIVE_RELATIONSHIP_CONSTANT:
        return real_name
    # No match: hand back the argument exactly as given (quotes intact).
    return orig_arg
### Diffing functions between sets of models
def models_diff(old, new):
    """
    Returns the difference between the old and new sets of models as an
    8-tuple (the original docstring said 5-tuple, which was wrong):
    added_models, deleted_models, continued_models, added_fields,
    deleted_fields, changed_fields, added_uniques, deleted_uniques
    """
    added_models = set()
    deleted_models = set()
    ignored_models = set()  # Stubs for backwards
    continued_models = set()  # Models that existed before and after
    added_fields = set()
    deleted_fields = set()
    changed_fields = []
    added_uniques = set()
    deleted_uniques = set()
    # See if anything's vanished
    for key in old:
        if key not in new:
            if "_stub" not in old[key]:
                deleted_models.add(key)
            else:
                ignored_models.add(key)
    # Or appeared
    for key in new:
        if key not in old:
            added_models.add(key)
    # Now, for every model that's stayed the same, check its fields.
    for key in old:
        if key not in deleted_models and key not in ignored_models:
            continued_models.add(key)
            still_there = set()
            # Find fields that have vanished.
            for fieldname in old[key]:
                if fieldname != "Meta" and fieldname not in new[key]:
                    deleted_fields.add((key, fieldname))
                else:
                    still_there.add(fieldname)
            # And ones that have appeared
            for fieldname in new[key]:
                if fieldname != "Meta" and fieldname not in old[key]:
                    added_fields.add((key, fieldname))
            # For the ones that exist in both models, see if they were changed
            for fieldname in still_there:
                if fieldname != "Meta":
                    # 'unique' is stripped by the db=True comparison; its
                    # changes are tracked separately below.
                    if different_attributes(
                            remove_useless_attributes(old[key][fieldname], True),
                            remove_useless_attributes(new[key][fieldname], True)):
                        changed_fields.append((key, fieldname, old[key][fieldname], new[key][fieldname]))
                    # See if their uniques have changed
                    old_triple = old[key][fieldname]
                    new_triple = new[key][fieldname]
                    if is_triple(old_triple) and is_triple(new_triple):
                        # Values are repr strings, hence comparison to "False".
                        if old_triple[2].get("unique", "False") != new_triple[2].get("unique", "False"):
                            # Make sure we look at the one explicitly given to see what happened
                            if "unique" in old_triple[2]:
                                if old_triple[2]['unique'] == "False":
                                    added_uniques.add((key, (fieldname,)))
                                else:
                                    deleted_uniques.add((key, (fieldname,)))
                            else:
                                if new_triple[2]['unique'] == "False":
                                    deleted_uniques.add((key, (fieldname,)))
                                else:
                                    added_uniques.add((key, (fieldname,)))
    return added_models, deleted_models, continued_models, added_fields, deleted_fields, changed_fields, added_uniques, deleted_uniques
def is_triple(triple):
    """Return True if *triple* is a (classname, args, kwargs) field definition.

    A valid triple is a list/tuple of exactly three items: a class-name
    string, a list/tuple of positional args, and a dict of keyword args.
    """
    # (str, type(u"")) is (str, unicode) on Python 2 and (str, str) on
    # Python 3 — the original referenced the Python-2-only name `unicode`
    # directly, which raises NameError on Python 3.
    text_types = (str, type(u""))
    return isinstance(triple, (list, tuple)) and len(triple) == 3 and \
           isinstance(triple[0], text_types) and \
           isinstance(triple[1], (list, tuple)) and \
           isinstance(triple[2], dict)
def different_attributes(old, new):
    """
    Backwards-compat comparison that ignores orm. on the RHS and not the left
    and which knows django.db.models.fields.CharField = models.CharField.
    Has a whole load of tests in tests/autodetection.py.

    Returns True when the two definitions differ in a way that matters.
    """
    # If they're not triples, just do normal comparison
    if not is_triple(old) or not is_triple(new):
        return old != new
    # Expand them out into parts
    old_field, old_pos, old_kwd = old
    new_field, new_pos, new_kwd = new
    # Copy the positional and keyword arguments so we can compare them and pop off things
    old_pos, new_pos = old_pos[:], new_pos[:]
    old_kwd = dict(old_kwd.items())
    new_kwd = dict(new_kwd.items())
    # Remove comparison of the existence of 'unique', that's done elsewhere.
    # TODO: Make this work for custom fields where unique= means something else?
    if "unique" in old_kwd:
        del old_kwd['unique']
    if "unique" in new_kwd:
        del new_kwd['unique']
    # If the first bit is different, check it's not by dj.db.models...
    if old_field != new_field:
        if old_field.startswith("models.") and (new_field.startswith("django.db.models")
                or new_field.startswith("django.contrib.gis")):
            if old_field.split(".")[-1] != new_field.split(".")[-1]:
                return True
            else:
                # Same class under a different import path: remove those
                # fields from the final comparison.
                old_field = new_field = ""
    # If there's a positional argument in the first, and a 'to' in the second,
    # see if they're actually comparable.
    if (old_pos and "to" in new_kwd) and ("orm" in new_kwd['to'] and "orm" not in old_pos[0]):
        # Do special comparison to fix #153: compare the bare model name on
        # the left with the model part of orm['app.model'] on the right.
        try:
            if old_pos[0] != new_kwd['to'].split("'")[1].split(".")[1]:
                return True
        except IndexError:
            pass  # Fall back to next comparison
        # Remove those attrs from the final comparison
        old_pos = old_pos[1:]
        del new_kwd['to']
    return old_field != new_field or old_pos != new_pos or old_kwd != new_kwd
def meta_diff(old, new):
    """
    Diffs the two provided Meta definitions (dicts).
    Returns (added_uniques, removed_uniques) as sets of tuples.
    """
    # unique_together is frozen as its repr string; eval it back to a list.
    old_unique_together = eval(old.get('unique_together', "[]"))
    new_unique_together = eval(new.get('unique_together', "[]"))
    removed_uniques = set(
        tuple(entry) for entry in old_unique_together
        if entry not in new_unique_together
    )
    added_uniques = set(
        tuple(entry) for entry in new_unique_together
        if entry not in old_unique_together
    )
    return added_uniques, removed_uniques
### Used to work out what columns any fields affect ###
def get_field_column(model, field_name):
    "Return the database column used by *field_name* on *model*."
    field = model._meta.get_field_by_name(field_name)[0]
    return field.column
### Creates SQL snippets for various common operations
def triples_to_defs(app, model, fields):
    """Turn each (class, args, kwargs) triple in *fields* into a defn string.

    Mutates and returns *fields*. Entries whose definition cannot be
    determined are replaced with FIELD_NEEDS_DEF_SNIPPET and a warning is
    printed so the user knows to edit the migration by hand.
    """
    # Turn the (class, args, kwargs) format into a string.
    # list(...) snapshots the items: safe on Python 3 even though we
    # reassign values below; parenthesised print works on Python 2 and 3.
    for field, triple in list(fields.items()):
        triple = remove_useless_attributes(triple)
        if triple is None:
            print("WARNING: Cannot get definition for '%s' on '%s'. Please edit the migration manually." % (
                field,
                model_key(model),
            ))
            fields[field] = FIELD_NEEDS_DEF_SNIPPET
        else:
            fields[field] = make_field_constructor(
                app,
                model._meta.get_field_by_name(field)[0],
                triple,
            )
    return fields
### Various code snippets we need to use

# Top-level template for a generated migration file: header comment,
# imports, the Migration class with forwards/backwards bodies, and the
# frozen models dict.
MIGRATION_SNIPPET = """%s
from south.db import db
from django.db import models
from %s.models import *
class Migration:
def forwards(self, orm):
%s
def backwards(self, orm):
%s
models = %s
%s
"""
# Per-operation snippets interpolated into forwards()/backwards() bodies.
CREATE_TABLE_SNIPPET = '''
# Adding model '%s'
db.create_table(%r, (
%s
))
db.send_create_signal(%r, [%r])
'''
DELETE_TABLE_SNIPPET = '''
# Deleting model '%s'
db.delete_table(%r)
'''
CREATE_FIELD_SNIPPET = '''
# Adding field '%s.%s'
db.add_column(%r, %r, %s)
'''
DELETE_FIELD_SNIPPET = '''
# Deleting field '%s.%s'
db.delete_column(%r, %r)
'''
CHANGE_FIELD_SNIPPET = '''
# Changing field '%s.%s'
# (to signature: %s)
db.alter_column(%r, %r, %s)
'''
# M2M fields get their own join table rather than a column.
CREATE_M2MFIELD_SNIPPET = '''
# Adding ManyToManyField '%s.%s'
db.create_table('%s', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('%s', models.ForeignKey(%s, null=False)),
('%s', models.ForeignKey(%s, null=False))
))
'''
DELETE_M2MFIELD_SNIPPET = '''
# Dropping ManyToManyField '%s.%s'
db.delete_table('%s')
'''
CREATE_UNIQUE_SNIPPET = '''
# Creating unique_together for [%s] on %s.
db.create_unique(%r, %r)
'''
DELETE_UNIQUE_SNIPPET = '''
# Deleting unique_together for [%s] on %s.
db.delete_unique(%r, %r)
'''
RENAME_COLUMN_SNIPPET = '''
# Renaming column for field '%(field_name)s'.
db.rename_column(%(old_column)r, %(new_column)r)
'''
# Placeholder written when a field definition cannot be reconstructed.
FIELD_NEEDS_DEF_SNIPPET = "<< PUT FIELD DEFINITION HERE >>"
CREATE_INDEX_SNIPPET = '''
# Adding index on '%s.%s'
db.create_index(%r, [%r])
'''
DELETE_INDEX_SNIPPET = '''
# Deleting index on '%s.%s'
db.delete_index(%r, [%r])
''' | agpl-3.0 |
rangertaha/salt-manager | salt-manager/webapp/apps/management/commands/bshell.py | 1 | 3463 | #!/usr/bin/env python
"""
"""
import os
from optparse import make_option
from django.core.management.base import NoArgsCommand
def starting_imports():
    """Build the locals dict used to seed the interactive shell.

    Imports every installed Django model into the local namespace and
    predefines sdt (today) / edt (tomorrow) date bounds for quick queries.
    """
    from django.db.models.loading import get_models
    for m in get_models():
        # exec as a function call is valid on both Python 2 and Python 3
        # (the original used the Python-2-only statement form).
        # NOTE(review): relies on CPython keeping exec-added names visible
        # in the frame's locals() dict returned below.
        exec("from %s import %s" % (m.__module__, m.__name__))
    from datetime import datetime, timedelta
    sdt = datetime.today().date()
    edt = sdt + timedelta(days=1)
    return locals()
def start_plain_shell(use_plain):
    """Run the stock `code.interact` REPL (the --plain / fallback mode).

    Unless *use_plain* is set, $PYTHONSTARTUP and ~/.pythonrc.py are
    honoured first, mirroring the default interpreter's behaviour.
    """
    import code
    # Set up a dictionary to serve as the environment for the shell, so
    # that tab completion works on objects that are imported at runtime.
    # See ticket 5082.
    imported_objects = {}
    try:  # Try activating rlcompleter, because it's handy.
        import readline
    except ImportError:
        pass
    else:
        # We don't have to wrap the following import in a 'try', because
        # we already know 'readline' was imported successfully.
        import rlcompleter
        readline.set_completer(rlcompleter.Completer(imported_objects).complete)
        readline.parse_and_bind("tab:complete")
    # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
    # conventions and get $PYTHONSTARTUP first then import user.
    if not use_plain:
        pythonrc = os.environ.get("PYTHONSTARTUP")
        if pythonrc and os.path.isfile(pythonrc):
            try:
                # NOTE(review): execfile and the `user` module below are
                # Python-2-only; this command predates Python 3.
                execfile(pythonrc)
            except NameError:
                pass
        # This will import .pythonrc.py as a side-effect
        import user
    code.interact(local=imported_objects)
def start_ipython_shell():
    """Start an interactive IPython session (legacy pre-0.11 Shell API)."""
    import IPython
    from IPython.Shell import IPShell
    # An explicit empty argv stops IPython from swallowing this script's
    # own sys.argv.
    ipython_shell = IPShell(argv=[])
    ipython_shell.mainloop()
def start_bpython_shell():
    """Start a bpython session seeded with all models and date helpers."""
    from bpython import cli
    shell_locals = starting_imports()
    cli.main(args=[], locals_=shell_locals)
class Command(NoArgsCommand):
    """`manage.py bshell`: open an interactive shell, preferring bpython."""
    option_list = NoArgsCommand.option_list + (
        make_option('--plain', action='store_true', dest='plain',
                    help='Tells Django to use plain Python, not IPython.'),
        make_option('--ipython', action='store_true', dest='ipython',
                    help='Tells Django to use ipython.'),
        make_option('--bpython', action='store_true', dest='bpython',
                    help='Tells Django to use bpython.'),
    )
    help = "Runs a Python interactive interpreter. Tries to use bPython, if it's available."
    requires_model_validation = False

    def handle_noargs(self, **options):
        """Dispatch to the requested shell; fall back to plain on ImportError."""
        # XXX: (Temporary) workaround for ticket #1796: force early loading of all
        # models from installed apps.
        from django.db.models.loading import get_models
        loaded_models = get_models()
        use_plain = options.get('plain', False)
        use_ipython = options.get('ipython', False)
        use_bpython = options.get('bpython', False)
        try:
            if use_plain:
                # Don't bother loading IPython, because the user wants plain Python.
                raise ImportError
            elif use_ipython:
                start_ipython_shell()
            elif use_bpython:
                start_bpython_shell()
            else:
                # No flag given: default to bpython, same as --bpython.
                start_bpython_shell()
        except ImportError:
            # fallback to plain shell if we encounter an ImportError
            start_plain_shell(use_plain)
| mit |
tersmitten/ansible | lib/ansible/modules/cloud/vmware/vmware_host_ntp.py | 26 | 15749 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_ntp
short_description: Manage NTP server configuration of an ESXi host
description:
- This module can be used to configure, add or remove NTP servers from an ESXi host.
- If C(state) is not given, the NTP servers will be configured in the exact sequence.
- User can specify an ESXi hostname or Cluster name. In case of cluster name, all ESXi hosts are updated.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
- Christian Kotte (@ckotte)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
esxi_hostname:
description:
- Name of the host system to work with.
- This parameter is required if C(cluster_name) is not specified.
type: str
cluster_name:
description:
- Name of the cluster from which all host systems will be used.
- This parameter is required if C(esxi_hostname) is not specified.
type: str
ntp_servers:
description:
- "IP or FQDN of NTP server(s)."
- This accepts a list of NTP servers. For multiple servers, please look at the examples.
type: list
required: True
state:
description:
- "present: Add NTP server(s), if specified server(s) are absent else do nothing."
- "absent: Remove NTP server(s), if specified server(s) are present else do nothing."
- Specified NTP server(s) will be configured if C(state) isn't specified.
choices: [ present, absent ]
type: str
verbose:
description:
- Verbose output of the configuration change.
- Explains if an NTP server was added, removed, or if the NTP server sequence was changed.
type: bool
required: false
default: false
version_added: 2.8
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Configure NTP servers for an ESXi Host
vmware_host_ntp:
hostname: vcenter01.example.local
username: administrator@vsphere.local
password: SuperSecretPassword
esxi_hostname: esx01.example.local
ntp_servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
delegate_to: localhost
- name: Set NTP servers for all ESXi Host in given Cluster
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
state: present
ntp_servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
delegate_to: localhost
- name: Set NTP servers for an ESXi Host
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: present
ntp_servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
delegate_to: localhost
- name: Remove NTP servers for an ESXi Host
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: absent
ntp_servers:
- bad.server.ntp.org
delegate_to: localhost
'''
RETURN = r'''
results:
description: metadata about host system's NTP configuration
returned: always
type: dict
sample: {
"esx01.example.local": {
"ntp_servers_changed": ["time1.example.local", "time2.example.local", "time3.example.local", "time4.example.local"],
"ntp_servers": ["time3.example.local", "time4.example.local"],
"ntp_servers_previous": ["time1.example.local", "time2.example.local"],
},
"esx02.example.local": {
"ntp_servers_changed": ["time3.example.local"],
"ntp_servers_current": ["time1.example.local", "time2.example.local", "time3.example.local"],
"state": "present",
"ntp_servers_previous": ["time1.example.local", "time2.example.local"],
},
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VmwareNtpConfigManager(PyVmomi):
    """Class to manage configured NTP servers"""

    def __init__(self, module):
        """Gather target hosts and module parameters.

        Fails the module early if neither the cluster nor the ESXi host
        name yields any host systems to configure.
        """
        super(VmwareNtpConfigManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.ntp_servers = self.params.get('ntp_servers', list())
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")
        # Per-host result dict keyed by host name; filled by check_host_state().
        self.results = {}
        # None means "overwrite the server list"; 'present'/'absent' add/remove.
        self.desired_state = self.params.get('state', None)
        self.verbose = module.params.get('verbose', False)
    def update_ntp_servers(self, host, ntp_servers_configured, ntp_servers_to_change, operation='overwrite'):
        """Apply an NTP server change to *host* and return the new list.

        *operation* is 'overwrite', 'add' or 'delete'. Honours Ansible
        check mode (the vSphere call is skipped but the message/result is
        still produced).
        """
        host_date_time_manager = host.configManager.dateTimeSystem
        if host_date_time_manager:
            # Prepare new NTP server list
            if operation == 'overwrite':
                new_ntp_servers = list(ntp_servers_to_change)
            else:
                new_ntp_servers = list(ntp_servers_configured)
                if operation == 'add':
                    new_ntp_servers = new_ntp_servers + ntp_servers_to_change
                elif operation == 'delete':
                    for server in ntp_servers_to_change:
                        if server in new_ntp_servers:
                            new_ntp_servers.remove(server)
            # build verbose message
            if self.verbose:
                message = self.build_changed_message(
                    ntp_servers_configured,
                    new_ntp_servers,
                    ntp_servers_to_change,
                    operation
                )
            ntp_config_spec = vim.host.NtpConfig()
            ntp_config_spec.server = new_ntp_servers
            date_config_spec = vim.host.DateTimeConfig()
            date_config_spec.ntpConfig = ntp_config_spec
            try:
                if not self.module.check_mode:
                    host_date_time_manager.UpdateDateTimeConfig(date_config_spec)
                if self.verbose:
                    self.results[host.name]['msg'] = message
            except vim.fault.HostConfigFault as config_fault:
                self.module.fail_json(
                    msg="Failed to configure NTP for host '%s' due to : %s" %
                        (host.name, to_native(config_fault.msg))
                )
        # NOTE(review): if the host exposes no dateTimeSystem this returns an
        # unbound name — the code assumes the manager always exists; confirm.
        return new_ntp_servers
    def check_host_state(self):
        """Compare desired vs. configured NTP servers for every host, apply
        changes as needed, and exit the module with per-host results."""
        change_list = []
        changed = False
        for host in self.hosts:
            self.results[host.name] = dict()
            ntp_servers_configured, ntp_servers_to_change = self.check_ntp_servers(host=host)
            # add/remove NTP servers
            if self.desired_state:
                self.results[host.name]['state'] = self.desired_state
                if ntp_servers_to_change:
                    self.results[host.name]['ntp_servers_changed'] = ntp_servers_to_change
                    operation = 'add' if self.desired_state == 'present' else 'delete'
                    new_ntp_servers = self.update_ntp_servers(
                        host=host,
                        ntp_servers_configured=ntp_servers_configured,
                        ntp_servers_to_change=ntp_servers_to_change,
                        operation=operation
                    )
                    self.results[host.name]['ntp_servers_current'] = new_ntp_servers
                    self.results[host.name]['changed'] = True
                    change_list.append(True)
                else:
                    self.results[host.name]['ntp_servers_current'] = ntp_servers_configured
                    if self.verbose:
                        self.results[host.name]['msg'] = (
                            "NTP servers already added" if self.desired_state == 'present'
                            else "NTP servers already removed"
                        )
                    self.results[host.name]['changed'] = False
                    change_list.append(False)
            # overwrite NTP servers (no 'state' given)
            else:
                self.results[host.name]['ntp_servers'] = self.ntp_servers
                if ntp_servers_to_change:
                    self.results[host.name]['ntp_servers_changed'] = self.get_differt_entries(
                        ntp_servers_configured,
                        ntp_servers_to_change
                    )
                    self.update_ntp_servers(
                        host=host,
                        ntp_servers_configured=ntp_servers_configured,
                        ntp_servers_to_change=ntp_servers_to_change,
                        operation='overwrite'
                    )
                    self.results[host.name]['changed'] = True
                    change_list.append(True)
                else:
                    if self.verbose:
                        self.results[host.name]['msg'] = "NTP servers already configured"
                    self.results[host.name]['changed'] = False
                    change_list.append(False)
        # 'changed' is True if any single host changed.
        if any(change_list):
            changed = True
        self.module.exit_json(changed=changed, results=self.results)
    def check_ntp_servers(self, host):
        """Return (configured_servers, servers_needing_change) for *host*."""
        update_ntp_list = []
        host_datetime_system = host.configManager.dateTimeSystem
        if host_datetime_system:
            ntp_servers_configured = host_datetime_system.dateTimeInfo.ntpConfig.server
            # add/remove NTP servers
            if self.desired_state:
                for ntp_server in self.ntp_servers:
                    if self.desired_state == 'present' and ntp_server not in ntp_servers_configured:
                        update_ntp_list.append(ntp_server)
                    if self.desired_state == 'absent' and ntp_server in ntp_servers_configured:
                        update_ntp_list.append(ntp_server)
            # overwrite NTP servers
            else:
                # Any difference (content or order) triggers a full overwrite.
                if ntp_servers_configured != self.ntp_servers:
                    for ntp_server in self.ntp_servers:
                        update_ntp_list.append(ntp_server)
            if update_ntp_list:
                self.results[host.name]['ntp_servers_previous'] = ntp_servers_configured
        # NOTE(review): ntp_servers_configured is unbound when the host lacks
        # a dateTimeSystem; the code assumes it is always present — confirm.
        return ntp_servers_configured, update_ntp_list
    def build_changed_message(self, ntp_servers_configured, new_ntp_servers, ntp_servers_to_change, operation):
        """Build the human-readable change message for verbose output.

        Describes additions, removals and/or sequence changes; prefixes
        'would be ' in check mode.
        """
        check_mode = 'would be ' if self.module.check_mode else ''
        if operation == 'overwrite':
            # get differences
            add = self.get_not_in_list_one(new_ntp_servers, ntp_servers_configured)
            remove = self.get_not_in_list_one(ntp_servers_configured, new_ntp_servers)
            # diff_servers simulates the add/remove on the old list; if the
            # result still differs from new_ntp_servers, the order changed too.
            diff_servers = list(ntp_servers_configured)
            if add and remove:
                for server in add:
                    diff_servers.append(server)
                for server in remove:
                    diff_servers.remove(server)
                if new_ntp_servers != diff_servers:
                    message = (
                        "NTP server %s %sadded and %s %sremoved and the server sequence %schanged as well" %
                        (self.array_to_string(add), check_mode, self.array_to_string(remove), check_mode, check_mode)
                    )
                else:
                    if new_ntp_servers != ntp_servers_configured:
                        message = (
                            "NTP server %s %sreplaced with %s" %
                            (self.array_to_string(remove), check_mode, self.array_to_string(add))
                        )
                    else:
                        message = (
                            "NTP server %s %sremoved and %s %sadded" %
                            (self.array_to_string(remove), check_mode, self.array_to_string(add), check_mode)
                        )
            elif add:
                for server in add:
                    diff_servers.append(server)
                if new_ntp_servers != diff_servers:
                    message = (
                        "NTP server %s %sadded and the server sequence %schanged as well" %
                        (self.array_to_string(add), check_mode, check_mode)
                    )
                else:
                    message = "NTP server %s %sadded" % (self.array_to_string(add), check_mode)
            elif remove:
                for server in remove:
                    diff_servers.remove(server)
                if new_ntp_servers != diff_servers:
                    message = (
                        "NTP server %s %sremoved and the server sequence %schanged as well" %
                        (self.array_to_string(remove), check_mode, check_mode)
                    )
                else:
                    message = "NTP server %s %sremoved" % (self.array_to_string(remove), check_mode)
            else:
                # Same membership, different order.
                message = "NTP server sequence %schanged" % check_mode
        elif operation == 'add':
            message = "NTP server %s %sadded" % (self.array_to_string(ntp_servers_to_change), check_mode)
        elif operation == 'delete':
            message = "NTP server %s %sremoved" % (self.array_to_string(ntp_servers_to_change), check_mode)
        return message
@staticmethod
def get_not_in_list_one(list1, list2):
"""Return entries that ore not in list one"""
return [x for x in list1 if x not in set(list2)]
@staticmethod
def array_to_string(array):
"""Return string from array"""
if len(array) > 2:
string = (
', '.join("'{0}'".format(element) for element in array[:-1]) + ', and '
+ "'{0}'".format(str(array[-1]))
)
elif len(array) == 2:
string = ' and '.join("'{0}'".format(element) for element in array)
elif len(array) == 1:
string = "'{0}'".format(array[0])
return string
@staticmethod
def get_differt_entries(list1, list2):
"""Return different entries of two lists"""
return [a for a in list1 + list2 if (a not in list1) or (a not in list2)]
def main():
    """Ansible module entry point: build the arg spec and run the manager."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        ntp_servers=dict(type='list', required=True),
        # Deliberately no default: absence of 'state' selects overwrite mode.
        state=dict(type='str', choices=['absent', 'present']),
        verbose=dict(type='bool', default=False, required=False)
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True
    )
    vmware_host_ntp_config = VmwareNtpConfigManager(module)
    # check_host_state() applies any changes and calls module.exit_json().
    vmware_host_ntp_config.check_host_state()


if __name__ == "__main__":
    main()
| gpl-3.0 |
apawar2/php-buildpack | tests/test_rewrite.py | 15 | 5017 | import os
import os.path
import tempfile
import shutil
import subprocess
import imp
from nose.tools import eq_
class BaseRewriteScript(object):
    """Shared fixture: loads the runner module and makes temp config/run dirs."""

    def __init__(self):
        # NOTE(review): `imp` is deprecated on Python 3; this suite targets
        # Python 2.
        info = imp.find_module('runner', ['lib/build_pack_utils'])
        self.run = imp.load_module('runner', *info)

    def setUp(self):
        """Reserve a config dir path and create a run dir with logs/ and bin/."""
        self.rewrite = os.path.abspath("bin/rewrite")
        self.env = {'PYTHONPATH': os.path.abspath('lib')}
        self.env.update(os.environ)
        # setup config
        self.cfg_dir = tempfile.mkdtemp(prefix='config-')
        # Removed immediately: subclasses copytree() into this path, and
        # copytree requires the destination not to exist.
        os.rmdir(self.cfg_dir)
        # setup directory to run from
        self.run_dir = tempfile.mkdtemp(prefix='run-')
        os.makedirs(os.path.join(self.run_dir, 'logs'))
        os.makedirs(os.path.join(self.run_dir, 'bin'))

    def tearDown(self):
        """Remove any scratch directories left behind by setUp/tests."""
        if os.path.exists(self.cfg_dir):
            shutil.rmtree(self.cfg_dir)
        if os.path.exists(self.run_dir):
            shutil.rmtree(self.run_dir)
class TestRewriteScriptPhp(BaseRewriteScript):
    """Tests for `bin/rewrite` against the PHP 5.5.x default config."""

    def __init__(self):
        BaseRewriteScript.__init__(self)

    def setUp(self):
        BaseRewriteScript.setUp(self)
        shutil.copytree('defaults/config/php/5.5.x', self.cfg_dir)

    def tearDown(self):
        BaseRewriteScript.tearDown(self)

    def test_rewrite_no_args(self):
        """Running rewrite without arguments must fail with a usage message."""
        try:
            self.run.check_output(self.rewrite,
                                  cwd=self.run_dir,
                                  env=self.env,
                                  stderr=subprocess.STDOUT,
                                  shell=True)
            assert False
        # 'except ... as e' replaces the Python-2-only 'except ..., e' form;
        # it is valid on Python 2.6+ and Python 3.
        except self.run.CalledProcessError as e:
            eq_('Argument required! Specify path to configuration '
                'directory.\n', e.output)
            eq_(255, e.returncode)

    def test_rewrite_arg_file(self):
        """Rewriting a single file resolves all @{...} placeholders in it."""
        cfg_file = os.path.join(self.cfg_dir, 'php.ini')
        res = self.run.check_output("%s %s" % (self.rewrite, cfg_file),
                                    env=self.env,
                                    cwd=self.run_dir,
                                    stderr=subprocess.STDOUT,
                                    shell=True)
        eq_('', res)
        with open(os.path.join(self.cfg_dir, 'php.ini')) as fin:
            cfgFile = fin.read()
            eq_(-1, cfgFile.find('@{HOME}'))
            eq_(-1, cfgFile.find('@{TMPDIR}'))

    def test_rewrite_arg_dir(self):
        """Rewriting a directory resolves placeholders in every file."""
        res = self.run.check_output("%s %s" % (self.rewrite, self.cfg_dir),
                                    env=self.env,
                                    cwd=self.run_dir,
                                    stderr=subprocess.STDOUT,
                                    shell=True)
        eq_('', res)
        with open(os.path.join(self.cfg_dir, 'php.ini')) as fin:
            cfgFile = fin.read()
            eq_(-1, cfgFile.find('@{HOME}'))
            eq_(-1, cfgFile.find('@{TMPDIR}'))
        with open(os.path.join(self.cfg_dir, 'php-fpm.conf')) as fin:
            cfgFile = fin.read()
            eq_(-1, cfgFile.find('@{HOME}'))
            eq_(-1, cfgFile.find('@{TMPDIR}'))
            eq_(True, cfgFile.find('www@my.domain.com') >= 0)
class TestRewriteScriptWithHttpd(BaseRewriteScript):
    """Tests for `bin/rewrite` against the httpd 2.4.x default config tree."""

    def __init__(self):
        BaseRewriteScript.__init__(self)

    def setUp(self):
        BaseRewriteScript.setUp(self)
        shutil.copytree('defaults/config/httpd/2.4.x', self.cfg_dir)

    def tearDown(self):
        BaseRewriteScript.tearDown(self)

    def test_rewrite_with_sub_dirs(self):
        """Placeholders must be resolved recursively through sub-directories."""
        res = self.run.check_output("%s %s" % (self.rewrite, self.cfg_dir),
                                    env=self.env,
                                    cwd=self.run_dir,
                                    stderr=subprocess.STDOUT,
                                    shell=True)
        eq_('', res)
        for root, dirs, files in os.walk(self.cfg_dir):
            for f in files:
                with open(os.path.join(root, f)) as fin:
                    # No '@{' placeholder may survive anywhere in the tree.
                    eq_(-1, fin.read().find('@{'))
class TestRewriteScriptWithNginx(BaseRewriteScript):
    """Tests for `bin/rewrite` against the nginx 1.8.x default config tree."""

    def __init__(self):
        BaseRewriteScript.__init__(self)

    def setUp(self):
        BaseRewriteScript.setUp(self)
        # nginx templates reference @{PORT}, so PORT must be in the env.
        self.env = {'PYTHONPATH': os.path.abspath('lib'),
                    'PORT': '80'}
        self.env.update(os.environ)
        shutil.copytree('defaults/config/nginx/1.8.x', self.cfg_dir)

    def tearDown(self):
        BaseRewriteScript.tearDown(self)

    def test_rewrite(self):
        """All placeholders must be resolved in every file of the tree."""
        res = self.run.check_output("%s %s" % (self.rewrite, self.cfg_dir),
                                    env=self.env,
                                    cwd=self.run_dir,
                                    stderr=subprocess.STDOUT,
                                    shell=True)
        eq_('', res)
        for root, dirs, files in os.walk(self.cfg_dir):
            for f in files:
                with open(os.path.join(root, f)) as fin:
                    eq_(-1, fin.read().find('@{'), f)
| apache-2.0 |
NodesForAll/live | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'  # per-frame temp filename pattern
NUMFRAMES=35            # rotation steps in one full revolution
FRAMERATE=10.0          # inter-frame delay passed to ImageMagick
CONVERT='convert'       # ImageMagick binary
CLOCKWISE=True
DSIZE=(16,16)           # output frame size in pixels

im_src = Image.open(SRC)

# Mirror first so the counter-clockwise rotation below plays clockwise.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
    """Return the temp-file path for the given frame index."""
    filename = TMPNAME % frame
    return path.join(TMPDIR, filename)
frame_files = []
# range works on Python 2 and 3 (the original used the Python-2-only
# xrange); iteration behaviour is identical for these 35 frames.
for frame in range(NUMFRAMES):
    # Rotate by the frame's midpoint angle so frames are evenly spaced.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# Stitch the frames into the final .mng animation with ImageMagick.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
nall/pythonista-tradervue | utils.py | 1 | 5565 | # vim: ft=python tabstop=2 shiftwidth=2 expandtab
# Copyright (c) 2015, Jon Nall
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pythonista-tradervue nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import clipboard
import console
import keychain
import logging
import os
import re
import sys
from datetime import datetime, date
sys.path.insert(0, os.path.realpath(os.path.join(os.getcwd(), 'tradervue')))
from tradervue import Tradervue, TradervueLogFormatter
LOG = None
DEBUG = 0 # 1 for normal debug, 2 for HTTP debug as well
KEYCHAIN_ID = 'tradervue'
USER_AGENT = "Pythonista Tradervue (jon.nall@gmail.com)"
def get_args(argv):
  """Parse 'key=value' pairs from argv into an argument dict.

  Each argv entry may contain several colon-separated 'key=value' pairs.
  Recognized keys are 'action', 'user', 'text', 'date' and 'overwrite';
  anything else raises ValueError.  The clipboard contents are used as the
  default 'text' and today's date as the default 'date'.

  Returns:
    dict with 'overwrite' normalized to a bool and 'date' to a datetime.

  Raises:
    ValueError: on an unknown key or a date not in YYYYMMDD form.
  """
  args = { 'action': 'set_password',
           'user': None,
           'text': clipboard.get(),
           'date': date.today().strftime('%Y%m%d'),
           'overwrite': "0" }

  for a in argv:
    pairs = a.split(':')
    for p in pairs:
      # BUG FIX: maxsplit must be 1 (not 2) so that values containing '='
      # don't produce a third field and blow up the tuple unpacking.
      (k, v) = p.split('=', 1)
      if k not in args:
        raise ValueError("Invalid argument '%s'" % (k))
      args[k] = v

  if args['user'] is None:
    args['user'] = console.input_alert("Tradervue Username")

  if not re.match(r'^\d{8}$', args['date']):
    raise ValueError("Invalid date format '%s'. Must be YYYYMMDD" % (args['date']))

  # Normalize the textual flag to a real boolean.
  args['overwrite'] = int(args['overwrite']) != 0

  args['date'] = datetime.strptime(args['date'], '%Y%m%d')
  return args
def set_password(args):
  """Prompt for the Tradervue password of args['user'] and store it.

  The credential is saved under KEYCHAIN_ID in the iOS keychain.
  Always returns True.
  """
  password = console.password_alert("Tradervue Credentials", args['user'])
  keychain.set_password(KEYCHAIN_ID, args['user'], password)
  return True
def delete_password(args):
  """Delete the stored keychain credentials for args['user'].

  Returns True when a credential was removed, False when none existed.
  """
  user = args['user']
  if keychain.get_password(KEYCHAIN_ID, user) is None:
    LOG.error("No password was set for %s" % (user))
    return False
  keychain.delete_password(KEYCHAIN_ID, user)
  LOG.info("Deleted credentials for %s" % (user))
  return True
def new_note(args, tv):
  """Create a Tradervue note from args['text'] via the API client `tv`.

  Returns True on success, False if the API reported no note ID.
  """
  note_id = tv.create_note(args['text'])
  if note_id is None:
    LOG.error("Failed to create new note")
    return False
  LOG.info("Created new note with ID %s" % (note_id))
  return True
def update_journal(args, tv):
  """Create or update the Tradervue journal entry for args['date'].

  If no entry exists for the date, a new one is created from args['text'].
  Otherwise the existing entry is overwritten when args['overwrite'] is
  True, or args['text'] is appended to it when False.

  Returns:
    True on success, False on any API failure.
  """
  datestring = args['date'].strftime('%Y-%m-%d')

  # Check if we have an existing entry on the date. If not, just create it
  # Otherwise overwrite it if args['overwrite'] is set or append to it if not
  #
  journal = tv.get_journal(date = args['date'])
  if journal is None:
    journal_id = tv.create_journal(args['date'], args['text'])
    if journal_id is None:
      LOG.error("Failed to create journal on %s" % (datestring))
      return False
    LOG.info("Created new journal on %s with ID %s" % (datestring, journal_id))
    return True
  else:
    verb = 'Appended'
    text = journal['notes']
    if args['overwrite']:
      verb = 'Overwrite'
      text = ''
    text += "\n%s" % (args['text'])
    # BUG FIX: removed a leftover Python-2-only debug `print text` that
    # dumped the full journal body to the console on every update.
    if tv.update_journal(journal['id'], text):
      # BUG FIX: the date and journal ID were swapped in this log message.
      LOG.info("%s journal on %s (ID %s)" % (verb, datestring, journal['id']))
      return True
    else:
      LOG.error("Failed to update journal on %s (ID %s)" % (datestring, journal['id']))
      return False
def main():
  """Entry point: configure logging, parse arguments, dispatch the action.

  Returns:
    0 on success, 1 on failure (suitable for sys.exit()).

  Raises:
    ValueError: if the requested action is unknown.
  """
  global LOG
  LOG = logging.getLogger()
  LOG.setLevel(logging.DEBUG if DEBUG > 1 else logging.INFO)
  c = logging.StreamHandler()
  c.setFormatter(TradervueLogFormatter())
  LOG.addHandler(c)

  args = get_args(sys.argv[1:])

  actions = { 'set_password': set_password,
              'delete_password': delete_password,
              'new_note': new_note,
              'update_journal': update_journal }

  ok = False
  if args['action'] not in actions:
    raise ValueError("Invalid action '%s'" % (args['action']))
  elif args['action'].endswith('_password'):
    # Keychain maintenance actions don't need an API client.
    ok = actions[args['action']](args)
  else:
    p = keychain.get_password(KEYCHAIN_ID, args['user'])
    if p is None:
      # No stored credentials: prompt for them interactively.
      p = console.password_alert("Tradervue Credentials", args['user'])
    # BUG FIX: previously the action only ran when a stored password already
    # existed -- after prompting, the code fell through without ever building
    # the client or executing the action.
    tv = Tradervue(args['user'], p, USER_AGENT,
                   verbose_http = True if DEBUG > 1 else False)
    ok = actions[args['action']](args, tv)
  return 0 if ok else 1
# Script entry point: propagate main()'s exit status to the shell.
if __name__ == "__main__":
  sys.exit(main())
| bsd-3-clause |
taknevski/tensorflow-xsmm | tensorflow/python/framework/device.py | 150 | 9078 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to represent a device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
class DeviceSpec(object):
  """Represents a (possibly partial) specification for a TensorFlow device.

  `DeviceSpec`s are used throughout TensorFlow to describe where state is stored
  and computations occur. Using `DeviceSpec` allows you to parse device spec
  strings to verify their validity, merge them or compose them programmatically.

  Example:

  ```python
  # Place the operations on device "GPU:0" in the "ps" job.
  device_spec = DeviceSpec(job="ps", device_type="GPU", device_index=0)
  with tf.device(device_spec):
    # Both my_var and squared_var will be placed on /job:ps/device:GPU:0.
    my_var = tf.Variable(..., name="my_variable")
    squared_var = tf.square(my_var)
  ```

  If a `DeviceSpec` is partially specified, it will be merged with other
  `DeviceSpec`s according to the scope in which it is defined. `DeviceSpec`
  components defined in inner scopes take precedence over those defined in
  outer scopes.

  ```python
  with tf.device(DeviceSpec(job="train", )):
    with tf.device(DeviceSpec(job="ps", device_type="GPU", device_index=0):
      # Nodes created here will be assigned to /job:ps/device:GPU:0.
    with tf.device(DeviceSpec(device_type="GPU", device_index=1):
      # Nodes created here will be assigned to /job:train/device:GPU:1.
  ```

  A `DeviceSpec` consists of 5 components -- each of
  which is optionally specified:

  * Job: The job name.
  * Replica: The replica index.
  * Task: The task index.
  * Device type: The device type string (e.g. "CPU" or "GPU").
  * Device index: The device index.
  """

  def __init__(self, job=None, replica=None, task=None, device_type=None,
               device_index=None):
    """Create a new `DeviceSpec` object.

    Args:
      job: string. Optional job name.
      replica: int. Optional replica index.
      task: int. Optional task index.
      device_type: Optional device type string (e.g. "CPU" or "GPU")
      device_index: int. Optional device index. If left
        unspecified, device represents 'any' device_index.
    """
    self.job = job
    self.replica = replica
    self.task = task
    if device_type == "cpu" or device_type == "gpu":
      # For backwards compatibility only, we support lowercase variants of
      # cpu and gpu but turn them into uppercase here.
      self.device_type = device_type.upper()
    else:
      self.device_type = device_type
    self.device_index = device_index

  def _clear(self):
    # Reset all five components; used by parse_from_string before re-parsing.
    # Assigns the private attributes directly, bypassing the coercing
    # property setters (None needs no coercion).
    self._job = None
    self._replica = None
    self._task = None
    self.device_type = None
    self.device_index = None

  @property
  def job(self):
    return self._job

  @job.setter
  def job(self, job):
    # Coerce to str; None clears the component.
    if job is not None:
      self._job = str(job)
    else:
      self._job = None

  @property
  def replica(self):
    return self._replica

  @replica.setter
  def replica(self, replica):
    # Coerce to int; None clears the component.
    if replica is not None:
      self._replica = int(replica)
    else:
      self._replica = None

  @property
  def task(self):
    return self._task

  @task.setter
  def task(self, task):
    # Coerce to int; None clears the component.
    if task is not None:
      self._task = int(task)
    else:
      self._task = None

  def parse_from_string(self, spec):
    """Parse a `DeviceSpec` name into its components.

    Args:
      spec: a string of the form
       /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
      or
       /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
      as cpu and gpu are mutually exclusive.
      All entries are optional.

    Returns:
      The `DeviceSpec`.

    Raises:
      ValueError: if the spec was not valid.
    """
    self._clear()
    # Split "/job:ps/device:GPU:0" into [[''], ['job', 'ps'],
    # ['device', 'GPU', '0']] and dispatch on each component's shape.
    splits = [x.split(":") for x in spec.split("/")]
    for y in splits:
      ly = len(y)
      if y:
        # NOTE(touts): we use the property getters here.
        if ly == 2 and y[0] == "job":
          self.job = y[1]
        elif ly == 2 and y[0] == "replica":
          self.replica = y[1]
        elif ly == 2 and y[0] == "task":
          self.task = y[1]
        elif ((ly == 1 or ly == 2) and
              ((y[0].upper() == "GPU") or (y[0].upper() == "CPU"))):
          # Legacy form without the "device:" prefix, e.g. "/GPU:0" or "/CPU".
          if self.device_type is not None:
            raise ValueError("Cannot specify multiple device types: %s" % spec)
          self.device_type = y[0].upper()
          if ly == 2 and y[1] != "*":
            # "*" means 'any device index': device_index stays None.
            self.device_index = int(y[1])
        elif ly == 3 and y[0] == "device":
          # Canonical form: "device:<TYPE>:<index|*>".
          if self.device_type is not None:
            raise ValueError("Cannot specify multiple device types: %s" % spec)
          self.device_type = y[1]
          if y[2] != "*":
            self.device_index = int(y[2])
        elif ly and y[0] != "":  # pylint: disable=g-explicit-bool-comparison
          raise ValueError("Unknown attribute: '%s' in '%s'" % (y[0], spec))
    return self

  def merge_from(self, dev):
    """Merge the properties of "dev" into this `DeviceSpec`.

    Components that are set (not None) on `dev` overwrite the corresponding
    components of `self`; unset components of `dev` are ignored.

    Args:
      dev: a `DeviceSpec`.
    """
    if dev.job is not None:
      self.job = dev.job
    if dev.replica is not None:
      self.replica = dev.replica
    if dev.task is not None:
      self.task = dev.task
    if dev.device_type is not None:
      self.device_type = dev.device_type
    if dev.device_index is not None:
      self.device_index = dev.device_index

  def to_string(self):
    """Return a string representation of this `DeviceSpec`.

    Only components that are set are emitted; an unset device_index is
    rendered as "*".

    Returns:
      a string of the form
      /job:<name>/replica:<id>/task:<id>/device:<device_type>:<id>.
    """
    dev = ""
    if self.job is not None:
      dev += "/job:" + self.job
    if self.replica is not None:
      dev += "/replica:" + str(self.replica)
    if self.task is not None:
      dev += "/task:" + str(self.task)
    if self.device_type is not None:
      device_index_string = "*"
      if self.device_index is not None:
        device_index_string = str(self.device_index)
      dev += "/device:%s:%s" % (self.device_type, device_index_string)
    return dev

  @staticmethod
  def from_string(spec):
    """Construct a `DeviceSpec` from a string.

    Args:
      spec: a string of the form
       /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
      or
       /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
      as cpu and gpu are mutually exclusive.
      All entries are optional.

    Returns:
      A DeviceSpec.
    """
    return DeviceSpec().parse_from_string(spec)
def check_valid(spec):
  """Check that a device spec is valid.

  Args:
    spec: a string.

  Raises:
    An exception if the spec is invalid.
  """
  # Parsing performs all the validation; the parsed object is discarded.
  DeviceSpec().parse_from_string(spec)
def canonical_name(device):
  """Returns a canonical name for the given `DeviceSpec` or device name."""
  if device is None:
    return ""
  # Accept either an already-parsed DeviceSpec or a raw spec string.
  spec = device if isinstance(device, DeviceSpec) else (
      DeviceSpec.from_string(device))
  return spec.to_string()
def merge_device(spec):
  """Returns a device function that merges devices specifications.

  This can be used to merge partial specifications of devices. The
  innermost setting for a device field takes precedence. For example:

    with tf.device(merge_device("/device:GPU:0"))
      # Nodes created here have device "/device:GPU:0"
      with tf.device(merge_device("/job:worker")):
        # Nodes created here have device "/job:worker/device:GPU:0"
        with tf.device(merge_device("/device:CPU:0")):
          # Nodes created here have device "/job:worker/device:CPU:0"
          with tf.device(merge_device("/job:ps")):
            # Nodes created here have device "/job:ps/device:CPU:0"

  Args:
    spec: A `DeviceSpec` or a device spec string (partially) describing the
      device that should be used for all nodes created in the scope of
      the returned device function's with block.

  Returns:
    A device function with the above-described behavior.

  Raises:
    ValueError: if the spec was not valid.
  """
  if not isinstance(spec, DeviceSpec):
    # An empty or None string parses to an all-unset DeviceSpec.
    spec = DeviceSpec.from_string(spec or "")

  def _device_function(node_def):
    # Fields already set on the node win over the outer `spec`.
    node_device = DeviceSpec.from_string(node_def.device or "")
    merged = copy.copy(spec)
    merged.merge_from(node_device)
    return merged

  return _device_function
| apache-2.0 |
smcoll/stormpath-django | django_stormpath/social.py | 1 | 7183 | from django.contrib.auth import login as django_login
from django.shortcuts import resolve_url
from django.core.urlresolvers import reverse
from django.conf import settings
from stormpath.error import Error as StormpathError
from stormpath.resources.provider import Provider
from requests_oauthlib import OAuth2Session
from .models import CLIENT, APPLICATION
from .backends import StormpathSocialBackend
SOCIAL_AUTH_BACKEND = 'django_stormpath.backends.StormpathSocialBackend'
GITHUB_AUTHORIZATION_BASE_URL = 'https://github.com/login/oauth/authorize'
GITHUB_TOKEN_URL = 'https://github.com/login/oauth/access_token'
GOOGLE_AUTHORIZATION_BASE_URL = 'https://accounts.google.com/o/oauth2/auth'
GOOGLE_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
FACEBOOK_AUTHORIZATION_BASE_URL = 'https://www.facebook.com/dialog/oauth'
FACEBOOK_TOKEN_URL = 'https://graph.facebook.com/oauth/access_token'
LINKEDIN_AUTHORIZATION_BASE_URL = 'https://www.linkedin.com/uas/oauth2/authorization'
LINKEDIN_TOKEN_URL = 'https://www.linkedin.com/uas/oauth2/accessToken'
def _get_django_user(account):
    """Resolve a Stormpath account to its Django user via the social backend."""
    return StormpathSocialBackend().authenticate(account=account)
def get_access_token(provider, authorization_response, redirect_uri):
    """Exchange an OAuth2 authorization response for an access token.

    Dispatches on `provider` to the matching token endpoint, using the
    client credentials configured in settings.STORMPATH_SOCIAL.

    Args:
        provider: a stormpath Provider id (GOOGLE, FACEBOOK, GITHUB, LINKEDIN).
        authorization_response: the full callback URL containing the auth code.
        redirect_uri: the absolute callback URI registered with the provider.

    Returns:
        The access token string, or None for an unrecognized provider.
    """
    if provider == Provider.GOOGLE:
        p = OAuth2Session(
            client_id=settings.STORMPATH_SOCIAL['GOOGLE']['client_id'],
            redirect_uri=redirect_uri
        )
        ret = p.fetch_token(GOOGLE_TOKEN_URL,
                client_secret=settings.STORMPATH_SOCIAL['GOOGLE']['client_secret'],
                authorization_response=authorization_response)
        return ret['access_token']
    elif provider == Provider.FACEBOOK:
        p = OAuth2Session(
            client_id=settings.STORMPATH_SOCIAL['FACEBOOK']['client_id'],
            redirect_uri=redirect_uri
        )
        # Facebook's token endpoint deviates from the OAuth2 spec; apply the
        # requests-oauthlib compliance fix before exchanging the code.
        from requests_oauthlib.compliance_fixes import facebook_compliance_fix
        p = facebook_compliance_fix(p)
        ret = p.fetch_token(FACEBOOK_TOKEN_URL,
                client_secret=settings.STORMPATH_SOCIAL['FACEBOOK']['client_secret'],
                authorization_response=authorization_response)
        return ret['access_token']
    elif provider == Provider.GITHUB or provider.upper() == Provider.GITHUB:
        # NOTE(review): unlike the other providers, no redirect_uri is passed
        # to the GitHub session here -- presumably intentional; confirm.
        p = OAuth2Session(
            client_id=settings.STORMPATH_SOCIAL['GITHUB']['client_id'],
        )
        ret = p.fetch_token(GITHUB_TOKEN_URL,
                client_secret=settings.STORMPATH_SOCIAL['GITHUB']['client_secret'],
                authorization_response=authorization_response)
        return ret['access_token']
    elif provider == Provider.LINKEDIN:
        p = OAuth2Session(
            client_id=settings.STORMPATH_SOCIAL['LINKEDIN']['client_id'],
            redirect_uri=redirect_uri
        )
        # LinkedIn also needs its requests-oauthlib compliance fix.
        from requests_oauthlib.compliance_fixes import linkedin_compliance_fix
        p = linkedin_compliance_fix(p)
        ret = p.fetch_token(LINKEDIN_TOKEN_URL,
                client_secret=settings.STORMPATH_SOCIAL['LINKEDIN']['client_secret'],
                authorization_response=authorization_response)
        return ret['access_token']
    else:
        return None
def handle_social_callback(request, provider):
    """Complete an OAuth2 login for `provider` and log the Django user in.

    Exchanges the authorization response carried in the current request URL
    for an access token, resolves (creating it on first use) the Stormpath
    directory for the provider, authenticates the resulting account against
    Django and returns the post-login redirect URL.

    Raises:
        RuntimeError: if the token exchange yields no access token.
        StormpathError: if the provider directory exists but authentication
            still fails.
    """
    provider_redirect_url = 'stormpath_' + provider.lower() + '_login_callback'
    abs_redirect_uri = request.build_absolute_uri(
            reverse(provider_redirect_url, kwargs={'provider': provider}))
    access_token = get_access_token(
            provider,
            request.build_absolute_uri(),
            abs_redirect_uri)
    if not access_token:
        # BUG FIX: corrected the "Autentication" typo in this error message.
        raise RuntimeError('Error communicating with Authentication Provider: %s' % provider)

    params = {'provider': provider, 'access_token': access_token}
    try:
        account = APPLICATION.get_provider_account(**params)
    except StormpathError as e:
        # We might be missing a social directory. If a directory for this
        # provider is already mapped, the failure is genuine -- re-raise it.
        for asm in APPLICATION.account_store_mappings:
            if (getattr(asm.account_store, 'provider') and
                    asm.account_store.provider.provider_id == provider):
                raise e
        # Otherwise create the directory, map it to the current application
        # and retry the authentication once.
        create_provider_directory(provider, abs_redirect_uri)
        account = APPLICATION.get_provider_account(**params)

    user = _get_django_user(account)
    user.backend = SOCIAL_AUTH_BACKEND
    django_login(request, user)
    redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
    return redirect_to
def create_provider_directory(provider, redirect_uri):
    """Create a Stormpath directory for `provider` and map it to the app.

    The directory is named "<application>-<provider>" and is appended to the
    application's account stores without becoming a default store.
    """
    # `directory` (not `dir`) to avoid shadowing the builtin.
    directory = CLIENT.directories.create({
        'name': APPLICATION.name + '-' + provider,
        'provider': {
            'client_id': settings.STORMPATH_SOCIAL[provider.upper()]['client_id'],
            'client_secret': settings.STORMPATH_SOCIAL[provider.upper()]['client_secret'],
            'redirect_uri': redirect_uri,
            'provider_id': provider,
        },
    })

    APPLICATION.account_store_mappings.create({
        'application': APPLICATION,
        'account_store': directory,
        'list_index': 99,
        'is_default_account_store': False,
        'is_default_group_store': False,
    })
def get_authorization_url(provider, redirect_uri):
    """Build the provider's OAuth2 authorization URL for the login redirect.

    Args:
        provider: a stormpath Provider id (GOOGLE, FACEBOOK, GITHUB, LINKEDIN).
        redirect_uri: the absolute callback URI for this provider.

    Returns:
        A (authorization_url, state) tuple as produced by requests-oauthlib.

    Raises:
        RuntimeError: for an unrecognized provider.
    """
    if provider == Provider.GOOGLE:
        # Google requires the scopes to be requested explicitly.
        scope = [
                "email",
                "profile"
        ]
        p = OAuth2Session(
            client_id=settings.STORMPATH_SOCIAL['GOOGLE']['client_id'],
            scope=scope,
            redirect_uri=redirect_uri
        )
        authorization_url, state = p.authorization_url(GOOGLE_AUTHORIZATION_BASE_URL)
        return authorization_url, state
    elif provider == Provider.FACEBOOK:
        p = OAuth2Session(
            client_id=settings.STORMPATH_SOCIAL['FACEBOOK']['client_id'],
            redirect_uri=redirect_uri
        )
        # Facebook deviates from the OAuth2 spec; apply the compliance fix.
        from requests_oauthlib.compliance_fixes import facebook_compliance_fix
        p = facebook_compliance_fix(p)
        authorization_url, state = p.authorization_url(FACEBOOK_AUTHORIZATION_BASE_URL)
        return authorization_url, state
    elif provider == Provider.GITHUB or provider.upper() == Provider.GITHUB:
        # NOTE(review): no redirect_uri is passed for GitHub, matching
        # get_access_token above -- presumably intentional; confirm.
        p = OAuth2Session(
            client_id=settings.STORMPATH_SOCIAL['GITHUB']['client_id'],
        )
        authorization_url, state = p.authorization_url(GITHUB_AUTHORIZATION_BASE_URL)
        return authorization_url, state
    elif provider == Provider.LINKEDIN:
        p = OAuth2Session(
            client_id=settings.STORMPATH_SOCIAL['LINKEDIN']['client_id'],
            redirect_uri=redirect_uri
        )
        # LinkedIn also needs its requests-oauthlib compliance fix.
        from requests_oauthlib.compliance_fixes import linkedin_compliance_fix
        p = linkedin_compliance_fix(p)
        authorization_url, state = p.authorization_url(LINKEDIN_AUTHORIZATION_BASE_URL)
        return authorization_url, state
    else:
        raise RuntimeError('Invalid Provider %s' % provider)
| apache-2.0 |
naemono/pyrax | tests/unit/test_image.py | 12 | 20320 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import random
import unittest
from mock import patch
from mock import MagicMock as Mock
from collections import OrderedDict
import pyrax
from pyrax.manager import BaseManager
import pyrax.image
from pyrax.image import assure_image
from pyrax.image import ImageMember
from pyrax.image import ImageTasksManager
from pyrax.image import JSONSchemaManager
import pyrax.exceptions as exc
import pyrax.utils as utils
from pyrax import fakes
class ImageTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        # No extra construction work; defer entirely to unittest.TestCase.
        super(ImageTest, self).__init__(*args, **kwargs)
    def setUp(self):
        # Build a fake identity/client/manager stack plus one fake image so
        # each test can patch individual manager methods without any network.
        self.identity = fakes.FakeIdentity()
        self.client = fakes.FakeImageClient(self.identity)
        self.client._manager = fakes.FakeImageManager(self.client)
        self.image = fakes.FakeImage()
        super(ImageTest, self).setUp()
    def tearDown(self):
        # Nothing to clean up beyond the base class behavior.
        super(ImageTest, self).tearDown()
def test_assure_image(self):
class TestClient(object):
_manager = fakes.FakeManager()
@assure_image
def test_method(self, img):
return img
client = TestClient()
client._manager.get = Mock(return_value=self.image)
# Pass the image
ret = client.test_method(self.image)
self.assertTrue(ret is self.image)
# Pass the ID
ret = client.test_method(self.image.id)
self.assertTrue(ret is self.image)
def test_img_update(self):
img = self.image
key = utils.random_unicode()
val = utils.random_unicode()
img.manager.update = Mock()
img.update({key: val})
img.manager.update.assert_called_once_with(img, {key: val})
def test_img_change_name(self):
img = self.image
nm = utils.random_unicode()
img.update = Mock()
img.change_name(nm)
img.update.assert_called_once_with({"name": nm})
def test_img_list_members(self):
img = self.image
img._member_manager.list = Mock()
img.list_members()
img._member_manager.list.assert_called_once_with()
def test_img_get_member(self):
img = self.image
member = utils.random_unicode()
img._member_manager.get = Mock()
img.get_member(member)
img._member_manager.get.assert_called_once_with(member)
def test_img_create_member(self):
img = self.image
project_id = utils.random_unicode()
img._member_manager.create = Mock()
img.add_member(project_id)
img._member_manager.create.assert_called_once_with(name=None,
project_id=project_id)
def test_img_delete_member(self):
img = self.image
project_id = utils.random_unicode()
img._member_manager.delete = Mock()
img.delete_member(project_id)
img._member_manager.delete.assert_called_once_with(project_id)
def test_img_add_tag(self):
img = self.image
tag = utils.random_unicode()
img._tag_manager.add = Mock()
img.add_tag(tag)
img._tag_manager.add.assert_called_once_with(tag)
def test_img_delete_tag(self):
img = self.image
tag = utils.random_unicode()
img._tag_manager.delete = Mock()
img.delete_tag(tag)
img._tag_manager.delete.assert_called_once_with(tag)
def test_member_id(self):
mid = utils.random_unicode()
member = ImageMember(self.client._manager, {"member_id": mid})
self.assertEqual(member.id, mid)
def test_imgmgr_create_body(self):
clt = self.client
mgr = clt._manager
nm = utils.random_unicode()
meta = utils.random_unicode()
body = mgr._create_body(nm, metadata=meta)
self.assertEqual(body, {"metadata": meta})
def test_imgmgr_create_body_empty(self):
clt = self.client
mgr = clt._manager
nm = utils.random_unicode()
body = mgr._create_body(nm)
self.assertEqual(body, {})
def test_imgmgr_list(self):
clt = self.client
mgr = clt._manager
limit = utils.random_unicode()
marker = utils.random_unicode()
name = utils.random_unicode()
visibility = utils.random_unicode()
member_status = utils.random_unicode()
owner = utils.random_unicode()
tag = utils.random_unicode()
status = utils.random_unicode()
size_min = utils.random_unicode()
size_max = utils.random_unicode()
sort_key = utils.random_unicode()
sort_dir = utils.random_unicode()
return_raw = utils.random_unicode()
qs = utils.random_unicode()
mgr._list = Mock()
sav = utils.dict_to_qs
utils.dict_to_qs = Mock(return_value=qs)
expected = "/%s?%s" % (mgr.uri_base, qs)
mgr.list(limit=limit, marker=marker, name=name, visibility=visibility,
member_status=member_status, owner=owner, tag=tag,
status=status, size_min=size_min, size_max=size_max,
sort_key=sort_key, sort_dir=sort_dir, return_raw=return_raw)
mgr._list.assert_called_once_with(expected, return_raw=return_raw)
utils.dict_to_qs = sav
    def test_imgmgr_list_all(self):
        # list_all() must follow the pagination "next" link until it is empty
        # and concatenate the images from every page.
        clt = self.client
        mgr = clt._manager
        next_link = "/images?marker=00000000-0000-0000-0000-0000000000"
        fake_body = {"images": [{"name": "fake1"}], "next": "/v2%s" % next_link}
        mgr.list = Mock(return_value=(None, fake_body))
        fake_last_body = {"images": [{"name": "fake2"}], "next": ""}
        mgr.api.method_get = Mock(return_value=(None, fake_last_body))
        ret = mgr.list_all()
        # One image from each of the two pages.
        self.assertEqual(len(ret), 2)
        mgr.list.assert_called_once_with(name=None, visibility=None,
                member_status=None, owner=None, tag=None, status=None,
                size_min=None, size_max=None, sort_key=None, sort_dir=None,
                return_raw=True)
        # The follow-up request must be made against the link without the
        # "/v2" version prefix.
        mgr.api.method_get.assert_called_once_with(next_link)
    def test_imgmgr_update(self):
        # update() must diff the new values against the image's current
        # attributes and PATCH a JSON-patch body: "replace" for attributes
        # that already exist, "add" for new ones.
        clt = self.client
        mgr = clt._manager
        img = self.image
        setattr(img, "foo", "old")
        # OrderedDict so the generated patch operations have a stable order
        # matching exp_body below.
        valdict = OrderedDict([("foo", "new"), ("bar", "new")])
        mgr.api.method_patch = Mock(return_value=(None, None))
        mgr.get = Mock(return_value=img)
        exp_uri = "/%s/%s" % (mgr.uri_base, img.id)
        exp_body = [{"op": "replace", "path": "/foo", "value": "new"},
                {"op": "add", "path": "/bar", "value": "new"}]
        exp_hdrs = {"Content-Type":
                "application/openstack-images-v2.1-json-patch"}
        mgr.update(img, valdict)
        mgr.api.method_patch.assert_called_once_with(exp_uri, body=exp_body,
                headers=exp_hdrs)
def test_imgmgr_update_member(self):
clt = self.client
mgr = clt._manager
img = self.image
status = random.choice(("pending", "accepted", "rejected"))
project_id = utils.random_unicode()
clt.identity.tenant_id = project_id
exp_uri = "/%s/%s/members/%s" % (mgr.uri_base, img.id, project_id)
exp_body = {"status": status}
mgr.api.method_put = Mock(return_value=(None, None))
mgr.update_image_member(img.id, status)
mgr.api.method_put.assert_called_once_with(exp_uri, body=exp_body)
def test_imgmgr_update_member_bad(self):
clt = self.client
mgr = clt._manager
img = self.image
bad_status = "BAD"
self.assertRaises(exc.InvalidImageMemberStatus, mgr.update_image_member,
img.id, bad_status)
def test_imgmgr_update_member_not_found(self):
clt = self.client
mgr = clt._manager
img = self.image
status = random.choice(("pending", "accepted", "rejected"))
project_id = utils.random_unicode()
clt.identity.tenant_id = project_id
exp_uri = "/%s/%s/members/%s" % (mgr.uri_base, img.id, project_id)
exp_body = {"status": status}
mgr.api.method_put = Mock(side_effect=exc.NotFound(""))
self.assertRaises(exc.InvalidImageMember, mgr.update_image_member,
img.id, status)
def test_img_member_mgr_create_body(self):
img = self.image
mgr = img._member_manager
nm = utils.random_unicode()
project_id = utils.random_unicode()
ret = mgr._create_body(nm, project_id)
self.assertEqual(ret, {"member": project_id})
def test_img_member_mgr_create(self):
img = self.image
mgr = img._member_manager
nm = utils.random_unicode()
val = utils.random_unicode()
sav = BaseManager.create
BaseManager.create = Mock(return_value=val)
ret = mgr.create(nm)
self.assertEqual(ret, val)
BaseManager.create = sav
def test_img_member_mgr_create_403(self):
img = self.image
mgr = img._member_manager
nm = utils.random_unicode()
sav = BaseManager.create
err = exc.Forbidden(403)
BaseManager.create = Mock(side_effect=err)
self.assertRaises(exc.UnsharableImage, mgr.create, nm)
BaseManager.create = sav
def test_img_member_mgr_create_other(self):
img = self.image
mgr = img._member_manager
nm = utils.random_unicode()
sav = BaseManager.create
err = exc.OverLimit(413)
BaseManager.create = Mock(side_effect=err)
self.assertRaises(exc.OverLimit, mgr.create, nm)
BaseManager.create = sav
def test_img_tag_mgr_create(self):
img = self.image
mgr = img._tag_manager
nm = utils.random_unicode()
ret = mgr._create_body(nm)
self.assertEqual(ret, {})
def test_img_tag_mgr_add(self):
img = self.image
mgr = img._tag_manager
tag = utils.random_unicode()
exp_uri = "/%s/%s" % (mgr.uri_base, tag)
mgr.api.method_put = Mock(return_value=(None, None))
mgr.add(tag)
mgr.api.method_put.assert_called_once_with(exp_uri)
def test_img_tasks_mgr_create_export(self):
clt = self.client
mgr = clt._tasks_manager
img = self.image
cont = utils.random_unicode()
img_format = utils.random_unicode()
img_name = utils.random_unicode()
name = "export"
ret = mgr._create_body(name, img=img, cont=cont, img_format=img_format,
img_name=img_name)
exp = {"type": name, "input": {
"image_uuid": img.id,
"receiving_swift_container": cont}}
self.assertEqual(ret, exp)
def test_img_tasks_mgr_create_import(self):
clt = self.client
mgr = clt._tasks_manager
img = self.image
cont = utils.random_unicode()
img_format = utils.random_unicode()
img_name = utils.random_unicode()
name = "import"
ret = mgr._create_body(name, img=img, cont=cont, img_format=img_format,
img_name=img_name)
exp = {"type": name, "input": {
"image_properties": {"name": img_name},
"import_from": "%s/%s" % (cont, img.id),
"import_from_format": img_format}}
self.assertEqual(ret, exp)
@patch("pyrax.manager.BaseManager.create")
def test_img_tasks_mgr_create(self, mock_create):
clt = self.client
mgr = clt._tasks_manager
nm = utils.random_unicode()
cont = utils.random_unicode()
class FakeCF(object):
def get_container(self, cont):
return cont
class FakeRegion(object):
client = FakeCF()
api = mgr.api
rgn = api.region_name
api.identity.object_store = {rgn: FakeRegion()}
mgr.create(nm, cont=cont)
mock_create.assert_called_once_with(nm, cont=cont)
def test_jsonscheme_mgr(self):
mgr = JSONSchemaManager(self.client)
nm = utils.random_unicode()
ret = mgr._create_body(nm)
self.assertIsNone(ret)
def test_jsonscheme_mgr_images(self):
mgr = JSONSchemaManager(self.client)
mgr.api.method_get = Mock(return_value=(None, None))
exp_uri = "/%s/images" % mgr.uri_base
mgr.images()
mgr.api.method_get.assert_called_once_with(exp_uri)
def test_jsonscheme_mgr_image(self):
mgr = JSONSchemaManager(self.client)
mgr.api.method_get = Mock(return_value=(None, None))
exp_uri = "/%s/image" % mgr.uri_base
mgr.image()
mgr.api.method_get.assert_called_once_with(exp_uri)
def test_jsonscheme_mgr_members(self):
mgr = JSONSchemaManager(self.client)
mgr.api.method_get = Mock(return_value=(None, None))
exp_uri = "/%s/members" % mgr.uri_base
mgr.image_members()
mgr.api.method_get.assert_called_once_with(exp_uri)
def test_jsonscheme_mgr_member(self):
mgr = JSONSchemaManager(self.client)
mgr.api.method_get = Mock(return_value=(None, None))
exp_uri = "/%s/member" % mgr.uri_base
mgr.image_member()
mgr.api.method_get.assert_called_once_with(exp_uri)
def test_jsonscheme_mgr_tasks(self):
mgr = JSONSchemaManager(self.client)
mgr.api.method_get = Mock(return_value=(None, None))
exp_uri = "/%s/tasks" % mgr.uri_base
mgr.image_tasks()
mgr.api.method_get.assert_called_once_with(exp_uri)
def test_jsonscheme_mgr_task(self):
mgr = JSONSchemaManager(self.client)
mgr.api.method_get = Mock(return_value=(None, None))
exp_uri = "/%s/task" % mgr.uri_base
mgr.image_task()
mgr.api.method_get.assert_called_once_with(exp_uri)
def test_clt_list(self):
clt = self.client
mgr = clt._manager
limit = utils.random_unicode()
marker = utils.random_unicode()
name = utils.random_unicode()
visibility = utils.random_unicode()
member_status = utils.random_unicode()
owner = utils.random_unicode()
tag = utils.random_unicode()
status = utils.random_unicode()
size_min = utils.random_unicode()
size_max = utils.random_unicode()
sort_key = utils.random_unicode()
sort_dir = utils.random_unicode()
mgr.list = Mock()
clt.list(limit=limit, marker=marker, name=name, visibility=visibility,
member_status=member_status, owner=owner, tag=tag,
status=status, size_min=size_min, size_max=size_max,
sort_key=sort_key, sort_dir=sort_dir)
mgr.list.assert_called_once_with(limit=limit, marker=marker, name=name,
visibility=visibility, member_status=member_status,
owner=owner, tag=tag, status=status, size_min=size_min,
size_max=size_max, sort_key=sort_key, sort_dir=sort_dir)
def test_clt_list_all(self):
clt = self.client
mgr = clt._manager
mgr.list_all = Mock()
clt.list_all()
mgr.list_all.assert_called_once_with(name=None, visibility=None,
member_status=None, owner=None, tag=None, status=None,
size_min=None, size_max=None, sort_key=None, sort_dir=None)
def test_clt_update(self):
clt = self.client
mgr = clt._manager
img = self.image
key = utils.random_unicode()
val = utils.random_unicode()
upd = {key: val}
mgr.update = Mock()
clt.update(img, upd)
mgr.update.assert_called_once_with(img, upd)
def test_clt_change_image_name(self):
clt = self.client
mgr = clt._manager
img = self.image
nm = utils.random_unicode()
clt.update = Mock()
clt.change_image_name(img, nm)
clt.update.assert_called_once_with(img, {"name": nm})
def test_clt_list_image_members(self):
clt = self.client
img = self.image
img.list_members = Mock()
clt.list_image_members(img)
img.list_members.assert_called_once_with()
def test_clt_get_image_member(self):
clt = self.client
img = self.image
member = utils.random_unicode()
img.get_member = Mock()
clt.get_image_member(img, member)
img.get_member.assert_called_once_with(member)
def test_clt_add_image_member(self):
clt = self.client
img = self.image
project_id = utils.random_unicode()
img.add_member = Mock()
clt.add_image_member(img, project_id)
img.add_member.assert_called_once_with(project_id)
def test_clt_delete_image_member(self):
clt = self.client
img = self.image
project_id = utils.random_unicode()
img.delete_member = Mock()
clt.delete_image_member(img, project_id)
img.delete_member.assert_called_once_with(project_id)
def test_clt_update_img_member(self):
clt = self.client
mgr = clt._manager
img = self.image
status = utils.random_unicode()
mgr.update_image_member = Mock()
clt.update_image_member(img, status)
mgr.update_image_member.assert_called_once_with(img, status)
def test_clt_add_image_tag(self):
clt = self.client
img = self.image
tag = utils.random_unicode()
img.add_tag = Mock()
clt.add_image_tag(img, tag)
img.add_tag.assert_called_once_with(tag)
def test_clt_delete_image_tag(self):
clt = self.client
img = self.image
tag = utils.random_unicode()
img.delete_tag = Mock()
clt.delete_image_tag(img, tag)
img.delete_tag.assert_called_once_with(tag)
    def test_clt_list_tasks(self):
        """list_tasks() delegates to the tasks manager's list()."""
        clt = self.client
        mgr = clt._tasks_manager
        mgr.list = Mock()
        clt.list_tasks()
        mgr.list.assert_called_once_with()
    def test_clt_get_task(self):
        """get_task() delegates to the tasks manager's get()."""
        clt = self.client
        mgr = clt._tasks_manager
        task = utils.random_unicode()
        mgr.get = Mock()
        clt.get_task(task)
        mgr.get.assert_called_once_with(task)
    def test_clt_export_task(self):
        """export_task() creates an "export" task via the tasks manager."""
        clt = self.client
        mgr = clt._tasks_manager
        img = self.image
        cont = utils.random_unicode()
        mgr.create = Mock()
        clt.export_task(img, cont)
        mgr.create.assert_called_once_with("export", img=img, cont=cont)
    def test_clt_import_task(self):
        """import_task() creates an "import" task, forwarding format/name kwargs."""
        clt = self.client
        mgr = clt._tasks_manager
        img = self.image
        cont = utils.random_unicode()
        img_format = utils.random_unicode()
        img_name = utils.random_unicode()
        mgr.create = Mock()
        clt.import_task(img, cont, img_format=img_format, img_name=img_name)
        mgr.create.assert_called_once_with("import", img=img, cont=cont,
                img_format=img_format, img_name=img_name)
    def test_clt_get_images_schema(self):
        """get_images_schema() delegates to the schema manager's images()."""
        clt = self.client
        mgr = clt._schema_manager
        mgr.images = Mock()
        clt.get_images_schema()
        mgr.images.assert_called_once_with()
    def test_clt_get_image_schema(self):
        """get_image_schema() delegates to the schema manager's image()."""
        clt = self.client
        mgr = clt._schema_manager
        mgr.image = Mock()
        clt.get_image_schema()
        mgr.image.assert_called_once_with()
    def test_clt_get_image_members_schema(self):
        """get_image_members_schema() delegates to the schema manager's image_members()."""
        clt = self.client
        mgr = clt._schema_manager
        mgr.image_members = Mock()
        clt.get_image_members_schema()
        mgr.image_members.assert_called_once_with()
    def test_clt_get_image_member_schema(self):
        """get_image_member_schema() delegates to the schema manager's image_member()."""
        clt = self.client
        mgr = clt._schema_manager
        mgr.image_member = Mock()
        clt.get_image_member_schema()
        mgr.image_member.assert_called_once_with()
    def test_clt_get_image_tasks_schema(self):
        """get_image_tasks_schema() delegates to the schema manager's image_tasks()."""
        clt = self.client
        mgr = clt._schema_manager
        mgr.image_tasks = Mock()
        clt.get_image_tasks_schema()
        mgr.image_tasks.assert_called_once_with()
    def test_clt_get_image_task_schema(self):
        """get_image_task_schema() delegates to the schema manager's image_task()."""
        clt = self.client
        mgr = clt._schema_manager
        mgr.image_task = Mock()
        clt.get_image_task_schema()
        mgr.image_task.assert_called_once_with()
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
pweatherbee/HashletCoin | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
# Input image and destination animation.
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
# Temporary per-frame PNGs written to TMPDIR using TMPNAME (frame index).
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'  # imagemagick binary used to assemble the .mng
CLOCKWISE=True
DSIZE=(16,16)  # final frame size

im_src = Image.open(SRC)

# For a clockwise spin the source is mirrored first, then rotated by a
# negative angle below.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    """Return the temp-file path for the given frame index."""
    return path.join(TMPDIR, TMPNAME % frame)

frame_files = []
for frame in xrange(NUMFRAMES):  # py2 script (xrange)
    # Rotate by the frame's fraction of a full turn; +0.5 centers each
    # frame's angle within its time slot.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# Assemble all frames into the animated .mng with imagemagick.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
zdravi/jrnl | features/steps/core.py | 9 | 6720 | from behave import *
from jrnl import cli, Journal, util
from dateutil import parser as date_parser
import os
import codecs
import json
import keyring
# Use a plaintext file keyring so tests never touch the real system keyring.
keyring.set_keyring(keyring.backends.file.PlaintextKeyring())

# Python 3 / Python 2 compatibility shim for StringIO.
try:
    from io import StringIO
except ImportError:
    from cStringIO import StringIO
import tzlocal
def _parse_args(command):
nargs=[]
concats = []
for a in command.split()[1:]:
if a.startswith("'"):
concats.append(a.strip("'"))
elif a.endswith("'"):
concats.append(a.strip("'"))
nargs.append(u" ".join(concats))
concats = []
else:
nargs.append(a)
return nargs
def read_journal(journal_name="default"):
    """Return the raw text of the named journal file listed in the config."""
    with open(cli.CONFIG_PATH) as config_file:
        config = json.load(config_file)
    with codecs.open(config['journals'][journal_name], 'r', 'utf-8') as journal_file:
        journal = journal_file.read()
    return journal
def open_journal(journal_name="default"):
    """Build and return a Journal object for the named journal.

    A journal config entry may be either a dict (per-journal overrides
    of the global config) or a plain path string.
    """
    with open(cli.CONFIG_PATH) as config_file:
        config = json.load(config_file)
    journal_conf = config['journals'][journal_name]
    if type(journal_conf) is dict:  # We can override the default config on a by-journal basis
        config.update(journal_conf)
    else:  # But also just give them a string to point to the journal file
        config['journal'] = journal_conf
    return Journal.Journal(**config)
@given('we use the config "{config_file}"')
def set_config(context, config_file):
    """Point the jrnl CLI at a test configuration file under features/configs."""
    full_path = os.path.join("features/configs", config_file)
    cli.CONFIG_PATH = os.path.abspath(full_path)
@when('we run "{command}" and enter')
@when('we run "{command}" and enter "{inputs}"')
def run_with_input(context, command, inputs=None):
    """Run a jrnl command, feeding `inputs` (or the step's text block) as stdin.

    Records the process exit status on the behave context.
    """
    text = inputs or context.text
    args = _parse_args(command)
    buffer = StringIO(text.strip())
    util.STDIN = buffer  # jrnl reads interactive input from util.STDIN
    try:
        cli.run(args)
        context.exit_status = 0
    except SystemExit as e:
        context.exit_status = e.code
@when('we run "{command}"')
def run(context, command):
    """Run a jrnl command and record its exit status on the context."""
    args = _parse_args(command)
    try:
        cli.run(args)
        context.exit_status = 0
    except SystemExit as e:
        context.exit_status = e.code
@when('we set the keychain password of "{journal}" to "{password}"')
def set_keychain(context, journal, password):
    """Store a journal password in the (plaintext, test-only) keyring."""
    keyring.set_password('jrnl', journal, password)
@then('we should get an error')
def has_error(context):
    """Assert the last command exited with a non-zero status."""
    assert context.exit_status != 0, context.exit_status
@then('we should get no error')
def no_error(context):
    """Assert the last command exited successfully (status 0)."""
    # `is 0` only worked because CPython interns small ints; identity is
    # not a guaranteed substitute for value equality — compare with ==.
    assert context.exit_status == 0, context.exit_status
@then('the output should be parsable as json')
def check_output_json(context):
    """Assert captured stdout is valid (and truthy) JSON."""
    out = context.stdout_capture.getvalue()
    assert json.loads(out), out
@then('"{field}" in the json output should have {number:d} elements')
@then('"{field}" in the json output should have 1 element')
def check_output_field(context, field, number=1):
out = context.stdout_capture.getvalue()
out_json = json.loads(out)
assert field in out_json, [field, out_json]
assert len(out_json[field]) == number, len(out_json[field])
@then('"{field}" in the json output should not contain "{key}"')
def check_output_field_not_key(context, field, key):
out = context.stdout_capture.getvalue()
out_json = json.loads(out)
assert field in out_json
assert key not in out_json[field]
@then('"{field}" in the json output should contain "{key}"')
def check_output_field_key(context, field, key):
out = context.stdout_capture.getvalue()
out_json = json.loads(out)
assert field in out_json
assert key in out_json[field]
@then('the output should be')
@then('the output should be "{text}"')
def check_output(context, text=None):
    """Assert stdout matches the expected text line-by-line (whitespace-trimmed)."""
    text = (text or context.text).strip().splitlines()
    out = context.stdout_capture.getvalue().strip().splitlines()
    assert len(text) == len(out), "Output has {} lines (expected: {})".format(len(out), len(text))
    for line_text, line_out in zip(text, out):
        assert line_text.strip() == line_out.strip(), [line_text.strip(), line_out.strip()]
@then('the output should contain "{text}" in the local time')
def check_output_time_inline(context, text):
    """Assert stdout contains `text` converted to the machine's local timezone."""
    out = context.stdout_capture.getvalue()
    local_tz = tzlocal.get_localzone()
    # Normalize the expected timestamp to the journal's "%Y-%m-%d %H:%M" format.
    local_time = date_parser.parse(text).astimezone(local_tz).strftime("%Y-%m-%d %H:%M")
    assert local_time in out, local_time
@then('the output should contain "{text}"')
def check_output_inline(context, text):
    """Assert `text` appears somewhere in captured stdout."""
    out = context.stdout_capture.getvalue()
    if isinstance(out, bytes):  # py2 capture may yield bytes
        out = out.decode('utf-8')
    assert text in out
@then('the output should not contain "{text}"')
def check_output_not_inline(context, text):
    """Assert `text` does NOT appear in captured stdout."""
    out = context.stdout_capture.getvalue()
    if isinstance(out, bytes):  # py2 capture may yield bytes
        out = out.decode('utf-8')
    assert text not in out
@then('we should see the message "{text}"')
def check_message(context, text):
    """Assert `text` appears in the captured log/message stream."""
    out = context.messages.getvalue()
    assert text in out, [text, out]
@then('we should not see the message "{text}"')
def check_not_message(context, text):
    """Assert `text` does NOT appear in the captured log/message stream."""
    out = context.messages.getvalue()
    assert text not in out, [text, out]
@then('the journal should contain "{text}"')
@then('journal "{journal_name}" should contain "{text}"')
def check_journal_content(context, text, journal_name="default"):
    """Assert the raw journal file contains `text`."""
    journal = read_journal(journal_name)
    assert text in journal, journal
@then('journal "{journal_name}" should not exist')
def journal_doesnt_exist(context, journal_name="default"):
with open(cli.CONFIG_PATH) as config_file:
config = json.load(config_file)
journal_path = config['journals'][journal_name]
assert not os.path.exists(journal_path)
@then('the config should have "{key}" set to "{value}"')
@then('the config for journal "{journal}" should have "{key}" set to "{value}"')
def config_var(context, key, value, journal=None):
    """Assert a config key holds a typed value.

    `value` is given as "type:literal" where type is bool, int or str,
    e.g. "bool:true" or "int:3".
    """
    t, value = value.split(":")
    value = {
        "bool": lambda v: v.lower() == "true",
        "int": int,
        "str": str
    }[t](value)
    with open(cli.CONFIG_PATH) as config_file:
        config = json.load(config_file)
    if journal:
        config = config["journals"][journal]
    assert key in config
    assert config[key] == value
@then('the journal should have {number:d} entries')
@then('the journal should have {number:d} entry')
@then('journal "{journal_name}" should have {number:d} entries')
@then('journal "{journal_name}" should have {number:d} entry')
def check_num_entries(context, number, journal_name="default"):
    """Assert the parsed journal contains exactly `number` entries."""
    journal = open_journal(journal_name)
    assert len(journal.entries) == number
@then('fail')
def debug_fail(context):
    """Debugging helper step: always fails, to dump captured state."""
    assert False
| mit |
sanjuro/RCJK | vendor/atom/url.py | 280 | 4277 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import urlparse
import urllib
DEFAULT_PROTOCOL = 'http'
DEFAULT_PORT = 80
def parse_url(url_string):
  """Creates a Url object which corresponds to the URL string.

  This method can accept partial URLs, but it will leave missing
  members of the Url unset.
  """
  # urlparse returns (scheme, netloc, path, params, query, fragment).
  parts = urlparse.urlparse(url_string)
  url = Url()
  if parts[0]:
    url.protocol = parts[0]
  if parts[1]:
    # Split "host:port"; note the port is kept as a string here.
    host_parts = parts[1].split(':')
    if host_parts[0]:
      url.host = host_parts[0]
    if len(host_parts) > 1:
      url.port = host_parts[1]
  if parts[2]:
    url.path = parts[2]
  if parts[4]:
    # Decode the query string into the params dict; a key without '='
    # is stored with a None value.
    param_pairs = parts[4].split('&')
    for pair in param_pairs:
      pair_parts = pair.split('=')
      if len(pair_parts) > 1:
        url.params[urllib.unquote_plus(pair_parts[0])] = (
            urllib.unquote_plus(pair_parts[1]))
      elif len(pair_parts) == 1:
        url.params[urllib.unquote_plus(pair_parts[0])] = None
  return url
class Url(object):
  """Represents a URL and implements comparison logic.

  URL strings which are not identical can still be equivalent, so this object
  provides a better interface for comparing and manipulating URLs than
  strings. URL parameters are represented as a dictionary of strings, and
  defaults are used for the protocol (http) and port (80) if not provided.
  """
  def __init__(self, protocol=None, host=None, port=None, path=None,
               params=None):
    self.protocol = protocol
    self.host = host
    self.port = port
    self.path = path
    self.params = params or {}

  def to_string(self):
    """Reassemble the URL into a string via urlunparse."""
    # (scheme, netloc, path, params, query, fragment); params/fragment unused.
    url_parts = ['', '', '', '', '', '']
    if self.protocol:
      url_parts[0] = self.protocol
    if self.host:
      if self.port:
        url_parts[1] = ':'.join((self.host, str(self.port)))
      else:
        url_parts[1] = self.host
    if self.path:
      url_parts[2] = self.path
    if self.params:
      url_parts[4] = self.get_param_string()
    return urlparse.urlunparse(url_parts)

  def get_param_string(self):
    """Return the query string ('k=v&k2=v2'), with keys and values escaped."""
    param_pairs = []
    for key, value in self.params.iteritems():
      param_pairs.append('='.join((urllib.quote_plus(key),
          urllib.quote_plus(str(value)))))
    return '&'.join(param_pairs)

  def get_request_uri(self):
    """Returns the path with the parameters escaped and appended."""
    param_string = self.get_param_string()
    if param_string:
      return '?'.join([self.path, param_string])
    else:
      return self.path

  def __cmp__(self, other):
    """Python 2 ordering: compare protocol, host, port, path, then params.

    Missing protocol/port fall back to the http/80 defaults so equivalent
    URLs compare equal.
    """
    if not isinstance(other, Url):
      return cmp(self.to_string(), str(other))
    difference = 0
    # Compare the protocol
    if self.protocol and other.protocol:
      difference = cmp(self.protocol, other.protocol)
    elif self.protocol and not other.protocol:
      difference = cmp(self.protocol, DEFAULT_PROTOCOL)
    elif not self.protocol and other.protocol:
      difference = cmp(DEFAULT_PROTOCOL, other.protocol)
    if difference != 0:
      return difference
    # Compare the host
    difference = cmp(self.host, other.host)
    if difference != 0:
      return difference
    # Compare the port
    if self.port and other.port:
      difference = cmp(self.port, other.port)
    elif self.port and not other.port:
      difference = cmp(self.port, DEFAULT_PORT)
    elif not self.port and other.port:
      difference = cmp(DEFAULT_PORT, other.port)
    if difference != 0:
      return difference
    # Compare the path
    difference = cmp(self.path, other.path)
    if difference != 0:
      return difference
    # Compare the parameters
    return cmp(self.params, other.params)

  def __str__(self):
    return self.to_string()
| apache-2.0 |
jesramirez/odoo | addons/im_chat/im_chat.py | 268 | 21928 | # -*- coding: utf-8 -*-
import base64
import datetime
import logging
import time
import uuid
import random
import simplejson
import openerp
from openerp.http import request
from openerp.osv import osv, fields
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.addons.bus.bus import TIMEOUT
_logger = logging.getLogger(__name__)
# A user is marked offline when their last poll is older than this (seconds).
DISCONNECTION_TIMER = TIMEOUT + 5
AWAY_TIMER = 600  # 10 minutes
#----------------------------------------------------------
# Models
#----------------------------------------------------------
class im_chat_conversation_state(osv.Model):
    """ Adds a state on the m2m between user and session. """
    _name = 'im_chat.conversation_state'
    # Backed directly by the user<->session relation table.
    _table = "im_chat_session_res_users_rel"
    _columns = {
        # Fold state of the chat window for this user in this session.
        "state" : fields.selection([('open', 'Open'), ('folded', 'Folded'), ('closed', 'Closed')]),
        "session_id" : fields.many2one('im_chat.session', 'Session', required=True, ondelete="cascade"),
        "user_id" : fields.many2one('res.users', 'Users', required=True, ondelete="cascade"),
    }
    _defaults = {
        "state" : 'open'
    }
class im_chat_session(osv.Model):
    """ Conversations."""
    _order = 'id desc'
    _name = 'im_chat.session'
    _rec_name = 'uuid'
    _columns = {
        # Public identifier of the session, also used as an anonymous bus channel.
        'uuid': fields.char('UUID', size=50, select=True),
        'message_ids': fields.one2many('im_chat.message', 'to_id', 'Messages'),
        'user_ids': fields.many2many('res.users', 'im_chat_session_res_users_rel', 'session_id', 'user_id', "Session Users"),
        'session_res_users_rel': fields.one2many('im_chat.conversation_state', 'session_id', 'Relation Session Users'),
    }
    _defaults = {
        'uuid': lambda *args: '%s' % uuid.uuid4(),
    }

    def is_in_session(self, cr, uid, uuid, user_id, context=None):
        """ return if the given user_id is in the session """
        sids = self.search(cr, uid, [('uuid', '=', uuid)], context=context, limit=1)
        for session in self.browse(cr, uid, sids, context=context):
            # At most one session per uuid (limit=1), so returning inside
            # the loop only evaluates that single session.
            return user_id and user_id in [u.id for u in session.user_ids]
        return False

    def users_infos(self, cr, uid, ids, context=None):
        """ get the user infos for all the user in the session """
        # NOTE(review): returns inside the loop, so only the first id in
        # `ids` is considered; callers appear to pass a single id.
        for session in self.pool["im_chat.session"].browse(cr, uid, ids, context=context):
            users_infos = self.pool["res.users"].read(cr, uid, [u.id for u in session.user_ids], ['id','name', 'im_status'], context=context)
            return users_infos

    def is_private(self, cr, uid, ids, context=None):
        # NOTE(review): only the first session id is actually checked
        # (unconditional return inside the loop).
        for session_id in ids:
            """ return true if the session is private between users no external messages """
            # Private means no message with an empty from_id (anonymous sender).
            mess_ids = self.pool["im_chat.message"].search(cr, uid, [('to_id','=',session_id),('from_id','=',None)], context=context)
            return len(mess_ids) == 0

    def session_info(self, cr, uid, ids, context=None):
        """ get the session info/header of a given session """
        for session in self.browse(cr, uid, ids, context=context):
            info = {
                'uuid': session.uuid,
                'users': session.users_infos(),
                'state': 'open',
            }
            # add uid_state if available
            if uid:
                domain = [('user_id','=',uid), ('session_id','=',session.id)]
                uid_state = self.pool['im_chat.conversation_state'].search_read(cr, uid, domain, ['state'], context=context)
                if uid_state:
                    info['state'] = uid_state[0]['state']
            # NOTE(review): returns on the first session of `ids`.
            return info

    def session_get(self, cr, uid, user_to, context=None):
        """ returns the canonical session between 2 users, create it if needed """
        session_id = False
        if user_to:
            sids = self.search(cr, uid, [('user_ids','in', user_to),('user_ids', 'in', [uid])], context=context, limit=1)
            for sess in self.browse(cr, uid, sids, context=context):
                # Reuse only a strictly two-user, private session.
                if len(sess.user_ids) == 2 and sess.is_private():
                    session_id = sess.id
                    break
            else:
                session_id = self.create(cr, uid, { 'user_ids': [(6,0, (user_to, uid))] }, context=context)
        return self.session_info(cr, uid, [session_id], context=context)

    def update_state(self, cr, uid, uuid, state=None, context=None):
        """ modify the fold_state of the given session, and broadcast to himself (e.i. : to sync multiple tabs) """
        domain = [('user_id','=',uid), ('session_id.uuid','=',uuid)]
        ids = self.pool['im_chat.conversation_state'].search(cr, uid, domain, context=context)
        for sr in self.pool['im_chat.conversation_state'].browse(cr, uid, ids, context=context):
            # Without an explicit state, toggle open <-> folded.
            if not state:
                state = sr.state
                if sr.state == 'open':
                    state = 'folded'
                else:
                    state = 'open'
            self.pool['im_chat.conversation_state'].write(cr, uid, ids, {'state': state}, context=context)
            self.pool['bus.bus'].sendone(cr, uid, (cr.dbname, 'im_chat.session', uid), sr.session_id.session_info())

    def add_user(self, cr, uid, uuid, user_id, context=None):
        """ add the given user to the given session """
        sids = self.search(cr, uid, [('uuid', '=', uuid)], context=context, limit=1)
        for session in self.browse(cr, uid, sids, context=context):
            if user_id not in [u.id for u in session.user_ids]:
                self.write(cr, uid, [session.id], {'user_ids': [(4, user_id)]}, context=context)
                # notify the all the channel users and anonymous channel
                notifications = []
                for channel_user_id in session.user_ids:
                    info = self.session_info(cr, channel_user_id.id, [session.id], context=context)
                    notifications.append([(cr.dbname, 'im_chat.session', channel_user_id.id), info])
                # Anonymous are not notified when a new user is added : cannot exec session_info as uid = None
                info = self.session_info(cr, openerp.SUPERUSER_ID, [session.id], context=context)
                notifications.append([session.uuid, info])
                self.pool['bus.bus'].sendmany(cr, uid, notifications)
            # send a message to the conversation
            user = self.pool['res.users'].read(cr, uid, user_id, ['name'], context=context)
            self.pool["im_chat.message"].post(cr, uid, uid, session.uuid, "meta", user['name'] + " joined the conversation.", context=context)

    def get_image(self, cr, uid, uuid, user_id, context=None):
        """ get the avatar of a user in the given session """
        #default image
        image_b64 = 'R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
        # get the session
        if user_id:
            session_id = self.pool["im_chat.session"].search(cr, uid, [('uuid','=',uuid), ('user_ids','in', user_id)])
            if session_id:
                # get the image of the user
                res = self.pool["res.users"].read(cr, uid, [user_id], ["image_small"])[0]
                if res["image_small"]:
                    image_b64 = res["image_small"]
        return image_b64
class im_chat_message(osv.Model):
    """ Sessions messsages type can be 'message' or 'meta'.
        For anonymous message, the from_id is False.
        Messages are sent to a session not to users.
    """
    _name = 'im_chat.message'
    _order = "id desc"
    _columns = {
        'create_date': fields.datetime('Create Date', required=True, select=True),
        'from_id': fields.many2one('res.users', 'Author'),
        'to_id': fields.many2one('im_chat.session', 'Session To', required=True, select=True, ondelete='cascade'),
        'type': fields.selection([('message','Message'), ('meta','Meta')], 'Type'),
        'message': fields.char('Message'),
    }
    _defaults = {
        'type' : 'message',
    }

    def init_messages(self, cr, uid, context=None):
        """ get unread messages and old messages received less than AWAY_TIMER
            ago and the session_info for open or folded window
        """
        # get the message since the AWAY_TIMER
        threshold = datetime.datetime.now() - datetime.timedelta(seconds=AWAY_TIMER)
        threshold = threshold.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        domain = [('to_id.user_ids', 'in', [uid]), ('create_date','>',threshold)]
        # get the message since the last poll of the user
        presence_ids = self.pool['im_chat.presence'].search(cr, uid, [('user_id', '=', uid)], context=context)
        if presence_ids:
            presence = self.pool['im_chat.presence'].browse(cr, uid, presence_ids, context=context)[0]
            threshold = presence.last_poll
            domain.append(('create_date','>',threshold))
        messages = self.search_read(cr, uid, domain, ['from_id','to_id','create_date','type','message'], order='id asc', context=context)
        # get the session of the messages and the not-closed ones
        session_ids = map(lambda m: m['to_id'][0], messages)
        domain = [('user_id','=',uid), '|', ('state','!=','closed'), ('session_id', 'in', session_ids)]
        session_rels_ids = self.pool['im_chat.conversation_state'].search(cr, uid, domain, context=context)
        # re-open the session where a message have been recieve recently
        session_rels = self.pool['im_chat.conversation_state'].browse(cr, uid, session_rels_ids, context=context)
        reopening_session = []
        notifications = []
        for sr in session_rels:
            si = sr.session_id.session_info()
            si['state'] = sr.state
            if sr.state == 'closed':
                # A closed window with fresh activity comes back folded.
                si['state'] = 'folded'
                reopening_session.append(sr.id)
            notifications.append([(cr.dbname,'im_chat.session', uid), si])
        for m in messages:
            notifications.append([(cr.dbname,'im_chat.session', uid), m])
        self.pool['im_chat.conversation_state'].write(cr, uid, reopening_session, {'state': 'folded'}, context=context)
        return notifications

    def post(self, cr, uid, from_uid, uuid, message_type, message_content, context=None):
        """ post and broadcast a message, return the message id """
        message_id = False
        Session = self.pool['im_chat.session']
        session_ids = Session.search(cr, uid, [('uuid','=',uuid)], context=context)
        notifications = []
        for session in Session.browse(cr, uid, session_ids, context=context):
            # build the new message
            vals = {
                "from_id": from_uid,
                "to_id": session.id,
                "type": message_type,
                "message": message_content,
            }
            # save it
            message_id = self.create(cr, uid, vals, context=context)
            # broadcast it to channel (anonymous users) and users_ids
            data = self.read(cr, uid, [message_id], ['from_id','to_id','create_date','type','message'], context=context)[0]
            notifications.append([uuid, data])
            for user in session.user_ids:
                notifications.append([(cr.dbname, 'im_chat.session', user.id), data])
            self.pool['bus.bus'].sendmany(cr, uid, notifications)
        return message_id

    def get_messages(self, cr, uid, uuid, last_id=False, limit=20, context=None):
        """ get messages (id desc) from given last_id in the given session """
        Session = self.pool['im_chat.session']
        # Only members of the session may read its history.
        if Session.is_in_session(cr, uid, uuid, uid, context=context):
            domain = [("to_id.uuid", "=", uuid)]
            if last_id:
                domain.append(("id", "<", last_id));
            return self.search_read(cr, uid, domain, ['id', 'create_date','to_id','from_id', 'type', 'message'], limit=limit, context=context)
        return False
class im_chat_presence(osv.Model):
    """ im_chat_presence status can be: online, away or offline.
        This model is a one2one, but is not attached to res_users to avoid database concurrence errors
    """
    _name = 'im_chat.presence'
    _columns = {
        'user_id' : fields.many2one('res.users', 'Users', required=True, select=True, ondelete="cascade"),
        # Last time the client longpolled (heartbeat).
        'last_poll': fields.datetime('Last Poll'),
        # Last time the user showed real activity (mouse/keyboard).
        'last_presence': fields.datetime('Last Presence'),
        'status' : fields.selection([('online','Online'), ('away','Away'), ('offline','Offline')], 'IM Status'),
    }
    _defaults = {
        'last_poll' : fields.datetime.now,
        'last_presence' : fields.datetime.now,
        'status' : 'offline'
    }
    _sql_constraints = [('im_chat_user_status_unique','unique(user_id)', 'A user can only have one IM status.')]

    def update(self, cr, uid, presence=True, context=None):
        """ register the poll, and change its im status if necessary. It also notify the Bus if the status has changed. """
        presence_ids = self.search(cr, uid, [('user_id', '=', uid)], context=context)
        presences = self.browse(cr, uid, presence_ids, context=context)
        # set the default values
        send_notification = True
        vals = {
            'last_poll': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
            'status' : presences and presences[0].status or 'offline'
        }
        # update the user or a create a new one
        if not presences:
            vals['status'] = 'online'
            vals['user_id'] = uid
            self.create(cr, uid, vals, context=context)
        else:
            if presence:
                vals['last_presence'] = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
                vals['status'] = 'online'
            else:
                # No activity reported: go 'away' once last_presence is
                # older than AWAY_TIMER.
                threshold = datetime.datetime.now() - datetime.timedelta(seconds=AWAY_TIMER)
                if datetime.datetime.strptime(presences[0].last_presence, DEFAULT_SERVER_DATETIME_FORMAT) < threshold:
                    vals['status'] = 'away'
            send_notification = presences[0].status != vals['status']
            # write only if the last_poll is passed TIMEOUT, or if the status has changed
            delta = datetime.datetime.now() - datetime.datetime.strptime(presences[0].last_poll, DEFAULT_SERVER_DATETIME_FORMAT)
            if (delta > datetime.timedelta(seconds=TIMEOUT) or send_notification):
                self.write(cr, uid, presence_ids, vals, context=context)
        # avoid TransactionRollbackError
        cr.commit()
        # notify if the status has changed
        if send_notification:
            self.pool['bus.bus'].sendone(cr, uid, (cr.dbname,'im_chat.presence'), {'id': uid, 'im_status': vals['status']})
        # gc : disconnect the users having a too old last_poll. 1 on 100 chance to do it.
        if random.random() < 0.01:
            self.check_users_disconnection(cr, uid, context=context)
        return True

    def check_users_disconnection(self, cr, uid, context=None):
        """ disconnect the users having a too old last_poll """
        dt = (datetime.datetime.now() - datetime.timedelta(0, DISCONNECTION_TIMER)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        presence_ids = self.search(cr, uid, [('last_poll', '<', dt), ('status' , '!=', 'offline')], context=context)
        self.write(cr, uid, presence_ids, {'status': 'offline'}, context=context)
        presences = self.browse(cr, uid, presence_ids, context=context)
        # Broadcast the new offline status of each disconnected user.
        notifications = []
        for presence in presences:
            notifications.append([(cr.dbname,'im_chat.presence'), {'id': presence.user_id.id, 'im_status': presence.status}])
        self.pool['bus.bus'].sendmany(cr, uid, notifications)
        return True
class res_users(osv.Model):
    _inherit = "res.users"

    def _get_im_status(self, cr, uid, ids, fields, arg, context=None):
        """ function computing the im_status field of the users """
        # Default everyone to 'offline', then overwrite with any stored presence.
        r = dict((i, 'offline') for i in ids)
        status_ids = self.pool['im_chat.presence'].search(cr, uid, [('user_id', 'in', ids)], context=context)
        status = self.pool['im_chat.presence'].browse(cr, uid, status_ids, context=context)
        for s in status:
            r[s.user_id.id] = s.status
        return r

    _columns = {
        'im_status' : fields.function(_get_im_status, type="char", string="IM Status"),
    }

    def im_search(self, cr, uid, name, limit=20, context=None):
        """ search users with a name and return its id, name and im_status """
        # Three passes, filling up to `limit`: online employees first,
        # then other online users, then everyone else.
        result = [];
        # find the employee group
        group_employee = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'group_user')[1]

        where_clause_base = " U.active = 't' "
        query_params = ()
        if name:
            where_clause_base += " AND P.name ILIKE %s "
            query_params = query_params + ('%'+name+'%',)

        # first query to find online employee
        cr.execute('''SELECT U.id as id, P.name as name, COALESCE(S.status, 'offline') as im_status
                FROM im_chat_presence S
                    JOIN res_users U ON S.user_id = U.id
                    JOIN res_partner P ON P.id = U.partner_id
                WHERE   '''+where_clause_base+'''
                        AND U.id != %s
                        AND EXISTS (SELECT 1 FROM res_groups_users_rel G WHERE G.gid = %s AND G.uid = U.id)
                        AND S.status = 'online'
                ORDER BY P.name
                LIMIT %s
        ''', query_params + (uid, group_employee, limit))
        result = result + cr.dictfetchall()

        # second query to find other online people
        if(len(result) < limit):
            cr.execute('''SELECT U.id as id, P.name as name, COALESCE(S.status, 'offline') as im_status
                    FROM im_chat_presence S
                        JOIN res_users U ON S.user_id = U.id
                        JOIN res_partner P ON P.id = U.partner_id
                    WHERE   '''+where_clause_base+'''
                            AND U.id NOT IN %s
                            AND S.status = 'online'
                    ORDER BY P.name
                    LIMIT %s
            ''', query_params + (tuple([u["id"] for u in result]) + (uid,), limit-len(result)))
            result = result + cr.dictfetchall()

        # third query to find all other people
        if(len(result) < limit):
            cr.execute('''SELECT U.id as id, P.name as name, COALESCE(S.status, 'offline') as im_status
                    FROM res_users U
                        LEFT JOIN im_chat_presence S ON S.user_id = U.id
                        LEFT JOIN res_partner P ON P.id = U.partner_id
                    WHERE   '''+where_clause_base+'''
                            AND U.id NOT IN %s
                    ORDER BY P.name
                    LIMIT %s
            ''', query_params + (tuple([u["id"] for u in result]) + (uid,), limit-len(result)))
            result = result + cr.dictfetchall()
        return result
#----------------------------------------------------------
# Controllers
#----------------------------------------------------------
class Controller(openerp.addons.bus.bus.Controller):

    def _poll(self, dbname, channels, last, options):
        """Hook into the bus longpoll: refresh presence and subscribe the
        user's im_chat.session channel."""
        if request.session.uid:
            registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
            registry.get('im_chat.presence').update(cr, uid, options.get('im_presence', False), context=context)
            ## For performance issue, the real time status notification is disabled. This means a change of status are still braoadcasted
            ## but not received by anyone. Otherwise, all listening user restart their longpolling at the same time and cause a 'ConnectionPool Full Error'
            ## since there is not enought cursors for everyone. Now, when a user open his list of users, an RPC call is made to update his user status list.
            ##channels.append((request.db,'im_chat.presence'))
            # channel to receive message
            channels.append((request.db,'im_chat.session', request.uid))
        return super(Controller, self)._poll(dbname, channels, last, options)

    @openerp.http.route('/im_chat/init', type="json", auth="none")
    def init(self):
        """Return the pending notifications for the current user's sessions."""
        registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
        notifications = registry['im_chat.message'].init_messages(cr, uid, context=context)
        return notifications

    @openerp.http.route('/im_chat/post', type="json", auth="none")
    def post(self, uuid, message_type, message_content):
        """Post a message to a session; returns the created message id."""
        registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
        # execute the post method as SUPERUSER_ID
        message_id = registry["im_chat.message"].post(cr, openerp.SUPERUSER_ID, uid, uuid, message_type, message_content, context=context)
        return message_id

    @openerp.http.route(['/im_chat/image/<string:uuid>/<string:user_id>'], type='http', auth="none")
    def image(self, uuid, user_id):
        """Serve the avatar of a session member as a PNG response."""
        registry, cr, context, uid = request.registry, request.cr, request.context, request.session.uid
        # get the image
        Session = registry.get("im_chat.session")
        image_b64 = Session.get_image(cr, openerp.SUPERUSER_ID, uuid, simplejson.loads(user_id), context)
        # built the response
        image_data = base64.b64decode(image_b64)
        headers = [('Content-Type', 'image/png')]
        headers.append(('Content-Length', len(image_data)))
        return request.make_response(image_data, headers)

    @openerp.http.route(['/im_chat/history'], type="json", auth="none")
    def history(self, uuid, last_id=False, limit=20):
        """Return up to `limit` messages of a session, older than `last_id`."""
        registry, cr, uid, context = request.registry, request.cr, request.session.uid or openerp.SUPERUSER_ID, request.context
        return registry["im_chat.message"].get_messages(cr, uid, uuid, last_id, limit, context=context)
# vim:et:
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.