text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ray
from ray.rllib.evaluation.postprocessing import compute_advantages, \
Postprocessing
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
# The basic policy gradients loss
def policy_gradient_loss(policy, model, dist_class, train_batch):
    """The vanilla policy-gradient loss: -E[log pi(a|s) * advantage]."""
    logits, _ = model.from_batch(train_batch)
    action_dist = dist_class(logits, model)

    log_probs = action_dist.logp(train_batch[SampleBatch.ACTIONS])
    advantages = train_batch[Postprocessing.ADVANTAGES]

    # Negated so that gradient descent maximizes expected advantage-weighted
    # log-likelihood.
    return -tf.reduce_mean(log_probs * advantages)
# This adds the "advantages" column to the sampletrain_batch.
def postprocess_advantages(policy,
                           sample_batch,
                           other_agent_batches=None,
                           episode=None):
    """Adds the "advantages" column to the sample batch (no GAE)."""
    # last_r=0.0: no bootstrap value is used for the final state, and
    # use_gae=False selects plain discounted-return advantages.
    gamma = policy.config["gamma"]
    return compute_advantages(sample_batch, 0.0, gamma, use_gae=False)
# Assemble the policy-gradient TF policy from the loss and postprocessing
# functions above; default config comes from the PG trainer module.
PGTFPolicy = build_tf_policy(
    name="PGTFPolicy",
    get_default_config=lambda: ray.rllib.agents.pg.pg.DEFAULT_CONFIG,
    postprocess_fn=postprocess_advantages,
    loss_fn=policy_gradient_loss)
|
{
"content_hash": "ab0d6c81d9cbddb4d5b9df46ac8d2cef",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 69,
"avg_line_length": 33.91891891891892,
"alnum_prop": 0.69800796812749,
"repo_name": "ujvl/ray-ng",
"id": "1b8f6a4b6fcb6c9040a2dad1be5d4229c5fbe840",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/agents/pg/pg_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "349753"
},
{
"name": "C++",
"bytes": "6547"
},
{
"name": "CMake",
"bytes": "4927"
},
{
"name": "Makefile",
"bytes": "5285"
},
{
"name": "Python",
"bytes": "260095"
},
{
"name": "Shell",
"bytes": "6666"
}
],
"symlink_target": ""
}
|
import MapReduce
import sys
"""
SQL style Joins in MapReduce
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
    # record[0] looks like the table name ('order' or 'line_item') and
    # record[1] the join key (order id) -- presumably; verify against the
    # actual input data format.
    value = str(record[0])
    num = str(record[1])
    # Group the full record under the join key, tagged with its table name,
    # so the reducer can join matching rows.
    mr.emit_intermediate(num, (value, record))
def reducer(key, list_of_values):
    """Join rows sharing one join key (SQL-style join on the order id).

    key: the join key emitted by the mapper.
    list_of_values: list of (table_name, record) pairs for this key.
    """
    # The first pair is assumed to carry the 'order' record -- this relies
    # on the framework's grouping order; TODO confirm for the input data.
    _, order_record = list_of_values[0]

    # Iterate the pairs directly instead of indexing via range(len(...)).
    for table_name, record in list_of_values:
        if table_name == 'line_item':
            # Emit one joined row: order fields followed by line-item fields.
            joined = []
            joined += order_record
            joined += record
            mr.emit(joined)
# Do not modify below this line
# =============================
if __name__ == '__main__':
    # The input file path is the first CLI argument; its records are fed
    # through the MapReduce simulation framework with the functions above.
    inputdata = open(sys.argv[1])
    mr.execute(inputdata, mapper, reducer)
|
{
"content_hash": "d535cb9d4812f86aa2fda2c1786de87c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 44,
"avg_line_length": 20.75,
"alnum_prop": 0.5850066934404283,
"repo_name": "P7h/IntroToDataScience__Coursera_Course",
"id": "2241d0bcc483df8ae598bcac2ffa98c9d73b4660",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Week#3__Assignment#3/join.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17810"
}
],
"symlink_target": ""
}
|
""" Optimizations of built-ins to built-in calls.
"""
import math
import sys
from nuitka.nodes.ParameterSpecs import (
ParameterSpec,
TooManyArguments,
matchCall
)
from nuitka.utils.Utils import python_version
class BuiltinParameterSpec(ParameterSpec):
    """Parameter specification of a built-in, plus compile-time simulation.

    Holds a reference to the real built-in object (looked up by name) so
    that a call whose arguments are all compile-time constants can simply
    be executed at compile time via simulateCall.
    """

    def __init__(self, name, arg_names, default_count, list_star_arg = None,
                 dict_star_arg = None):
        ParameterSpec.__init__(
            self,
            name          = name,
            normal_args   = arg_names,
            list_star_arg = list_star_arg,
            dict_star_arg = dict_star_arg,
            default_count = default_count,
            kw_only_args  = ()
        )

        # The actual built-in, used by simulateCall below.
        self.builtin = __builtins__[name]

    def __repr__(self):
        return "<BuiltinParameterSpec %s>" % self.name

    def getName(self):
        return self.name

    def isCompileTimeComputable(self, values):
        # By default, we make this dependent on the ability to compute the
        # arguments, which is of course a good start for most cases, so this
        # is for overloads, pylint: disable=R0201
        for value in values:
            if value is not None and not value.isCompileTimeConstant():
                return False

        return True

    def simulateCall(self, given_values):
        # Using star dict call for simulation and catch any exception as really
        # fatal, pylint: disable=W0703
        try:
            # Split given_values into normal, star-list and star-dict
            # portions, following the layout produced by the call matching.
            given_normal_args = given_values[:len(self.normal_args)]

            if self.list_star_arg:
                given_list_star_args = given_values[len(self.normal_args)]
            else:
                given_list_star_args = None

            if self.dict_star_arg:
                given_dict_star_args = given_values[ -1 ]
            else:
                given_dict_star_args = None

            arg_dict = {}

            for arg_name, given_value in zip(self.normal_args, given_normal_args):
                assert type(given_value) not in (tuple, list), \
                    ("do not like a tuple %s" % (given_value,))

                if given_value is not None:
                    arg_dict[ arg_name ] = given_value.getCompileTimeConstant()

            if given_dict_star_args:
                # Iterated in reverse, so with duplicate keys the first pair
                # ends up winning -- presumably intentional; TODO confirm.
                for given_dict_star_arg in reversed(given_dict_star_args):
                    arg_name = given_dict_star_arg.getKey().getCompileTimeConstant()
                    arg_value = given_dict_star_arg.getValue().getCompileTimeConstant()

                    arg_dict[arg_name] = arg_value
        except Exception as e:
            sys.exit("Fatal problem: %r" % e)

        # Execute the real built-in with the extracted constant values.
        if given_list_star_args:
            return self.builtin(
                *(value.getCompileTimeConstant() for value in given_list_star_args),
                **arg_dict
            )
        else:
            return self.builtin(**arg_dict)
class BuiltinParameterSpecNoKeywords(BuiltinParameterSpec):
    """Built-in parameter spec for built-ins that refuse keyword arguments."""

    def allowsKeywords(self):
        return False

    def simulateCall(self, given_values):
        # Using star dict call for simulation and catch any exception as
        # really fatal, pylint: disable=W0703
        try:
            if self.list_star_arg:
                given_list_star_arg = given_values[ len(self.normal_args) ]
            else:
                given_list_star_arg = None

            arg_list = []
            # Once one argument was absent (None), no later argument may be
            # present, since this spec has positional-only semantics.
            refuse_more = False

            for _arg_name, given_value in zip(self.normal_args, given_values):
                assert type(given_value) not in (tuple, list), ("do not like tuple %s" % (given_value,))

                if given_value is not None:
                    if not refuse_more:
                        arg_list.append(given_value.getCompileTimeConstant())
                    else:
                        assert False
                else:
                    refuse_more = True

            if given_list_star_arg is not None:
                arg_list += [ value.getCompileTimeConstant() for value in given_list_star_arg ]
        except Exception as e:
            # BUG FIX: the original used the Python2-only statement form
            # "print >> sys.stderr, ...", which is a SyntaxError on Python 3.
            # Write to stderr portably instead (trailing comma meant no
            # newline, so plain write() matches the old output).
            sys.stderr.write("Fatal error: ")

            import traceback
            traceback.print_exc()

            sys.exit(repr(e))

        return self.builtin(*arg_list)
class BuiltinParameterSpecExceptions(BuiltinParameterSpec):
    """Parameter spec for built-in exception types.

    All constructor arguments are collected into the star-list "args";
    keyword arguments are refused.
    """

    def __init__(self, exception_name, default_count):
        # TODO: Parameter default_count makes no sense for exceptions probably.
        BuiltinParameterSpec.__init__(
            self,
            name=exception_name,
            arg_names=(),
            default_count=default_count,
            list_star_arg="args"
        )

    def allowsKeywords(self):
        # Exception constructors accept positional arguments only.
        return False

    def getKeywordRefusalText(self):
        return "exceptions.%s does not take keyword arguments" % self.name

    def getCallableName(self):
        return "exceptions.%s" % self.getName()
def makeBuiltinParameterSpec(exception_name):
    """Create a BuiltinParameterSpecExceptions for the named exception."""
    if exception_name == "ImportError" and python_version >= 330:
        # TODO: Create this beast, needs keyword only arguments to be
        # supported, currently user of this function must take care to
        # not have them.
        pass

    return BuiltinParameterSpecExceptions(
        exception_name=exception_name,
        default_count=0
    )
builtin_int_spec = BuiltinParameterSpec("int", ('x', "base"), 2)

# These built-ins are only available for Python2:
if python_version < 300:
    builtin_long_spec = BuiltinParameterSpec("long", ('x', "base"), 2)
    builtin_execfile_spec = BuiltinParameterSpecNoKeywords(
        "execfile", ("filename", "globals", "locals"), 2
    )
    builtin_unicode_spec = BuiltinParameterSpec(
        "unicode", ("string", "encoding", "errors"), 3
    )
    builtin_xrange_spec = BuiltinParameterSpec(
        "xrange", ("start", "stop", "step"), 2
    )

builtin_bool_spec = BuiltinParameterSpec("bool", ('x',), 1)
builtin_float_spec = BuiltinParameterSpec("float", ('x',), 1)
builtin_complex_spec = BuiltinParameterSpec("complex", ("real", "imag"), 2)

# This built-in has variable parameters for Python2/3:
if python_version < 300:
    builtin_str_spec = BuiltinParameterSpec("str", ("object",), 1)
else:
    builtin_str_spec = BuiltinParameterSpec("str", ("object", "encoding", "errors"), 3)

# NOTE: "builtin_len_spec" was assigned twice identically in the original
# code; the redundant duplicate assignment has been removed.
builtin_len_spec = BuiltinParameterSpecNoKeywords("len", ("object",), 0)
builtin_dict_spec = BuiltinParameterSpec("dict", (), 0, "list_args", "dict_args")
builtin_tuple_spec = BuiltinParameterSpec("tuple", ("sequence",), 1)
builtin_list_spec = BuiltinParameterSpec("list", ("sequence",), 1)
builtin_set_spec = BuiltinParameterSpecNoKeywords("set", ("iterable",), 1)
builtin_import_spec = BuiltinParameterSpec("__import__", ("name", "globals", "locals", "fromlist", "level"), 4)
builtin_open_spec = BuiltinParameterSpec("open", ("name", "mode", "buffering"), 3)
builtin_chr_spec = BuiltinParameterSpecNoKeywords("chr", ('i',), 0)
builtin_ord_spec = BuiltinParameterSpecNoKeywords("ord", ('c',), 0)
builtin_bin_spec = BuiltinParameterSpecNoKeywords("bin", ("number",), 0)
builtin_oct_spec = BuiltinParameterSpecNoKeywords("oct", ("number",), 0)
builtin_hex_spec = BuiltinParameterSpecNoKeywords("hex", ("number",), 0)
builtin_id_spec = BuiltinParameterSpecNoKeywords("id", ("object",), 0)
builtin_repr_spec = BuiltinParameterSpecNoKeywords("repr", ("object",), 0)
builtin_dir_spec = BuiltinParameterSpecNoKeywords("dir", ("object",), 1)
builtin_vars_spec = BuiltinParameterSpecNoKeywords("vars", ("object",), 1)
builtin_locals_spec = BuiltinParameterSpecNoKeywords("locals", (), 0)
builtin_globals_spec = BuiltinParameterSpecNoKeywords("globals", (), 0)
builtin_eval_spec = BuiltinParameterSpecNoKeywords("eval", ("source", "globals", "locals"), 2)

# The "compile" signature gained "optimize" in Python3:
if python_version < 300:
    builtin_compile_spec = BuiltinParameterSpec(
        "compile", ("source", "filename", "mode", "flags", "dont_inherit"), 2
    )
else:
    builtin_compile_spec = BuiltinParameterSpec(
        "compile", ("source", "filename", "mode", "flags", "dont_inherit", "optimize"), 3
    )

if python_version >= 300:
    builtin_exec_spec = BuiltinParameterSpecNoKeywords(
        "exec", ("source", "globals", "locals"), 2
    )

# Note: Iter in fact names its first argument if the default applies
# "collection", fixed up in a wrapper.
builtin_iter_spec = BuiltinParameterSpecNoKeywords("iter", ("callable", "sentinel"), 1)
builtin_next_spec = BuiltinParameterSpecNoKeywords("next", ("iterator", "default"), 1)

# Note: type with 1 and type with 3 arguments are too different.
builtin_type1_spec = BuiltinParameterSpecNoKeywords("type", ("object",), 0)
builtin_type3_spec = BuiltinParameterSpecNoKeywords("type", ("name", "bases", "dict"), 0)

builtin_super_spec = BuiltinParameterSpecNoKeywords("super", ("type", "object"), 1 if python_version < 300 else 2)
builtin_hasattr_spec = BuiltinParameterSpecNoKeywords("hasattr", ("object", "name"), 0)
builtin_getattr_spec = BuiltinParameterSpecNoKeywords("getattr", ("object", "name", "default"), 1)
builtin_setattr_spec = BuiltinParameterSpecNoKeywords("setattr", ("object", "name", "value"), 0)
builtin_isinstance_spec = BuiltinParameterSpecNoKeywords("isinstance", ("instance", "classes"), 0)
builtin_bytearray_spec = BuiltinParameterSpecNoKeywords("bytearray", ("iterable_of_ints",), 1)

# Beware: One argument defines stop, not start.
builtin_slice_spec = BuiltinParameterSpecNoKeywords("slice", ("start", "stop", "step"), 2)
builtin_hash_spec = BuiltinParameterSpecNoKeywords("hash", ("object",), 0)
class BuiltinRangeSpec(BuiltinParameterSpecNoKeywords):
    """Parameter spec for "range" that limits pre-computation by result size.

    Ranges are only considered compile-time computable when the resulting
    sequence would stay small (fewer than 256 elements) or when the call is
    statically known to raise instead of producing a value.
    """

    def __init__(self, *args):
        BuiltinParameterSpecNoKeywords.__init__(self, *args)

    def isCompileTimeComputable(self, values):
        # For ranges, we need have many cases that can prevent the ability
        # to pre-compute, pylint: disable=R0911,R0912

        result = BuiltinParameterSpecNoKeywords.isCompileTimeComputable(
            self,
            values = values
        )

        if result:
            arg_count = len(values)

            if arg_count == 1:
                low = values[0]

                # If it's not a number constant, we can compute the exception
                # that will be raised.
                if not low.isNumberConstant():
                    return True

                # Only pre-compute small results.
                return low.getConstant() < 256
            elif arg_count == 2:
                low, high = values

                # If it's not a number constant, we can compute the exception
                # that will be raised.
                if not low.isNumberConstant() or not high.isNumberConstant():
                    return True

                return high.getConstant() - low.getConstant() < 256
            elif arg_count == 3:
                low, high, step = values

                if not low.isNumberConstant() or \
                   not high.isNumberConstant() or \
                   not step.isNumberConstant():
                    return True

                low = low.getConstant()
                high = high.getConstant()
                step = step.getConstant()

                # It's going to give a ZeroDivisionError in this case.
                if step == 0:
                    return True

                if low < high:
                    if step < 0:
                        # Empty result, trivially computable.
                        return True
                    else:
                        return math.ceil(float(high - low) / step) < 256
                else:
                    if step > 0:
                        # Empty result, trivially computable.
                        return True
                    else:
                        return math.ceil(float(high - low) / step) < 256
            else:
                assert False
        else:
            return False
# The "range" built-in: start/stop/step with two defaults.
builtin_range_spec = BuiltinRangeSpec("range", ("start", "stop", "step"), 2)
def extractBuiltinArgs(node, builtin_spec, builtin_class,
                       empty_special_class = None):
    """Match a call node against a built-in spec and build the optimized node.

    Returns None when the call cannot be analyzed statically (unknown
    keyword mapping or unpredictable positional arguments), a raise
    replacement when the call is statically known to raise TypeError,
    otherwise an instance of builtin_class.
    """
    try:
        kw = node.getCallKw()

        # TODO: Could check for too many / too few, even if they are unknown, we
        # might raise that error, but that need not be optimized immediately.
        if kw is not None:
            if not kw.isMappingWithConstantStringKeys():
                # Keyword arguments are not statically known, give up.
                return None

            pairs = kw.getMappingStringKeyPairs()

            if pairs and not builtin_spec.allowsKeywords():
                raise TooManyArguments(
                    TypeError(builtin_spec.getKeywordRefusalText())
                )
        else:
            pairs = ()

        args = node.getCallArgs()

        if args:
            if not args.canPredictIterationValues():
                # Positional arguments are not statically known, give up.
                return None

            positional = args.getIterationValues()
        else:
            positional = ()

        if not positional and not pairs and empty_special_class is not None:
            # A call with no arguments at all may use a dedicated node class.
            return empty_special_class(source_ref = node.getSourceReference())

        args_dict = matchCall(
            func_name     = builtin_spec.getName(),
            args          = builtin_spec.getArgumentNames(),
            star_list_arg = builtin_spec.getStarListArgumentName(),
            star_dict_arg = builtin_spec.getStarDictArgumentName(),
            num_defaults  = builtin_spec.getDefaultCount(),
            positional    = positional,
            pairs         = pairs
        )
    except TooManyArguments as e:
        # The call is statically known to raise: replace the call with a
        # raise expression while keeping the arguments' side effects.
        from nuitka.nodes.NodeMakingHelpers import (
            makeRaiseExceptionReplacementExpressionFromInstance,
            wrapExpressionWithSideEffects
        )

        return wrapExpressionWithSideEffects(
            new_node     = makeRaiseExceptionReplacementExpressionFromInstance(
                expression = node,
                exception  = e.getRealException()
            ),
            old_node     = node,
            side_effects = node.extractPreCallSideEffects()
        )

    # Flatten the matched arguments into positional order: named arguments
    # first, then the star-list value, then the star-dict value.
    args_list = []

    for argument_name in builtin_spec.getArgumentNames():
        args_list.append(args_dict[argument_name])

    if builtin_spec.getStarListArgumentName() is not None:
        args_list.append(args_dict[builtin_spec.getStarListArgumentName()])

    if builtin_spec.getStarDictArgumentName() is not None:
        args_list.append(args_dict[builtin_spec.getStarDictArgumentName()])

    # Using list reference for passing the arguments without names,
    result = builtin_class(
        *args_list,
        source_ref = node.getSourceReference()
    )

    result.setCompatibleSourceReference(node.getCompatibleSourceReference())

    return result
|
{
"content_hash": "e91dc01a23fa981594264537255def1d",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 114,
"avg_line_length": 35.826405867970664,
"alnum_prop": 0.5987169862826724,
"repo_name": "wfxiang08/Nuitka",
"id": "9c82725f90bee1a8a8a525ee383287c759d267f5",
"size": "15433",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "nuitka/optimizations/BuiltinOptimization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5518"
},
{
"name": "Batchfile",
"bytes": "1810"
},
{
"name": "C",
"bytes": "36149"
},
{
"name": "C++",
"bytes": "441058"
},
{
"name": "Python",
"bytes": "4431574"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
}
|
'''
Relative Layout
===============
.. versionadded:: 1.4.0
This layout allows you to set relative coordinates for children. If you want
absolute positioning, use the :class:`~kivy.uix.floatlayout.FloatLayout`.
The :class:`RelativeLayout` class behaves just like the regular
:class:`FloatLayout` except that its child widgets are positioned relative to
the layout.
For example, if you create a RelativeLayout, add a widget with position =
(0,0), the child widget will also move when you change the position of the
RelativeLayout. The child widgets coordinates remain (0,0) i.e. they are
always relative to the containing layout.
Coordinate Systems
------------------
Window coordinates
~~~~~~~~~~~~~~~~~~
By default, there's only one coordinate system that defines the position of
widgets and touch events dispatched to them: the window coordinate system,
which places (0, 0) at the bottom left corner of
the window. Although there are other coordinate systems defined, e.g. local
and parent coordinates, these coordinate systems are identical to the window
coordinate system as long as a relative layout type widget is not in the
widget's parent stack. When widget.pos is read or a touch is received,
the coordinate values are in parent coordinates, but as mentioned, these are
identical to window coordinates, even in complex widget stacks.
For example::
BoxLayout:
Label:
text: 'Left'
Button:
text: 'Middle'
on_touch_down: print('Middle: {}'.format(args[1].pos))
BoxLayout:
on_touch_down: print('Box: {}'.format(args[1].pos))
Button:
text: 'Right'
on_touch_down: print('Right: {}'.format(args[1].pos))
When the middle button is clicked and the touch propagates through the
different parent coordinate systems, it prints the following::
>>> Box: (430.0, 282.0)
>>> Right: (430.0, 282.0)
>>> Middle: (430.0, 282.0)
As claimed, the touch has identical coordinates to the window coordinates
in every coordinate system. :meth:`~kivy.uix.widget.Widget.collide_point`
for example, takes the point in window coordinates.
Parent coordinates
~~~~~~~~~~~~~~~~~~
Other :class:`RelativeLayout` type widgets are
:class:`~kivy.uix.scatter.Scatter`,
:class:`~kivy.uix.scatterlayout.ScatterLayout`,
and :class:`~kivy.uix.scrollview.ScrollView`. If such a special widget is in
the parent stack, only then does the parent and local coordinate system
diverge from the window coordinate system. For each such widget in the stack, a
coordinate system with (0, 0) of that coordinate system being at the bottom
left corner of that widget is created. **Position and touch coordinates
received and read by a widget are in the coordinate system of the most
recent special widget in its parent stack (not including itself) or in window
coordinates if there are none** (as in the first example). We call these
coordinates parent coordinates.
For example::
BoxLayout:
Label:
text: 'Left'
Button:
text: 'Middle'
on_touch_down: print('Middle: {}'.format(args[1].pos))
RelativeLayout:
on_touch_down: print('Relative: {}'.format(args[1].pos))
Button:
text: 'Right'
on_touch_down: print('Right: {}'.format(args[1].pos))
Clicking on the middle button prints::
>>> Relative: (396.0, 298.0)
>>> Right: (-137.33, 298.0)
>>> Middle: (396.0, 298.0)
As the touch propagates through the widgets, for each widget, the
touch is received in parent coordinates. Because both the relative and middle
widgets don't have these special widgets in their parent stack, the touch is
the same as window coordinates. Only the right widget, which has a
RelativeLayout in its parent stack, receives the touch in coordinates relative
to that RelativeLayout which is different than window coordinates.
Local and Widget coordinates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When expressed in parent coordinates, the position is expressed in the
coordinates of the most recent special widget in its parent stack, not
including itself. When expressed in local or widget coordinates, the widgets
themselves are also included.
Changing the above example to transform the parent coordinates into local
coordinates::
BoxLayout:
Label:
text: 'Left'
Button:
text: 'Middle'
on_touch_down: print('Middle: {}'.format(\
self.to_local(*args[1].pos)))
RelativeLayout:
on_touch_down: print('Relative: {}'.format(\
self.to_local(*args[1].pos)))
Button:
text: 'Right'
on_touch_down: print('Right: {}'.format(\
self.to_local(*args[1].pos)))
Now, clicking on the middle button prints::
>>> Relative: (-135.33, 301.0)
>>> Right: (-135.33, 301.0)
>>> Middle: (398.0, 301.0)
This is because now the relative widget also expresses the coordinates
relative to itself.
Coordinate transformations
~~~~~~~~~~~~~~~~~~~~~~~~~~
:class:`~kivy.uix.widget.Widget` provides 4 functions to transform coordinates
between the various coordinate systems. For now, we assume that the `relative`
keyword of these functions is `False`.
:meth:`~kivy.uix.widget.Widget.to_widget` takes the coordinates expressed in
window coordinates and returns them in local (widget) coordinates.
:meth:`~kivy.uix.widget.Widget.to_window` takes the coordinates expressed in
local coordinates and returns them in window coordinates.
:meth:`~kivy.uix.widget.Widget.to_parent` takes the coordinates expressed in
local coordinates and returns them in parent coordinates.
:meth:`~kivy.uix.widget.Widget.to_local` takes the coordinates expressed in
parent coordinates and returns them in local coordinates.
Each of the 4 transformation functions take a `relative` parameter. When the
relative parameter is True, the coordinates are returned or originate in
true relative coordinates - relative to a coordinate system with its (0, 0) at
the bottom left corner of the widget in question.
.. versionchanged:: 1.7.0
Prior to version 1.7.0, the :class:`RelativeLayout` was implemented as a
:class:`~kivy.uix.floatlayout.FloatLayout` inside a
:class:`~kivy.uix.scatter.Scatter`. This behaviour/widget has
been renamed to `ScatterLayout`. The :class:`RelativeLayout` now only
supports relative positions (and can't be rotated, scaled or translated on
a multitouch system using two or more fingers). This was done so that the
implementation could be optimized and avoid the heavier calculations of
:class:`Scatter` (e.g. inverse matrix, recalculating multiple properties
etc.)
'''
__all__ = ('RelativeLayout', )
from kivy.uix.floatlayout import FloatLayout
class RelativeLayout(FloatLayout):
    '''RelativeLayout class, see module documentation for more information.
    '''

    def __init__(self, **kw):
        self.content = FloatLayout()
        super(RelativeLayout, self).__init__(**kw)
        # Children are positioned relative to this layout, so moving the
        # layout must not trigger a relayout -- only size changes matter.
        self.unbind(pos=self._trigger_layout,
                    pos_hint=self._trigger_layout)

    def do_layout(self, *args):
        # Lay children out against (0, 0); translation into the layout's
        # actual position is handled by to_parent/to_local below.
        super(RelativeLayout, self).do_layout(pos=(0, 0))

    def to_parent(self, x, y, **k):
        '''Transform local (layout-relative) coordinates to parent ones.'''
        return (x + self.x, y + self.y)

    def to_local(self, x, y, **k):
        '''Transform parent coordinates to local (layout-relative) ones.'''
        return (x - self.x, y - self.y)

    # Each touch handler dispatches the event in local coordinates, then
    # restores the touch's original coordinate frame for the rest of the
    # widget tree. (Fixed: the original also computed unused locals
    # "x, y = touch.x, touch.y" in every handler.)

    def on_touch_down(self, touch):
        touch.push()
        touch.apply_transform_2d(self.to_local)
        ret = super(RelativeLayout, self).on_touch_down(touch)
        touch.pop()
        return ret

    def on_touch_move(self, touch):
        touch.push()
        touch.apply_transform_2d(self.to_local)
        ret = super(RelativeLayout, self).on_touch_move(touch)
        touch.pop()
        return ret

    def on_touch_up(self, touch):
        touch.push()
        touch.apply_transform_2d(self.to_local)
        ret = super(RelativeLayout, self).on_touch_up(touch)
        touch.pop()
        return ret
|
{
"content_hash": "39db0e296bcb2784f8d01627a35416e3",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 79,
"avg_line_length": 36.722727272727276,
"alnum_prop": 0.6849857655650452,
"repo_name": "ehealthafrica-ci/kivy",
"id": "d7566a83f43e63e6cd5c1af6dbaf069182b7a36f",
"size": "8079",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kivy/uix/relativelayout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2902"
},
{
"name": "C",
"bytes": "159917"
},
{
"name": "Emacs Lisp",
"bytes": "9603"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "19384"
},
{
"name": "Makefile",
"bytes": "3254"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "2798516"
},
{
"name": "Shell",
"bytes": "8828"
},
{
"name": "VimL",
"bytes": "1123"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make the 'fin' field of 'orden' optional (nullable and blankable)."""

    dependencies = [
        ('operacion', '0026_merge_20161006_1512'),
    ]

    operations = [
        migrations.AlterField(
            model_name='orden',
            name='fin',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
{
"content_hash": "c6bff11c8a552cf37f85d35754a65261",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 21.61111111111111,
"alnum_prop": 0.596401028277635,
"repo_name": "exildev/AutoLavadox",
"id": "5233ec3e595914c3512325aa2a0c21ae055fa0af",
"size": "462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "operacion/migrations/0027_auto_20161006_1512.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "238948"
},
{
"name": "HTML",
"bytes": "38341"
},
{
"name": "JavaScript",
"bytes": "620499"
},
{
"name": "PLpgSQL",
"bytes": "19460"
},
{
"name": "Python",
"bytes": "226893"
},
{
"name": "Shell",
"bytes": "3252"
}
],
"symlink_target": ""
}
|
import os
import urllib2
from contextlib import closing
from pysolar.files.utils import MissingFileError
class FileManager:
    """
    Builds more complex file download behaviours.

    config: An instance of the Configuration class.
    """
    def __init__(self, config):
        self.remote = RemoteManager(config)
        self.cache = CacheManager(config)
        self.template = config.file_template

    def download(self, filenames):
        '''
        Takes a list of file name strings and downloads each one into
        the local cache (already-cached files are left untouched).
        '''
        for filename in filenames:
            self.cache.download(filename, self.remote)

    def download_by_template(self, strings):
        '''
        Takes a list of strings, generates a list of file
        names with the configuration template and passes it
        to the download method.
        '''
        filenames = self.filenames_from_template(strings)
        self.download(filenames)

    def filenames_from_template(self, strings):
        '''
        Takes a list of strings and maps the configuration
        template onto them to generate a list of file names.

        Always returns a list; the original used map(), whose return
        type differs between Python 2 (list) and Python 3 (iterator).
        '''
        return [self.template % (s,) for s in strings]
class CacheManager:
    """
    Manages the local data file cache directory.
    """
    def __init__(self, config):
        '''
        Validates that the cache directory named in the configuration
        exists; raises ValueError when it does not.
        '''
        if not os.path.exists(config.cache):
            raise ValueError("Cache directory does not exist.")
        self.path = config.cache

    def file_exists(self, filename):
        '''
        Return True when a file called `filename` is present in the
        local cache, False otherwise.
        '''
        return os.path.exists(self.__full_path(filename))

    def write_file(self, filename, data):
        '''
        Create `filename` in the cache with `data` as its contents,
        but only if it does not already exist (never overwrites).
        '''
        if self.file_exists(filename):
            return
        with closing(self.__open(filename, 'w')) as handle:
            handle.write(data)

    def read(self, filename):
        '''
        Return the contents of the named file from the local cache.
        '''
        with closing(self.__open(filename)) as handle:
            return handle.read()

    def download(self, filename, remote):
        '''
        Fetch a single named file from `remote` into the cache when it
        is not cached yet.

        filename: a string for the filename.
        remote  : a remote repository object.
        '''
        if not self.file_exists(filename):
            self.write_file(filename, remote.read(filename))

    def __full_path(self, filename):
        # Absolute location of a cached file inside the cache directory.
        return os.path.join(self.path, filename)

    def __open(self, filename, op='r'):
        return open(self.__full_path(filename), op)
class RemoteManager:
    """
    Class that represents a remote repository of data files.

    config: An instance of the Configuration class.
    """
    # NOTE(review): relies on urllib2, which exists only on Python 2; a
    # Python 3 port would need urllib.request -- confirm the supported
    # interpreter version.

    def __init__(self, config):
        # Base URL of the remote repository.
        self.url = config.source

    def read(self, filename):
        '''
        Reads an existing remote file into memory.

        Raises:
        MissingFileError if the file cannot be found.
        URLError if the url is unknown.
        '''
        with closing(self.__open(filename)) as remote:
            content = remote.read()

        return content

    def __open(self, filename):
        ''' Raises a URLError if the url is unknown. '''
        try:
            return urllib2.urlopen(self.url + '/' + filename)
        except urllib2.HTTPError as ex:
            self.__handle_http_error(ex, filename)

    def __handle_http_error(self, http_error, filename):
        # 404 means the file itself is missing from the repository; any
        # other HTTP error is re-raised unchanged.
        if http_error.code == 404:
            raise MissingFileError("%s is missing in the remote location." %
                                   filename)
        else:
            raise
|
{
"content_hash": "c53515e26605b27d41b46a08052e3b72",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 77,
"avg_line_length": 32.3875968992248,
"alnum_prop": 0.5866443274293921,
"repo_name": "conceptric/pysolar",
"id": "a62b549d68fdaf19c7f0a2c3f29fca26ae602865",
"size": "4178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysolar/files/repository.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49736"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import sys
from os import access
from os import getenv
from os import X_OK
# Name of the JAR this wrapper script launches (resolved next to the script).
jar_file = 'SearchGUI-2.9.0.jar'

# JVM memory defaults, applied when the caller supplies no -Xm* options and
# _JAVA_OPTIONS is absent from the environment (see jvm_opts).
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']

# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the executable name of the Java interpreter.

    Prefers $JAVA_HOME/bin/java when it exists and is executable,
    falling back to plain "java" from the PATH otherwise.
    """
    java_home = getenv('JAVA_HOME')
    if not java_home:
        return 'java'

    candidate = os.path.join(java_home, 'bin', 'java')
    if access(candidate, X_OK):
        return candidate

    return 'java'
def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.
    The return value is a 3-tuple of lists of strings of the form:
    (memory_options, prop_options, passthrough_options)
    """
    mem_opts, prop_opts, pass_args = [], [], []

    for arg in argv:
        if arg.startswith('-D') or arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        else:
            pass_args.append(arg)

    # In the original shell script the test coded below read:
    #   if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if not mem_opts and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return mem_opts, prop_opts, pass_args
def main():
    # Resolve the Java interpreter and the directory holding the JAR.
    java = java_executable()
    jar_dir = real_dirname(sys.argv[0])
    (mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])

    # A first passthrough argument starting with 'eu' is treated as a
    # fully-qualified Java class name (presumably eu.* from the SearchGUI
    # package -- verify), so run with -cp instead of -jar.
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'

    jar_path = os.path.join(jar_dir, jar_file)

    java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args

    # Propagate the JVM's return code as our own exit status.
    sys.exit(subprocess.call(java_args))
# Script entry point: launch the wrapped JAR with the assembled JVM options.
if __name__ == '__main__':
    main()
|
{
"content_hash": "584f3a25385b923acbad3aca2fabb0e3",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 82,
"avg_line_length": 28.51851851851852,
"alnum_prop": 0.6220779220779221,
"repo_name": "zwanli/bioconda-recipes",
"id": "6d34917380664c24e5073e650bd1da9e95c01cf9",
"size": "2612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/searchgui/2.9.0/searchgui.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "488"
},
{
"name": "C",
"bytes": "102428"
},
{
"name": "C++",
"bytes": "486"
},
{
"name": "FORTRAN",
"bytes": "1040"
},
{
"name": "Perl",
"bytes": "84584"
},
{
"name": "Perl6",
"bytes": "16"
},
{
"name": "Python",
"bytes": "126004"
},
{
"name": "Shell",
"bytes": "715780"
}
],
"symlink_target": ""
}
|
"""Tests for the AVM Fritz!Box integration."""
from __future__ import annotations
from unittest.mock import Mock
from aiohttp import ClientSession
from homeassistant.components.diagnostics import REDACTED
from homeassistant.components.fritzbox.const import DOMAIN as FB_DOMAIN
from homeassistant.components.fritzbox.diagnostics import TO_REDACT
from homeassistant.const import CONF_DEVICES
from homeassistant.core import HomeAssistant
from . import setup_config_entry
from .const import MOCK_CONFIG
from tests.components.diagnostics import get_diagnostics_for_config_entry
async def test_entry_diagnostics(
    hass: HomeAssistant, hass_client: ClientSession, fritz: Mock
):
    """Test config entry diagnostics."""
    assert await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0])
    config_entry = hass.config_entries.async_entries(FB_DOMAIN)[0]
    # Build the expected payload: the entry dict with sensitive fields redacted.
    expected_entry = config_entry.as_dict()
    expected_entry["data"].update({field: REDACTED for field in TO_REDACT})
    diagnostics = await get_diagnostics_for_config_entry(
        hass, hass_client, config_entry
    )
    assert diagnostics == {"entry": expected_entry, "data": {}}
|
{
"content_hash": "b20a7b60953f97c5afbce9d60495b599",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 82,
"avg_line_length": 33.72727272727273,
"alnum_prop": 0.7601078167115903,
"repo_name": "nkgilley/home-assistant",
"id": "9efe6c23902b280c060981a58aeb71081c061b84",
"size": "1113",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "tests/components/fritzbox/test_diagnostics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import os
import argparse
# Creates a new fileset with all the necessary files.
# In case the module does not exist, also the module is created.
# Creates a new fileset with all the necessary files.
# In case the module does not exist, also the module is created.
def generate_fileset(base_path, metricbeat_path, module, fileset):
    """Create a new fileset from the shipped templates.

    Ensures the enclosing module exists (via generate_module), then renders
    the per-fileset templates into ``<base_path>/module/<module>/<fileset>``,
    substituting {module}/{fileset} placeholders.  If the fileset directory
    already exists, nothing is written.

    Args:
        base_path: root directory the module/fileset tree is generated into.
        metricbeat_path: path to the metricbeat folder holding the templates.
        module: module name (target of the fileset).
        fileset: name of the fileset to create.
    """
    generate_module(base_path, metricbeat_path, module, fileset)
    fileset_path = os.path.join(base_path, "module", module, fileset)
    meta_path = os.path.join(fileset_path, "_meta")
    if os.path.isdir(fileset_path):
        print("Fileset already exists. Skipping creating fileset {}"
              .format(fileset))
        return
    # makedirs(meta_path) also creates fileset_path itself.
    os.makedirs(meta_path)
    os.makedirs(os.path.join(fileset_path, "test"))
    templates = os.path.join(metricbeat_path, "scripts", "module", "fileset")
    # (template file relative to `templates`, target directory, target name)
    renderings = [
        ("fields.yml", meta_path, "fields.yml"),
        (os.path.join("config", "config.yml"),
         os.path.join(fileset_path, "config"), fileset + ".yml"),
        (os.path.join("ingest", "pipeline.json"),
         os.path.join(fileset_path, "ingest"), "pipeline.json"),
        ("manifest.yml", fileset_path, "manifest.yml"),
    ]
    for template_name, target_dir, target_name in renderings:
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)
        content = load_file(os.path.join(templates, template_name),
                            module, fileset)
        with open(os.path.join(target_dir, target_name), "w") as f:
            f.write(content)
    print("Fileset {} created.".format(fileset))
def generate_module(base_path, metricbeat_path, module, fileset):
    """Create the module skeleton if it does not exist yet.

    Renders the module-level templates (fields.yml, docs.asciidoc) into
    ``<base_path>/module/<module>/_meta``.  Does nothing if the module
    directory already exists.

    Args:
        base_path: root directory the module tree is generated into.
        metricbeat_path: path to the metricbeat folder holding the templates.
        module: module name to create.
        fileset: unused; kept for signature compatibility with callers.
    """
    module_path = os.path.join(base_path, "module", module)
    meta_path = os.path.join(module_path, "_meta")
    if os.path.isdir(module_path):
        print("Module already exists. Skipping creating module {}"
              .format(module))
        return
    # makedirs(meta_path) also creates module_path itself.
    os.makedirs(meta_path)
    templates = os.path.join(metricbeat_path, "scripts", "module")
    for template_name in ("fields.yml", "docs.asciidoc"):
        content = load_file(os.path.join(templates, template_name), module, "")
        with open(os.path.join(meta_path, template_name), "w") as f:
            f.write(content)
    print("Module {} created.".format(module))
def load_file(file, module, fileset):
    """Read the template at *file* and substitute its placeholders.

    Every occurrence of ``{module}`` is replaced with *module* and every
    occurrence of ``{fileset}`` with *fileset*; the rendered text is
    returned.
    """
    with open(file) as handle:
        template = handle.read()
    return template.replace("{module}", module).replace("{fileset}", fileset)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Creates a fileset")
    parser.add_argument("--module", help="Module name")
    parser.add_argument("--fileset", help="Fileset name")
    parser.add_argument("--path", help="Beat path")
    parser.add_argument("--es_beats",
                        help="The path to the general beats folder")
    args = parser.parse_args()
    # raw_input() was removed in Python 3 (renamed to input()); fall back so
    # the interactive prompts work on both interpreter versions.
    try:
        read_input = raw_input
    except NameError:
        read_input = input
    if args.path is None:
        args.path = './'
        print("Set default path for beat path: " + args.path)
    if args.es_beats is None:
        args.es_beats = '../'
        print("Set default path for es_beats path: " + args.es_beats)
    # Prompt for any name not supplied on the command line.
    if args.module is None or args.module == '':
        args.module = read_input("Module name: ")
    if args.fileset is None or args.fileset == '':
        args.fileset = read_input("Fileset name: ")
    path = os.path.abspath(args.path)
    filebeat_path = os.path.abspath(args.es_beats + "/filebeat")
    generate_fileset(path, filebeat_path, args.module.lower(),
                     args.fileset.lower())
|
{
"content_hash": "83db6adf4d306df9e04535678299698b",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 77,
"avg_line_length": 32.467289719626166,
"alnum_prop": 0.6269430051813472,
"repo_name": "taitan-org/inflog",
"id": "2f5b0dd6043b6dc7a59b04a805c60e79285eb638",
"size": "3474",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "filebeat/scripts/create_fileset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "216"
},
{
"name": "Go",
"bytes": "1171204"
},
{
"name": "Makefile",
"bytes": "23899"
},
{
"name": "Python",
"bytes": "250534"
},
{
"name": "Shell",
"bytes": "1141"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, unicode_literals, absolute_import
import sys
# The exact header text that every disclaimed file must begin with.
disclaimer = """# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# NOTE(review): split('\n')[-1] selects the element after the final newline,
# which is the empty string, so firstLine is '' — [0] may have been intended.
# It is unused below; value preserved exactly to keep behavior identical.
firstLine = disclaimer.split('\n')[-1].strip()
def disclaimFile(filename):
  """
    Adds the standard BEA copyright and Apache license header to provided file.
    @ In, filename, str, file (including path and extension) to modify
    @ Out, None
  """
  with open(filename, 'r') as source:
    original = source.read()
  # Skip files that already carry the header.  (As in the original, this
  # containment check assumes '\n' line endings.)
  if disclaimer in original:
    print('Disclaimer already present in', filename)
    return
  with open(filename, 'w') as target:
    target.write(disclaimer + original)
  print('Disclaimer added to', filename)
if __name__ == '__main__':
  # Every command-line argument is a file to prepend the disclaimer to.
  targets = sys.argv[1:]
  for fileName in targets:
    disclaimFile(fileName)
|
{
"content_hash": "1388c78e6ac1848e56a844ea31083650",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 101,
"avg_line_length": 33.68888888888889,
"alnum_prop": 0.7170184696569921,
"repo_name": "joshua-cogliati-inl/raven",
"id": "ea47328913fea64a6d34b8c511684eb2abcefc9f",
"size": "2106",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "developer_tools/addBEADisclaimer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1556080"
},
{
"name": "Batchfile",
"bytes": "1095"
},
{
"name": "C",
"bytes": "148504"
},
{
"name": "C++",
"bytes": "48279546"
},
{
"name": "CMake",
"bytes": "9998"
},
{
"name": "Jupyter Notebook",
"bytes": "84202"
},
{
"name": "MATLAB",
"bytes": "202335"
},
{
"name": "Makefile",
"bytes": "2399"
},
{
"name": "Perl",
"bytes": "1297"
},
{
"name": "Python",
"bytes": "6952659"
},
{
"name": "R",
"bytes": "67"
},
{
"name": "SWIG",
"bytes": "8574"
},
{
"name": "Shell",
"bytes": "124279"
},
{
"name": "TeX",
"bytes": "479725"
}
],
"symlink_target": ""
}
|
"""Tests for learn.estimators.dynamic_rnn_estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from tensorflow.contrib import rnn
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import target_column as target_column_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import dynamic_rnn_estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class IdentityRNNCell(rnn.RNNCell):
  """Toy RNN cell: passes inputs through and emits an all-ones state."""

  def __init__(self, state_size, output_size):
    self._state_size = state_size
    self._output_size = output_size

  @property
  def state_size(self):
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state):
    # Output is the input unchanged; the new state is a [batch, state_size]
    # tensor of ones, regardless of the incoming state.
    batch = array_ops.shape(inputs)[0]
    new_state = array_ops.ones([batch, self.state_size])
    return array_ops.identity(inputs), new_state
class MockTargetColumn(object):
  """Target-column stand-in that only tracks num_label_columns.

  All real target-column operations raise NotImplementedError so that any
  unexpected call surfaces immediately in a test.
  """

  def __init__(self, num_label_columns=None):
    self._num_label_columns = num_label_columns

  @property
  def num_label_columns(self):
    """Number of label columns; raises if it was never set."""
    if self._num_label_columns is None:
      raise ValueError('MockTargetColumn.num_label_columns has not been set.')
    return self._num_label_columns

  def set_num_label_columns(self, n):
    self._num_label_columns = n

  def get_eval_ops(self, features, activations, labels, metrics):
    raise NotImplementedError(
        'MockTargetColumn.get_eval_ops called unexpectedly.')

  def logits_to_predictions(self, flattened_activations, proba=False):
    raise NotImplementedError(
        'MockTargetColumn.logits_to_predictions called unexpectedly.')

  def loss(self, activations, labels, features):
    raise NotImplementedError('MockTargetColumn.loss called unexpectedly.')
def sequence_length_mask(values, lengths):
  """Zero out, in place, each row of `values` beyond its valid length.

  Args:
    values: numpy array of shape [batch, padded_length, ...].
    lengths: iterable of valid sequence lengths, one per batch row.

  Returns:
    The same `values` array, with entries at index >= the row's length zeroed.
  """
  for row_index, valid_length in enumerate(lengths):
    values[row_index, valid_length:, :] = np.zeros_like(
        values[row_index, valid_length:, :])
  return values
class DynamicRnnEstimatorTest(test.TestCase):
  """Unit tests for the building blocks of DynamicRnnEstimator.

  Covers sequence-input construction, RNN unrolling, output alternatives,
  the generated model_fn per mode, export, and state dict/tuple handling.
  """
  # Sizes shared by the fixtures built in setUp().
  NUM_RNN_CELL_UNITS = 8
  NUM_LABEL_COLUMNS = 6
  INPUTS_COLUMN = feature_column.real_valued_column(
      'inputs', dimension=NUM_LABEL_COLUMNS)
  def setUp(self):
    """Create an RNN cell plus context and sequence feature columns."""
    super(DynamicRnnEstimatorTest, self).setUp()
    self.rnn_cell = rnn_cell.BasicRNNCell(self.NUM_RNN_CELL_UNITS)
    self.mock_target_column = MockTargetColumn(
        num_label_columns=self.NUM_LABEL_COLUMNS)
    # Context (per-example) feature: one-hot 'location'.
    location = feature_column.sparse_column_with_keys(
        'location', keys=['west_side', 'east_side', 'nyc'])
    location_onehot = feature_column.one_hot_column(location)
    self.context_feature_columns = [location_onehot]
    # Sequence (per-timestep) features: embedded 'wire_cast' plus
    # real-valued 'measurements'.
    wire_cast = feature_column.sparse_column_with_keys(
        'wire_cast', ['marlo', 'omar', 'stringer'])
    wire_cast_embedded = feature_column.embedding_column(wire_cast, dimension=8)
    measurements = feature_column.real_valued_column(
        'measurements', dimension=2)
    self.sequence_feature_columns = [measurements, wire_cast_embedded]
  def GetColumnsToTensors(self):
    """Get columns_to_tensors matching setUp(), in the current default graph."""
    return {
        'location':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [1, 0], [2, 0]],
                values=['west_side', 'west_side', 'nyc'],
                dense_shape=[3, 1]),
        'wire_cast':
            sparse_tensor.SparseTensor(
                indices=[[0, 0, 0], [0, 1, 0],
                         [1, 0, 0], [1, 1, 0], [1, 1, 1],
                         [2, 0, 0]],
                values=[b'marlo', b'stringer',
                        b'omar', b'stringer', b'marlo',
                        b'marlo'],
                dense_shape=[3, 2, 2]),
        'measurements':
            random_ops.random_uniform(
                [3, 2, 2], seed=4711)
    }
  def GetClassificationTargetsOrNone(self, mode):
    """Get targets matching setUp() and mode, in the current default graph."""
    # Random binary labels; INFER mode has no labels by definition.
    return (random_ops.random_uniform(
        [3, 2, 1], 0, 2, dtype=dtypes.int64, seed=1412) if
            mode != model_fn_lib.ModeKeys.INFER else None)
  def testBuildSequenceInputInput(self):
    """Concatenated sequence input has the expected padded shape."""
    sequence_input = dynamic_rnn_estimator.build_sequence_input(
        self.GetColumnsToTensors(), self.sequence_feature_columns,
        self.context_feature_columns)
    with self.cached_session() as sess:
      sess.run(variables.global_variables_initializer())
      sess.run(lookup_ops.tables_initializer())
      sequence_input_val = sess.run(sequence_input)
    expected_shape = np.array([
        3,  # expected batch size
        2,  # padded sequence length
        3 + 8 + 2  # location keys + embedding dim + measurement dimension
    ])
    self.assertAllEqual(expected_shape, sequence_input_val.shape)
  def testConstructRNN(self):
    """construct_rnn yields activations and final state of expected shapes."""
    initial_state = None
    sequence_input = dynamic_rnn_estimator.build_sequence_input(
        self.GetColumnsToTensors(), self.sequence_feature_columns,
        self.context_feature_columns)
    activations_t, final_state_t = dynamic_rnn_estimator.construct_rnn(
        initial_state, sequence_input, self.rnn_cell,
        self.mock_target_column.num_label_columns)
    # Obtain values of activations and final state.
    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      sess.run(lookup_ops.tables_initializer())
      activations, final_state = sess.run([activations_t, final_state_t])
    expected_activations_shape = np.array([3, 2, self.NUM_LABEL_COLUMNS])
    self.assertAllEqual(expected_activations_shape, activations.shape)
    expected_state_shape = np.array([3, self.NUM_RNN_CELL_UNITS])
    self.assertAllEqual(expected_state_shape, final_state.shape)
  def testGetOutputAlternatives(self):
    """_get_output_alternatives returns the expected alternatives per case."""
    # Each case is (prediction_type, problem_type, prediction_dict,
    # expected_output_alternatives).
    test_cases = (
        (rnn_common.PredictionType.SINGLE_VALUE,
         constants.ProblemType.CLASSIFICATION,
         {prediction_key.PredictionKey.CLASSES: True,
          prediction_key.PredictionKey.PROBABILITIES: True,
          dynamic_rnn_estimator._get_state_name(0): True},
         {'dynamic_rnn_output':
          (constants.ProblemType.CLASSIFICATION,
           {prediction_key.PredictionKey.CLASSES: True,
            prediction_key.PredictionKey.PROBABILITIES: True})}),
        (rnn_common.PredictionType.SINGLE_VALUE,
         constants.ProblemType.LINEAR_REGRESSION,
         {prediction_key.PredictionKey.SCORES: True,
          dynamic_rnn_estimator._get_state_name(0): True,
          dynamic_rnn_estimator._get_state_name(1): True},
         {'dynamic_rnn_output':
          (constants.ProblemType.LINEAR_REGRESSION,
           {prediction_key.PredictionKey.SCORES: True})}),
        (rnn_common.PredictionType.MULTIPLE_VALUE,
         constants.ProblemType.CLASSIFICATION,
         {prediction_key.PredictionKey.CLASSES: True,
          prediction_key.PredictionKey.PROBABILITIES: True,
          dynamic_rnn_estimator._get_state_name(0): True},
         None))
    for pred_type, prob_type, pred_dict, expected_alternatives in test_cases:
      actual_alternatives = dynamic_rnn_estimator._get_output_alternatives(
          pred_type, prob_type, pred_dict)
      self.assertEqual(expected_alternatives, actual_alternatives)
  # testGetDynamicRnnModelFn{Train,Eval,Infer}() test which fields
  # of ModelFnOps are set depending on mode.
  def testGetDynamicRnnModelFnTrain(self):
    """TRAIN mode sets predictions, loss, train_op and eval metrics."""
    model_fn_ops = self._GetModelFnOpsForMode(model_fn_lib.ModeKeys.TRAIN)
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    # None may get normalized to {}; we accept neither.
    self.assertNotEqual(len(model_fn_ops.eval_metric_ops), 0)
  def testGetDynamicRnnModelFnEval(self):
    """EVAL mode sets predictions, loss and metrics but no train_op."""
    model_fn_ops = self._GetModelFnOpsForMode(model_fn_lib.ModeKeys.EVAL)
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    # None may get normalized to {}; we accept neither.
    self.assertNotEqual(len(model_fn_ops.eval_metric_ops), 0)
  def testGetDynamicRnnModelFnInfer(self):
    """INFER mode sets predictions only."""
    model_fn_ops = self._GetModelFnOpsForMode(model_fn_lib.ModeKeys.INFER)
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    # None may get normalized to {}; we accept both.
    self.assertFalse(model_fn_ops.eval_metric_ops)
  def _GetModelFnOpsForMode(self, mode):
    """Helper for testGetDynamicRnnModelFn{Train,Eval,Infer}()."""
    model_fn = dynamic_rnn_estimator._get_dynamic_rnn_model_fn(
        cell_type='basic_rnn',
        num_units=[10],
        target_column=target_column_lib.multi_class_target(n_classes=2),
        # Only CLASSIFICATION yields eval metrics to test for.
        problem_type=constants.ProblemType.CLASSIFICATION,
        prediction_type=rnn_common.PredictionType.MULTIPLE_VALUE,
        optimizer='SGD',
        sequence_feature_columns=self.sequence_feature_columns,
        context_feature_columns=self.context_feature_columns,
        learning_rate=0.1)
    labels = self.GetClassificationTargetsOrNone(mode)
    model_fn_ops = model_fn(
        features=self.GetColumnsToTensors(), labels=labels, mode=mode)
    return model_fn_ops
  def testExport(self):
    """export() works on a fresh estimator that was never fit()."""
    input_feature_key = 'magic_input_feature_key'
    def get_input_fn(mode):
      def input_fn():
        features = self.GetColumnsToTensors()
        if mode == model_fn_lib.ModeKeys.INFER:
          input_examples = array_ops.placeholder(dtypes.string)
          features[input_feature_key] = input_examples
          # Real code would now parse features out of input_examples,
          # but this test can just stick to the constants above.
        return features, self.GetClassificationTargetsOrNone(mode)
      return input_fn
    model_dir = tempfile.mkdtemp()
    def estimator_fn():
      return dynamic_rnn_estimator.DynamicRnnEstimator(
          problem_type=constants.ProblemType.CLASSIFICATION,
          prediction_type=rnn_common.PredictionType.MULTIPLE_VALUE,
          num_classes=2,
          num_units=self.NUM_RNN_CELL_UNITS,
          sequence_feature_columns=self.sequence_feature_columns,
          context_feature_columns=self.context_feature_columns,
          predict_probabilities=True,
          model_dir=model_dir)
    # Train a bit to create an exportable checkpoint.
    estimator_fn().fit(input_fn=get_input_fn(model_fn_lib.ModeKeys.TRAIN),
                       steps=100)
    # Now export, but from a fresh estimator instance, like you would
    # in an export binary. That means .export() has to work without
    # .fit() being called on the same object.
    export_dir = tempfile.mkdtemp()
    print('Exporting to', export_dir)
    estimator_fn().export(
        export_dir,
        input_fn=get_input_fn(model_fn_lib.ModeKeys.INFER),
        use_deprecated_input_fn=False,
        input_feature_key=input_feature_key)
  def testStateTupleDictConversion(self):
    """Test `state_tuple_to_dict` and `dict_to_state_tuple`."""
    cell_sizes = [5, 3, 7]
    # A MultiRNNCell of LSTMCells is both a common choice and an interesting
    # test case, because it has two levels of nesting, with an inner class that
    # is not a plain tuple.
    cell = rnn_cell.MultiRNNCell(
        [rnn_cell.LSTMCell(i) for i in cell_sizes])
    state_dict = {
        dynamic_rnn_estimator._get_state_name(i):
        array_ops.expand_dims(math_ops.range(cell_size), 0)
        for i, cell_size in enumerate([5, 5, 3, 3, 7, 7])
    }
    expected_state = (rnn_cell.LSTMStateTuple(
        np.reshape(np.arange(5), [1, -1]), np.reshape(np.arange(5), [1, -1])),
                      rnn_cell.LSTMStateTuple(
                          np.reshape(np.arange(3), [1, -1]),
                          np.reshape(np.arange(3), [1, -1])),
                      rnn_cell.LSTMStateTuple(
                          np.reshape(np.arange(7), [1, -1]),
                          np.reshape(np.arange(7), [1, -1])))
    actual_state = dynamic_rnn_estimator.dict_to_state_tuple(state_dict, cell)
    flattened_state = dynamic_rnn_estimator.state_tuple_to_dict(actual_state)
    with self.cached_session() as sess:
      (state_dict_val, actual_state_val, flattened_state_val) = sess.run(
          [state_dict, actual_state, flattened_state])
    def _recursive_assert_equal(x, y):
      # Compare nested tuples/lists element-wise, arrays with numpy.
      self.assertEqual(type(x), type(y))
      if isinstance(x, (list, tuple)):
        self.assertEqual(len(x), len(y))
        for i, _ in enumerate(x):
          _recursive_assert_equal(x[i], y[i])
      elif isinstance(x, np.ndarray):
        np.testing.assert_array_equal(x, y)
      else:
        self.fail('Unexpected type: {}'.format(type(x)))
    for k in state_dict_val.keys():
      np.testing.assert_array_almost_equal(
          state_dict_val[k],
          flattened_state_val[k],
          err_msg='Wrong value for state component {}.'.format(k))
    _recursive_assert_equal(expected_state, actual_state_val)
  def testMultiRNNState(self):
    """Test that state flattening/reconstruction works for `MultiRNNCell`."""
    batch_size = 11
    sequence_length = 16
    train_steps = 5
    cell_sizes = [4, 8, 7]
    learning_rate = 0.1
    def get_shift_input_fn(batch_size, sequence_length, seed=None):
      def input_fn():
        random_sequence = random_ops.random_uniform(
            [batch_size, sequence_length + 1],
            0,
            2,
            dtype=dtypes.int32,
            seed=seed)
        labels = array_ops.slice(random_sequence, [0, 0],
                                 [batch_size, sequence_length])
        inputs = array_ops.expand_dims(
            math_ops.to_float(
                array_ops.slice(random_sequence, [0, 1],
                                [batch_size, sequence_length])), 2)
        # Feed an initial state component for each LSTM (c, h) pair.
        input_dict = {
            dynamic_rnn_estimator._get_state_name(i): random_ops.random_uniform(
                [batch_size, cell_size], seed=((i + 1) * seed))
            for i, cell_size in enumerate([4, 4, 8, 8, 7, 7])
        }
        input_dict['inputs'] = inputs
        return input_dict, labels
      return input_fn
    seq_columns = [feature_column.real_valued_column('inputs', dimension=1)]
    config = run_config.RunConfig(tf_random_seed=21212)
    cell_type = 'lstm'
    sequence_estimator = dynamic_rnn_estimator.DynamicRnnEstimator(
        problem_type=constants.ProblemType.CLASSIFICATION,
        prediction_type=rnn_common.PredictionType.MULTIPLE_VALUE,
        num_classes=2,
        num_units=cell_sizes,
        sequence_feature_columns=seq_columns,
        cell_type=cell_type,
        learning_rate=learning_rate,
        config=config,
        predict_probabilities=True)
    train_input_fn = get_shift_input_fn(batch_size, sequence_length, seed=12321)
    eval_input_fn = get_shift_input_fn(batch_size, sequence_length, seed=32123)
    sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
    prediction_dict = sequence_estimator.predict(
        input_fn=eval_input_fn, as_iterable=False)
    for i, state_size in enumerate([4, 4, 8, 8, 7, 7]):
      state_piece = prediction_dict[dynamic_rnn_estimator._get_state_name(i)]
      self.assertListEqual(list(state_piece.shape), [batch_size, state_size])
  def testMultipleRuns(self):
    """Tests resuming training by feeding state."""
    cell_sizes = [4, 7]
    batch_size = 11
    learning_rate = 0.1
    train_sequence_length = 21
    train_steps = 121
    dropout_keep_probabilities = [0.5, 0.5, 0.5]
    prediction_steps = [3, 2, 5, 11, 6]
    def get_input_fn(batch_size, sequence_length, state_dict, starting_step=0):
      def input_fn():
        # Deterministic alternating 0/1 sequence, offset by starting_step so
        # consecutive runs continue where the previous one left off.
        sequence = constant_op.constant(
            [[(starting_step + i + j) % 2 for j in range(sequence_length + 1)]
             for i in range(batch_size)],
            dtype=dtypes.int32)
        labels = array_ops.slice(sequence, [0, 0],
                                 [batch_size, sequence_length])
        inputs = array_ops.expand_dims(
            math_ops.to_float(
                array_ops.slice(sequence, [0, 1], [batch_size, sequence_length
                                                  ])), 2)
        input_dict = state_dict
        input_dict['inputs'] = inputs
        return input_dict, labels
      return input_fn
    seq_columns = [feature_column.real_valued_column('inputs', dimension=1)]
    config = run_config.RunConfig(tf_random_seed=21212)
    model_dir = tempfile.mkdtemp()
    sequence_estimator = dynamic_rnn_estimator.DynamicRnnEstimator(
        problem_type=constants.ProblemType.CLASSIFICATION,
        prediction_type=rnn_common.PredictionType.MULTIPLE_VALUE,
        num_classes=2,
        sequence_feature_columns=seq_columns,
        num_units=cell_sizes,
        cell_type='lstm',
        dropout_keep_probabilities=dropout_keep_probabilities,
        learning_rate=learning_rate,
        config=config,
        model_dir=model_dir)
    train_input_fn = get_input_fn(
        batch_size, train_sequence_length, state_dict={})
    sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
    def incremental_predict(estimator, increments):
      """Run `estimator.predict` for `i` steps for `i` in `increments`."""
      step = 0
      incremental_state_dict = {}
      for increment in increments:
        input_fn = get_input_fn(
            batch_size,
            increment,
            state_dict=incremental_state_dict,
            starting_step=step)
        prediction_dict = estimator.predict(
            input_fn=input_fn, as_iterable=False)
        step += increment
        # Carry the predicted final state forward as next run's initial state.
        incremental_state_dict = {
            k: v
            for (k, v) in prediction_dict.items()
            if k.startswith(rnn_common.RNNKeys.STATE_PREFIX)
        }
      return prediction_dict
    pred_all_at_once = incremental_predict(sequence_estimator,
                                           [sum(prediction_steps)])
    pred_step_by_step = incremental_predict(sequence_estimator,
                                            prediction_steps)
    # Check that the last `prediction_steps[-1]` steps give the same
    # predictions.
    np.testing.assert_array_equal(
        pred_all_at_once[prediction_key.PredictionKey.CLASSES]
        [:, -1 * prediction_steps[-1]:],
        pred_step_by_step[prediction_key.PredictionKey.CLASSES],
        err_msg='Mismatch on last {} predictions.'.format(prediction_steps[-1]))
    # Check that final states are identical.
    for k, v in pred_all_at_once.items():
      if k.startswith(rnn_common.RNNKeys.STATE_PREFIX):
        np.testing.assert_array_equal(
            v, pred_step_by_step[k], err_msg='Mismatch on state {}.'.format(k))
# TODO(jamieas): move all tests below to a benchmark test.
class DynamicRNNEstimatorLearningTest(test.TestCase):
  """Learning tests for dynamic RNN Estimators.

  Each test trains a small estimator on synthetic data and asserts that the
  eval loss/accuracy clears a fixed threshold under a fixed random seed.
  """
  def testLearnSineFunction(self):
    """Tests learning a sine function."""
    batch_size = 8
    sequence_length = 64
    train_steps = 200
    eval_steps = 20
    cell_size = [4]
    learning_rate = 0.1
    loss_threshold = 0.02
    def get_sin_input_fn(batch_size, sequence_length, increment, seed=None):
      # Maps a random start offset to a sine curve sampled at `increment`.
      def _sin_fn(x):
        ranger = math_ops.linspace(
            array_ops.reshape(x[0], []), (sequence_length - 1) * increment,
            sequence_length + 1)
        return math_ops.sin(ranger)
      def input_fn():
        starts = random_ops.random_uniform(
            [batch_size], maxval=(2 * np.pi), seed=seed)
        sin_curves = map_fn.map_fn(
            _sin_fn, (starts,), dtype=dtypes.float32)
        # Inputs are the curve; labels are the same curve shifted by one step.
        inputs = array_ops.expand_dims(
            array_ops.slice(sin_curves, [0, 0], [batch_size, sequence_length]),
            2)
        labels = array_ops.slice(sin_curves, [0, 1],
                                 [batch_size, sequence_length])
        return {'inputs': inputs}, labels
      return input_fn
    seq_columns = [
        feature_column.real_valued_column(
            'inputs', dimension=cell_size[0])
    ]
    config = run_config.RunConfig(tf_random_seed=1234)
    sequence_estimator = dynamic_rnn_estimator.DynamicRnnEstimator(
        problem_type=constants.ProblemType.LINEAR_REGRESSION,
        prediction_type=rnn_common.PredictionType.MULTIPLE_VALUE,
        num_units=cell_size,
        sequence_feature_columns=seq_columns,
        learning_rate=learning_rate,
        dropout_keep_probabilities=[0.9, 0.9],
        config=config)
    train_input_fn = get_sin_input_fn(
        batch_size, sequence_length, np.pi / 32, seed=1234)
    eval_input_fn = get_sin_input_fn(
        batch_size, sequence_length, np.pi / 32, seed=4321)
    sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
    loss = sequence_estimator.evaluate(
        input_fn=eval_input_fn, steps=eval_steps)['loss']
    self.assertLess(loss, loss_threshold,
                    'Loss should be less than {}; got {}'.format(loss_threshold,
                                                                 loss))
  def testLearnShiftByOne(self):
    """Tests learning a 'shift-by-one' example.
    Each label sequence consists of the input sequence 'shifted' by one place.
    The RNN must learn to 'remember' the previous input.
    """
    batch_size = 16
    sequence_length = 32
    train_steps = 200
    eval_steps = 20
    cell_size = 4
    learning_rate = 0.3
    accuracy_threshold = 0.9
    def get_shift_input_fn(batch_size, sequence_length, seed=None):
      def input_fn():
        random_sequence = random_ops.random_uniform(
            [batch_size, sequence_length + 1],
            0,
            2,
            dtype=dtypes.int32,
            seed=seed)
        # Labels are the sequence; inputs are the same sequence one step ahead.
        labels = array_ops.slice(random_sequence, [0, 0],
                                 [batch_size, sequence_length])
        inputs = array_ops.expand_dims(
            math_ops.to_float(
                array_ops.slice(random_sequence, [0, 1],
                                [batch_size, sequence_length])), 2)
        return {'inputs': inputs}, labels
      return input_fn
    seq_columns = [
        feature_column.real_valued_column(
            'inputs', dimension=cell_size)
    ]
    config = run_config.RunConfig(tf_random_seed=21212)
    sequence_estimator = dynamic_rnn_estimator.DynamicRnnEstimator(
        problem_type=constants.ProblemType.CLASSIFICATION,
        prediction_type=rnn_common.PredictionType.MULTIPLE_VALUE,
        num_classes=2,
        num_units=cell_size,
        sequence_feature_columns=seq_columns,
        learning_rate=learning_rate,
        config=config,
        predict_probabilities=True)
    train_input_fn = get_shift_input_fn(batch_size, sequence_length, seed=12321)
    eval_input_fn = get_shift_input_fn(batch_size, sequence_length, seed=32123)
    sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
    evaluation = sequence_estimator.evaluate(
        input_fn=eval_input_fn, steps=eval_steps)
    accuracy = evaluation['accuracy']
    self.assertGreater(accuracy, accuracy_threshold,
                       'Accuracy should be higher than {}; got {}'.format(
                           accuracy_threshold, accuracy))
    # Testing `predict` when `predict_probabilities=True`.
    prediction_dict = sequence_estimator.predict(
        input_fn=eval_input_fn, as_iterable=False)
    self.assertListEqual(
        sorted(list(prediction_dict.keys())),
        sorted([
            prediction_key.PredictionKey.CLASSES,
            prediction_key.PredictionKey.PROBABILITIES,
            dynamic_rnn_estimator._get_state_name(0)
        ]))
    predictions = prediction_dict[prediction_key.PredictionKey.CLASSES]
    probabilities = prediction_dict[
        prediction_key.PredictionKey.PROBABILITIES]
    self.assertListEqual(list(predictions.shape), [batch_size, sequence_length])
    self.assertListEqual(
        list(probabilities.shape), [batch_size, sequence_length, 2])
  def testLearnMean(self):
    """Test learning to calculate a mean."""
    batch_size = 16
    sequence_length = 3
    train_steps = 200
    eval_steps = 20
    cell_type = 'basic_rnn'
    cell_size = 8
    optimizer_type = 'Momentum'
    learning_rate = 0.1
    momentum = 0.9
    loss_threshold = 0.1
    def get_mean_input_fn(batch_size, sequence_length, seed=None):
      def input_fn():
        # Create examples by choosing 'centers' and adding uniform noise.
        centers = math_ops.matmul(
            random_ops.random_uniform(
                [batch_size, 1], -0.75, 0.75, dtype=dtypes.float32, seed=seed),
            array_ops.ones([1, sequence_length]))
        noise = random_ops.random_uniform(
            [batch_size, sequence_length],
            -0.25,
            0.25,
            dtype=dtypes.float32,
            seed=seed)
        sequences = centers + noise
        inputs = array_ops.expand_dims(sequences, 2)
        labels = math_ops.reduce_mean(sequences, axis=[1])
        return {'inputs': inputs}, labels
      return input_fn
    seq_columns = [
        feature_column.real_valued_column(
            'inputs', dimension=cell_size)
    ]
    config = run_config.RunConfig(tf_random_seed=6)
    sequence_estimator = dynamic_rnn_estimator.DynamicRnnEstimator(
        problem_type=constants.ProblemType.LINEAR_REGRESSION,
        prediction_type=rnn_common.PredictionType.SINGLE_VALUE,
        num_units=cell_size,
        sequence_feature_columns=seq_columns,
        cell_type=cell_type,
        optimizer=optimizer_type,
        learning_rate=learning_rate,
        momentum=momentum,
        config=config)
    train_input_fn = get_mean_input_fn(batch_size, sequence_length, 121)
    eval_input_fn = get_mean_input_fn(batch_size, sequence_length, 212)
    sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
    evaluation = sequence_estimator.evaluate(
        input_fn=eval_input_fn, steps=eval_steps)
    loss = evaluation['loss']
    self.assertLess(loss, loss_threshold,
                    'Loss should be less than {}; got {}'.format(loss_threshold,
                                                                 loss))
  # Name deliberately does not start with 'test', so the runner skips it.
  def DISABLED_testLearnMajority(self):
    """Test learning the 'majority' function."""
    batch_size = 16
    sequence_length = 7
    train_steps = 500
    eval_steps = 20
    cell_type = 'lstm'
    cell_size = 4
    optimizer_type = 'Momentum'
    learning_rate = 2.0
    momentum = 0.9
    accuracy_threshold = 0.6
    def get_majority_input_fn(batch_size, sequence_length, seed=None):
      random_seed.set_random_seed(seed)
      def input_fn():
        random_sequence = random_ops.random_uniform(
            [batch_size, sequence_length], 0, 2, dtype=dtypes.int32, seed=seed)
        inputs = array_ops.expand_dims(math_ops.to_float(random_sequence), 2)
        # Label is 1 iff more than half of the inputs in the sequence are 1.
        labels = math_ops.to_int32(
            array_ops.squeeze(
                math_ops.reduce_sum(inputs, axis=[1]) > (
                    sequence_length / 2.0)))
        return {'inputs': inputs}, labels
      return input_fn
    seq_columns = [
        feature_column.real_valued_column(
            'inputs', dimension=cell_size)
    ]
    config = run_config.RunConfig(tf_random_seed=77)
    sequence_estimator = dynamic_rnn_estimator.DynamicRnnEstimator(
        problem_type=constants.ProblemType.CLASSIFICATION,
        prediction_type=rnn_common.PredictionType.SINGLE_VALUE,
        num_classes=2,
        num_units=cell_size,
        sequence_feature_columns=seq_columns,
        cell_type=cell_type,
        optimizer=optimizer_type,
        learning_rate=learning_rate,
        momentum=momentum,
        config=config,
        predict_probabilities=True)
    train_input_fn = get_majority_input_fn(batch_size, sequence_length, 1111)
    eval_input_fn = get_majority_input_fn(batch_size, sequence_length, 2222)
    sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
    evaluation = sequence_estimator.evaluate(
        input_fn=eval_input_fn, steps=eval_steps)
    accuracy = evaluation['accuracy']
    self.assertGreater(accuracy, accuracy_threshold,
                       'Accuracy should be higher than {}; got {}'.format(
                           accuracy_threshold, accuracy))
    # Testing `predict` when `predict_probabilities=True`.
    prediction_dict = sequence_estimator.predict(
        input_fn=eval_input_fn, as_iterable=False)
    self.assertListEqual(
        sorted(list(prediction_dict.keys())),
        sorted([
            prediction_key.PredictionKey.CLASSES,
            prediction_key.PredictionKey.PROBABILITIES,
            dynamic_rnn_estimator._get_state_name(0),
            dynamic_rnn_estimator._get_state_name(1)
        ]))
    predictions = prediction_dict[prediction_key.PredictionKey.CLASSES]
    probabilities = prediction_dict[
        prediction_key.PredictionKey.PROBABILITIES]
    self.assertListEqual(list(predictions.shape), [batch_size])
    self.assertListEqual(list(probabilities.shape), [batch_size, 2])
if __name__ == '__main__':
test.main()
|
{
"content_hash": "6a840534dba132aa2652ceedd0daae8c",
"timestamp": "",
"source": "github",
"line_count": 765,
"max_line_length": 86,
"avg_line_length": 39.25228758169935,
"alnum_prop": 0.6438657253230319,
"repo_name": "ageron/tensorflow",
"id": "c3e9e3af9427037a4e7be6b86417cd081c42ef67",
"size": "30717",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644380"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59281238"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1501606"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908340"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94466"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15024"
},
{
"name": "Pascal",
"bytes": "617"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46230508"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481859"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
import qibuild.config
import qitoolchain.toolchain
def test_simple(qitoolchain_action):
qitoolchain_action("create", "foo")
qibuild.config.add_build_config("foo", toolchain="foo")
word_package = qitoolchain_action.get_test_package("world")
qitoolchain_action("add-package", "-c", "foo", word_package)
qitoolchain_action("remove-package", "-c", "foo", "world")
foo = qitoolchain.get_toolchain("foo")
assert foo.packages == list()
def test_fails_when_no_such_package(qitoolchain_action):
qitoolchain_action("create", "foo")
qibuild.config.add_build_config("foo", toolchain="foo")
error = qitoolchain_action("remove-package", "-c", "foo", "world", raises=True)
assert "No such package" in error
|
{
"content_hash": "29236d5961ec7076d2957a290466abfd",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 83,
"avg_line_length": 43.411764705882355,
"alnum_prop": 0.6978319783197832,
"repo_name": "dmerejkowsky/qibuild",
"id": "91d73a663ea7dc821521aa1f2f095d3922d8f8bf",
"size": "911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/qitoolchain/test/test_qitoolchain_remove_package.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7442"
},
{
"name": "C++",
"bytes": "22059"
},
{
"name": "CMake",
"bytes": "267118"
},
{
"name": "Java",
"bytes": "4132"
},
{
"name": "Makefile",
"bytes": "2222"
},
{
"name": "Nix",
"bytes": "563"
},
{
"name": "Python",
"bytes": "1145711"
},
{
"name": "Shell",
"bytes": "1085"
}
],
"symlink_target": ""
}
|
"""
These are almost end-to-end tests. They create a Prompt, feed it with some
input and check the result.
"""
from functools import partial
import pytest
from prompt_toolkit.clipboard import ClipboardData, InMemoryClipboard
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.filters import ViInsertMode
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.input.defaults import create_pipe_input
from prompt_toolkit.input.vt100_parser import ANSI_SEQUENCES
from prompt_toolkit.key_binding.bindings.named_commands import prefix_meta
from prompt_toolkit.key_binding.key_bindings import KeyBindings
from prompt_toolkit.output import DummyOutput
from prompt_toolkit.shortcuts import PromptSession
def _history():
h = InMemoryHistory()
h.append_string("line1 first input")
h.append_string("line2 second input")
h.append_string("line3 third input")
return h
def _feed_cli_with_input(
text,
editing_mode=EditingMode.EMACS,
clipboard=None,
history=None,
multiline=False,
check_line_ending=True,
key_bindings=None,
):
"""
Create a Prompt, feed it with the given user input and return the CLI
object.
This returns a (result, Application) tuple.
"""
# If the given text doesn't end with a newline, the interface won't finish.
if check_line_ending:
assert text.endswith("\r")
inp = create_pipe_input()
try:
inp.send_text(text)
session = PromptSession(
input=inp,
output=DummyOutput(),
editing_mode=editing_mode,
history=history,
multiline=multiline,
clipboard=clipboard,
key_bindings=key_bindings,
)
result = session.prompt()
return session.default_buffer.document, session.app
finally:
inp.close()
def test_simple_text_input():
# Simple text input, followed by enter.
result, cli = _feed_cli_with_input("hello\r")
assert result.text == "hello"
assert cli.current_buffer.text == "hello"
def test_emacs_cursor_movements():
"""
Test cursor movements with Emacs key bindings.
"""
# ControlA (beginning-of-line)
result, cli = _feed_cli_with_input("hello\x01X\r")
assert result.text == "Xhello"
# ControlE (end-of-line)
result, cli = _feed_cli_with_input("hello\x01X\x05Y\r")
assert result.text == "XhelloY"
# ControlH or \b
result, cli = _feed_cli_with_input("hello\x08X\r")
assert result.text == "hellX"
# Delete. (Left, left, delete)
result, cli = _feed_cli_with_input("hello\x1b[D\x1b[D\x1b[3~\r")
assert result.text == "helo"
# Left.
result, cli = _feed_cli_with_input("hello\x1b[DX\r")
assert result.text == "hellXo"
# ControlA, right
result, cli = _feed_cli_with_input("hello\x01\x1b[CX\r")
assert result.text == "hXello"
# ControlB (backward-char)
result, cli = _feed_cli_with_input("hello\x02X\r")
assert result.text == "hellXo"
# ControlF (forward-char)
result, cli = _feed_cli_with_input("hello\x01\x06X\r")
assert result.text == "hXello"
# ControlD: delete after cursor.
result, cli = _feed_cli_with_input("hello\x01\x04\r")
assert result.text == "ello"
# ControlD at the end of the input ssshould not do anything.
result, cli = _feed_cli_with_input("hello\x04\r")
assert result.text == "hello"
# Left, Left, ControlK (kill-line)
result, cli = _feed_cli_with_input("hello\x1b[D\x1b[D\x0b\r")
assert result.text == "hel"
# Left, Left Esc- ControlK (kill-line, but negative)
result, cli = _feed_cli_with_input("hello\x1b[D\x1b[D\x1b-\x0b\r")
assert result.text == "lo"
# ControlL: should not influence the result.
result, cli = _feed_cli_with_input("hello\x0c\r")
assert result.text == "hello"
# ControlRight (forward-word)
result, cli = _feed_cli_with_input("hello world\x01X\x1b[1;5CY\r")
assert result.text == "XhelloY world"
# ContrlolLeft (backward-word)
result, cli = _feed_cli_with_input("hello world\x1b[1;5DY\r")
assert result.text == "hello Yworld"
# <esc>-f with argument. (forward-word)
result, cli = _feed_cli_with_input("hello world abc def\x01\x1b3\x1bfX\r")
assert result.text == "hello world abcX def"
# <esc>-f with negative argument. (forward-word)
result, cli = _feed_cli_with_input("hello world abc def\x1b-\x1b3\x1bfX\r")
assert result.text == "hello Xworld abc def"
# <esc>-b with argument. (backward-word)
result, cli = _feed_cli_with_input("hello world abc def\x1b3\x1bbX\r")
assert result.text == "hello Xworld abc def"
# <esc>-b with negative argument. (backward-word)
result, cli = _feed_cli_with_input("hello world abc def\x01\x1b-\x1b3\x1bbX\r")
assert result.text == "hello world abc Xdef"
# ControlW (kill-word / unix-word-rubout)
result, cli = _feed_cli_with_input("hello world\x17\r")
assert result.text == "hello "
assert cli.clipboard.get_data().text == "world"
result, cli = _feed_cli_with_input("test hello world\x1b2\x17\r")
assert result.text == "test "
# Escape Backspace (unix-word-rubout)
result, cli = _feed_cli_with_input("hello world\x1b\x7f\r")
assert result.text == "hello "
assert cli.clipboard.get_data().text == "world"
result, cli = _feed_cli_with_input("hello world\x1b\x08\r")
assert result.text == "hello "
assert cli.clipboard.get_data().text == "world"
# Backspace (backward-delete-char)
result, cli = _feed_cli_with_input("hello world\x7f\r")
assert result.text == "hello worl"
assert result.cursor_position == len("hello worl")
result, cli = _feed_cli_with_input("hello world\x08\r")
assert result.text == "hello worl"
assert result.cursor_position == len("hello worl")
# Delete (delete-char)
result, cli = _feed_cli_with_input("hello world\x01\x1b[3~\r")
assert result.text == "ello world"
assert result.cursor_position == 0
# Escape-\\ (delete-horizontal-space)
result, cli = _feed_cli_with_input("hello world\x1b8\x02\x1b\\\r")
assert result.text == "helloworld"
assert result.cursor_position == len("hello")
def test_emacs_kill_multiple_words_and_paste():
# Using control-w twice should place both words on the clipboard.
result, cli = _feed_cli_with_input(
"hello world test" "\x17\x17" "--\x19\x19\r" # Twice c-w. # Twice c-y.
)
assert result.text == "hello --world testworld test"
assert cli.clipboard.get_data().text == "world test"
# Using alt-d twice should place both words on the clipboard.
result, cli = _feed_cli_with_input(
"hello world test"
"\x1bb\x1bb" # Twice left.
"\x1bd\x1bd" # Twice kill-word.
"abc"
"\x19" # Paste.
"\r"
)
assert result.text == "hello abcworld test"
assert cli.clipboard.get_data().text == "world test"
def test_interrupts():
# ControlC: raise KeyboardInterrupt.
with pytest.raises(KeyboardInterrupt):
result, cli = _feed_cli_with_input("hello\x03\r")
with pytest.raises(KeyboardInterrupt):
result, cli = _feed_cli_with_input("hello\x03\r")
# ControlD without any input: raises EOFError.
with pytest.raises(EOFError):
result, cli = _feed_cli_with_input("\x04\r")
def test_emacs_yank():
# ControlY (yank)
c = InMemoryClipboard(ClipboardData("XYZ"))
result, cli = _feed_cli_with_input("hello\x02\x19\r", clipboard=c)
assert result.text == "hellXYZo"
assert result.cursor_position == len("hellXYZ")
def test_quoted_insert():
# ControlQ - ControlB (quoted-insert)
result, cli = _feed_cli_with_input("hello\x11\x02\r")
assert result.text == "hello\x02"
def test_transformations():
# Meta-c (capitalize-word)
result, cli = _feed_cli_with_input("hello world\01\x1bc\r")
assert result.text == "Hello world"
assert result.cursor_position == len("Hello")
# Meta-u (uppercase-word)
result, cli = _feed_cli_with_input("hello world\01\x1bu\r")
assert result.text == "HELLO world"
assert result.cursor_position == len("Hello")
# Meta-u (downcase-word)
result, cli = _feed_cli_with_input("HELLO WORLD\01\x1bl\r")
assert result.text == "hello WORLD"
assert result.cursor_position == len("Hello")
# ControlT (transpose-chars)
result, cli = _feed_cli_with_input("hello\x14\r")
assert result.text == "helol"
assert result.cursor_position == len("hello")
# Left, Left, Control-T (transpose-chars)
result, cli = _feed_cli_with_input("abcde\x1b[D\x1b[D\x14\r")
assert result.text == "abdce"
assert result.cursor_position == len("abcd")
def test_emacs_other_bindings():
# Transpose characters.
result, cli = _feed_cli_with_input("abcde\x14X\r") # Ctrl-T
assert result.text == "abcedX"
# Left, Left, Transpose. (This is slightly different.)
result, cli = _feed_cli_with_input("abcde\x1b[D\x1b[D\x14X\r")
assert result.text == "abdcXe"
# Clear before cursor.
result, cli = _feed_cli_with_input("hello\x1b[D\x1b[D\x15X\r")
assert result.text == "Xlo"
# unix-word-rubout: delete word before the cursor.
# (ControlW).
result, cli = _feed_cli_with_input("hello world test\x17X\r")
assert result.text == "hello world X"
result, cli = _feed_cli_with_input("hello world /some/very/long/path\x17X\r")
assert result.text == "hello world X"
# (with argument.)
result, cli = _feed_cli_with_input("hello world test\x1b2\x17X\r")
assert result.text == "hello X"
result, cli = _feed_cli_with_input("hello world /some/very/long/path\x1b2\x17X\r")
assert result.text == "hello X"
# backward-kill-word: delete word before the cursor.
# (Esc-ControlH).
result, cli = _feed_cli_with_input("hello world /some/very/long/path\x1b\x08X\r")
assert result.text == "hello world /some/very/long/X"
# (with arguments.)
result, cli = _feed_cli_with_input(
"hello world /some/very/long/path\x1b3\x1b\x08X\r"
)
assert result.text == "hello world /some/very/X"
def test_controlx_controlx():
# At the end: go to the start of the line.
result, cli = _feed_cli_with_input("hello world\x18\x18X\r")
assert result.text == "Xhello world"
assert result.cursor_position == 1
# At the start: go to the end of the line.
result, cli = _feed_cli_with_input("hello world\x01\x18\x18X\r")
assert result.text == "hello worldX"
# Left, Left Control-X Control-X: go to the end of the line.
result, cli = _feed_cli_with_input("hello world\x1b[D\x1b[D\x18\x18X\r")
assert result.text == "hello worldX"
def test_emacs_history_bindings():
# Adding a new item to the history.
history = _history()
result, cli = _feed_cli_with_input("new input\r", history=history)
assert result.text == "new input"
history.get_strings()[-1] == "new input"
# Go up in history, and accept the last item.
result, cli = _feed_cli_with_input("hello\x1b[A\r", history=history)
assert result.text == "new input"
# Esc< (beginning-of-history)
result, cli = _feed_cli_with_input("hello\x1b<\r", history=history)
assert result.text == "line1 first input"
# Esc> (end-of-history)
result, cli = _feed_cli_with_input(
"another item\x1b[A\x1b[a\x1b>\r", history=history
)
assert result.text == "another item"
# ControlUp (previous-history)
result, cli = _feed_cli_with_input("\x1b[1;5A\r", history=history)
assert result.text == "another item"
# Esc< ControlDown (beginning-of-history, next-history)
result, cli = _feed_cli_with_input("\x1b<\x1b[1;5B\r", history=history)
assert result.text == "line2 second input"
def test_emacs_reverse_search():
history = _history()
# ControlR (reverse-search-history)
result, cli = _feed_cli_with_input("\x12input\r\r", history=history)
assert result.text == "line3 third input"
# Hitting ControlR twice.
result, cli = _feed_cli_with_input("\x12input\x12\r\r", history=history)
assert result.text == "line2 second input"
def test_emacs_arguments():
"""
Test various combinations of arguments in Emacs mode.
"""
# esc 4
result, cli = _feed_cli_with_input("\x1b4x\r")
assert result.text == "xxxx"
# esc 4 4
result, cli = _feed_cli_with_input("\x1b44x\r")
assert result.text == "x" * 44
# esc 4 esc 4
result, cli = _feed_cli_with_input("\x1b4\x1b4x\r")
assert result.text == "x" * 44
# esc - right (-1 position to the right, equals 1 to the left.)
result, cli = _feed_cli_with_input("aaaa\x1b-\x1b[Cbbbb\r")
assert result.text == "aaabbbba"
# esc - 3 right
result, cli = _feed_cli_with_input("aaaa\x1b-3\x1b[Cbbbb\r")
assert result.text == "abbbbaaa"
# esc - - - 3 right
result, cli = _feed_cli_with_input("aaaa\x1b---3\x1b[Cbbbb\r")
assert result.text == "abbbbaaa"
def test_emacs_arguments_for_all_commands():
"""
Test all Emacs commands with Meta-[0-9] arguments (both positive and
negative). No one should crash.
"""
for key in ANSI_SEQUENCES:
# Ignore BracketedPaste. This would hang forever, because it waits for
# the end sequence.
if key != "\x1b[200~":
try:
# Note: we add an 'X' after the key, because Ctrl-Q (quoted-insert)
# expects something to follow. We add an additional \r, because
# Ctrl-R and Ctrl-S (reverse-search) expect that.
result, cli = _feed_cli_with_input("hello\x1b4" + key + "X\r\r")
result, cli = _feed_cli_with_input("hello\x1b-" + key + "X\r\r")
except KeyboardInterrupt:
# This exception should only be raised for Ctrl-C
assert key == "\x03"
def test_emacs_kill_ring():
operations = (
# abc ControlA ControlK
"abc\x01\x0b"
# def ControlA ControlK
"def\x01\x0b"
# ghi ControlA ControlK
"ghi\x01\x0b"
# ControlY (yank)
"\x19"
)
result, cli = _feed_cli_with_input(operations + "\r")
assert result.text == "ghi"
result, cli = _feed_cli_with_input(operations + "\x1by\r")
assert result.text == "def"
result, cli = _feed_cli_with_input(operations + "\x1by\x1by\r")
assert result.text == "abc"
result, cli = _feed_cli_with_input(operations + "\x1by\x1by\x1by\r")
assert result.text == "ghi"
def test_emacs_selection():
# Copy/paste empty selection should not do anything.
operations = (
"hello"
# Twice left.
"\x1b[D\x1b[D"
# Control-Space
"\x00"
# ControlW (cut)
"\x17"
# ControlY twice. (paste twice)
"\x19\x19\r"
)
result, cli = _feed_cli_with_input(operations)
assert result.text == "hello"
# Copy/paste one character.
operations = (
"hello"
# Twice left.
"\x1b[D\x1b[D"
# Control-Space
"\x00"
# Right.
"\x1b[C"
# ControlW (cut)
"\x17"
# ControlA (Home).
"\x01"
# ControlY (paste)
"\x19\r"
)
result, cli = _feed_cli_with_input(operations)
assert result.text == "lhelo"
def test_emacs_insert_comment():
# Test insert-comment (M-#) binding.
result, cli = _feed_cli_with_input("hello\x1b#", check_line_ending=False)
assert result.text == "#hello"
result, cli = _feed_cli_with_input(
"hello\rworld\x1b#", check_line_ending=False, multiline=True
)
assert result.text == "#hello\n#world"
def test_emacs_record_macro():
operations = (
" "
"\x18(" # Start recording macro. C-X(
"hello"
"\x18)" # Stop recording macro.
" "
"\x18e" # Execute macro.
"\x18e" # Execute macro.
"\r"
)
result, cli = _feed_cli_with_input(operations)
assert result.text == " hello hellohello"
def test_emacs_nested_macro():
" Test calling the macro within a macro. "
# Calling a macro within a macro should take the previous recording (if one
# exists), not the one that is in progress.
operations = (
"\x18(" # Start recording macro. C-X(
"hello"
"\x18e" # Execute macro.
"\x18)" # Stop recording macro.
"\x18e" # Execute macro.
"\r"
)
result, cli = _feed_cli_with_input(operations)
assert result.text == "hellohello"
operations = (
"\x18(" # Start recording macro. C-X(
"hello"
"\x18)" # Stop recording macro.
"\x18(" # Start recording macro. C-X(
"\x18e" # Execute macro.
"world"
"\x18)" # Stop recording macro.
"\x01\x0b" # Delete all (c-a c-k).
"\x18e" # Execute macro.
"\r"
)
result, cli = _feed_cli_with_input(operations)
assert result.text == "helloworld"
def test_prefix_meta():
# Test the prefix-meta command.
b = KeyBindings()
b.add("j", "j", filter=ViInsertMode())(prefix_meta)
result, cli = _feed_cli_with_input(
"hellojjIX\r", key_bindings=b, editing_mode=EditingMode.VI
)
assert result.text == "Xhello"
def test_bracketed_paste():
result, cli = _feed_cli_with_input("\x1b[200~hello world\x1b[201~\r")
assert result.text == "hello world"
result, cli = _feed_cli_with_input("\x1b[200~hello\rworld\x1b[201~\x1b\r")
assert result.text == "hello\nworld"
# With \r\n endings.
result, cli = _feed_cli_with_input("\x1b[200~hello\r\nworld\x1b[201~\x1b\r")
assert result.text == "hello\nworld"
# With \n endings.
result, cli = _feed_cli_with_input("\x1b[200~hello\nworld\x1b[201~\x1b\r")
assert result.text == "hello\nworld"
def test_vi_cursor_movements():
"""
Test cursor movements with Vi key bindings.
"""
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI)
result, cli = feed("\x1b\r")
assert result.text == ""
assert cli.editing_mode == EditingMode.VI
# Esc h a X
result, cli = feed("hello\x1bhaX\r")
assert result.text == "hellXo"
# Esc I X
result, cli = feed("hello\x1bIX\r")
assert result.text == "Xhello"
# Esc I X
result, cli = feed("hello\x1bIX\r")
assert result.text == "Xhello"
# Esc 2hiX
result, cli = feed("hello\x1b2hiX\r")
assert result.text == "heXllo"
# Esc 2h2liX
result, cli = feed("hello\x1b2h2liX\r")
assert result.text == "hellXo"
# Esc \b\b
result, cli = feed("hello\b\b\r")
assert result.text == "hel"
# Esc \b\b
result, cli = feed("hello\b\b\r")
assert result.text == "hel"
# Esc 2h D
result, cli = feed("hello\x1b2hD\r")
assert result.text == "he"
# Esc 2h rX \r
result, cli = feed("hello\x1b2hrX\r")
assert result.text == "heXlo"
def test_vi_operators():
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI)
# Esc g~0
result, cli = feed("hello\x1bg~0\r")
assert result.text == "HELLo"
# Esc gU0
result, cli = feed("hello\x1bgU0\r")
assert result.text == "HELLo"
# Esc d0
result, cli = feed("hello\x1bd0\r")
assert result.text == "o"
def test_vi_text_objects():
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI)
# Esc gUgg
result, cli = feed("hello\x1bgUgg\r")
assert result.text == "HELLO"
# Esc gUU
result, cli = feed("hello\x1bgUU\r")
assert result.text == "HELLO"
# Esc di(
result, cli = feed("before(inside)after\x1b8hdi(\r")
assert result.text == "before()after"
# Esc di[
result, cli = feed("before[inside]after\x1b8hdi[\r")
assert result.text == "before[]after"
# Esc da(
result, cli = feed("before(inside)after\x1b8hda(\r")
assert result.text == "beforeafter"
def test_vi_digraphs():
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI)
# C-K o/
result, cli = feed("hello\x0bo/\r")
assert result.text == "helloø"
# C-K /o (reversed input.)
result, cli = feed("hello\x0b/o\r")
assert result.text == "helloø"
# C-K e:
result, cli = feed("hello\x0be:\r")
assert result.text == "helloë"
# C-K xxy (Unknown digraph.)
result, cli = feed("hello\x0bxxy\r")
assert result.text == "helloy"
def test_vi_block_editing():
" Test Vi Control-V style block insertion. "
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI, multiline=True)
operations = (
# Six lines of text.
"-line1\r-line2\r-line3\r-line4\r-line5\r-line6"
# Go to the second character of the second line.
"\x1bkkkkkkkj0l"
# Enter Visual block mode.
"\x16"
# Go down two more lines.
"jj"
# Go 3 characters to the right.
"lll"
# Go to insert mode.
"insert" # (Will be replaced.)
# Insert stars.
"***"
# Escape again.
"\x1b\r"
)
# Control-I
result, cli = feed(operations.replace("insert", "I"))
assert result.text == "-line1\n-***line2\n-***line3\n-***line4\n-line5\n-line6"
# Control-A
result, cli = feed(operations.replace("insert", "A"))
assert result.text == "-line1\n-line***2\n-line***3\n-line***4\n-line5\n-line6"
def test_vi_block_editing_empty_lines():
" Test block editing on empty lines. "
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI, multiline=True)
operations = (
# Six empty lines.
"\r\r\r\r\r"
# Go to beginning of the document.
"\x1bgg"
# Enter Visual block mode.
"\x16"
# Go down two more lines.
"jj"
# Go 3 characters to the right.
"lll"
# Go to insert mode.
"insert" # (Will be replaced.)
# Insert stars.
"***"
# Escape again.
"\x1b\r"
)
# Control-I
result, cli = feed(operations.replace("insert", "I"))
assert result.text == "***\n***\n***\n\n\n"
# Control-A
result, cli = feed(operations.replace("insert", "A"))
assert result.text == "***\n***\n***\n\n\n"
def test_vi_visual_line_copy():
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI, multiline=True)
operations = (
# Three lines of text.
"-line1\r-line2\r-line3\r-line4\r-line5\r-line6"
# Go to the second character of the second line.
"\x1bkkkkkkkj0l"
# Enter Visual linemode.
"V"
# Go down one line.
"j"
# Go 3 characters to the right (should not do much).
"lll"
# Copy this block.
"y"
# Go down one line.
"j"
# Insert block twice.
"2p"
# Escape again.
"\x1b\r"
)
result, cli = feed(operations)
assert (
result.text
== "-line1\n-line2\n-line3\n-line4\n-line2\n-line3\n-line2\n-line3\n-line5\n-line6"
)
def test_vi_visual_empty_line():
"""
Test edge case with an empty line in Visual-line mode.
"""
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI, multiline=True)
# 1. Delete first two lines.
operations = (
# Three lines of text. The middle one is empty.
"hello\r\rworld"
# Go to the start.
"\x1bgg"
# Visual line and move down.
"Vj"
# Delete.
"d\r"
)
result, cli = feed(operations)
assert result.text == "world"
# 1. Delete middle line.
operations = (
# Three lines of text. The middle one is empty.
"hello\r\rworld"
# Go to middle line.
"\x1bggj"
# Delete line
"Vd\r"
)
result, cli = feed(operations)
assert result.text == "hello\nworld"
def test_vi_character_delete_after_cursor():
" Test 'x' keypress. "
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI, multiline=True)
# Delete one character.
result, cli = feed("abcd\x1bHx\r")
assert result.text == "bcd"
# Delete multiple character.s
result, cli = feed("abcd\x1bH3x\r")
assert result.text == "d"
# Delete on empty line.
result, cli = feed("\x1bo\x1bo\x1bggx\r")
assert result.text == "\n\n"
# Delete multiple on empty line.
result, cli = feed("\x1bo\x1bo\x1bgg10x\r")
assert result.text == "\n\n"
# Delete multiple on empty line.
result, cli = feed("hello\x1bo\x1bo\x1bgg3x\r")
assert result.text == "lo\n\n"
def test_vi_character_delete_before_cursor():
" Test 'X' keypress. "
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI, multiline=True)
# Delete one character.
result, cli = feed("abcd\x1bX\r")
assert result.text == "abd"
# Delete multiple character.
result, cli = feed("hello world\x1b3X\r")
assert result.text == "hello wd"
# Delete multiple character on multiple lines.
result, cli = feed("hello\x1boworld\x1bgg$3X\r")
assert result.text == "ho\nworld"
result, cli = feed("hello\x1boworld\x1b100X\r")
assert result.text == "hello\nd"
# Delete on empty line.
result, cli = feed("\x1bo\x1bo\x1b10X\r")
assert result.text == "\n\n"
def test_vi_character_paste():
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI)
# Test 'p' character paste.
result, cli = feed("abcde\x1bhhxp\r")
assert result.text == "abdce"
assert result.cursor_position == 3
# Test 'P' character paste.
result, cli = feed("abcde\x1bhhxP\r")
assert result.text == "abcde"
assert result.cursor_position == 2
def test_vi_temp_navigation_mode():
"""
Test c-o binding: go for one action into navigation mode.
"""
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI)
result, cli = feed("abcde" "\x0f" "3h" "x\r") # c-o # 3 times to the left.
assert result.text == "axbcde"
assert result.cursor_position == 2
result, cli = feed("abcde" "\x0f" "b" "x\r") # c-o # One word backwards.
assert result.text == "xabcde"
assert result.cursor_position == 1
# In replace mode
result, cli = feed(
"abcdef"
"\x1b" # Navigation mode.
"0l" # Start of line, one character to the right.
"R" # Replace mode
"78"
"\x0f" # c-o
"l" # One character forwards.
"9\r"
)
assert result.text == "a78d9f"
assert result.cursor_position == 5
def test_vi_macros():
feed = partial(_feed_cli_with_input, editing_mode=EditingMode.VI)
# Record and execute macro.
result, cli = feed("\x1bqcahello\x1bq@c\r")
assert result.text == "hellohello"
assert result.cursor_position == 9
# Running unknown macro.
result, cli = feed("\x1b@d\r")
assert result.text == ""
assert result.cursor_position == 0
# When a macro is called within a macro.
# It shouldn't result in eternal recursion.
result, cli = feed("\x1bqxahello\x1b@xq@x\r")
assert result.text == "hellohello"
assert result.cursor_position == 9
# Nested macros.
result, cli = feed(
# Define macro 'x'.
"\x1bqxahello\x1bq"
# Define macro 'y' which calls 'x'.
"qya\x1b@xaworld\x1bq"
# Delete line.
"2dd"
# Execute 'y'
"@y\r"
)
assert result.text == "helloworld"
def test_accept_default():
"""
Test `prompt(accept_default=True)`.
"""
inp = create_pipe_input()
session = PromptSession(input=inp, output=DummyOutput())
result = session.prompt(default="hello", accept_default=True)
assert result == "hello"
# Test calling prompt() for a second time. (We had an issue where the
# prompt reset between calls happened at the wrong time, breaking this.)
result = session.prompt(default="world", accept_default=True)
assert result == "world"
inp.close()
|
{
"content_hash": "5e39c37adf2f1a9f65fbb6870b9fd4c5",
"timestamp": "",
"source": "github",
"line_count": 947,
"max_line_length": 91,
"avg_line_length": 29.633579725448786,
"alnum_prop": 0.6110537006022164,
"repo_name": "jonathanslenders/python-prompt-toolkit",
"id": "c360d8ebf14889bc9a780b0ad5fdd74e03cbf03b",
"size": "28084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1191674"
}
],
"symlink_target": ""
}
|
"""Development settings and globals."""
from .base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'syntacticframes',
'USER': 'REPLACE_ME',
'PASSWORD': 'test',
'HOST': 'localhost',
'PORT': '',
'ATOMIC_REQUESTS': True,
}
}
########## END DATABASE CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INSTALLED_APPS += (
'debug_toolbar',
)
DEBUG_TOOLBAR_CONFIG = {
'JQUERY_URL': '/public/js/jquery-2.1.3.min.js'
}
########## END TOOLBAR CONFIGURATION
|
{
"content_hash": "934e94e460ff72bce9219d9ac8b57fd8",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 80,
"avg_line_length": 25.617021276595743,
"alnum_prop": 0.6461794019933554,
"repo_name": "aymara/verbenet-editor",
"id": "0f933e71671a30d6c6c2be3843dea035bce3757e",
"size": "1204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "syntacticframes_project/syntacticframes/settings/local.example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "120"
},
{
"name": "CSS",
"bytes": "6083"
},
{
"name": "HTML",
"bytes": "24997999"
},
{
"name": "JavaScript",
"bytes": "26332"
},
{
"name": "Python",
"bytes": "221026"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
import paddle
from paddle.static import Program, program_guard
# In static mode, inplace strategy will not be used in Inplace APIs.
class TestStaticAutoGeneratedAPI(unittest.TestCase):
    """Compare a paddle static-graph unary API against its NumPy equivalent.

    The default operator is ``ceil``; subclasses override
    ``executed_paddle_api`` / ``executed_numpy_api`` (and optionally
    ``init_data`` / ``set_np_compare_func``) to reuse ``test_api`` for
    other element-wise operators.
    """
    def setUp(self):
        # Static-graph mode is required for the Program/Executor used below.
        paddle.enable_static()
        self.init_data()
        self.set_np_compare_func()
    def init_data(self):
        # Random input covering both negative and positive values.
        self.dtype = 'float32'
        self.shape = [10, 20]
        self.np_x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
    def set_np_compare_func(self):
        # Exact comparison is fine for integer-valued results such as ceil.
        self.np_compare = np.array_equal
    def executed_paddle_api(self, x):
        return x.ceil()
    def executed_numpy_api(self, x):
        return np.ceil(x)
    def test_api(self):
        main_prog = Program()
        with program_guard(main_prog, Program()):
            x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype)
            out = self.executed_paddle_api(x)
        exe = paddle.static.Executor(place=paddle.CPUPlace())
        fetch_x, fetch_out = exe.run(
            main_prog, feed={"x": self.np_x}, fetch_list=[x, out]
        )
        # In static mode the inplace strategy is disabled, so the fed input
        # must come back unchanged even for the ``op_`` variants.
        np.testing.assert_array_equal(fetch_x, self.np_x)
        self.assertTrue(
            self.np_compare(fetch_out, self.executed_numpy_api(self.np_x))
        )
class TestStaticInplaceAutoGeneratedAPI(TestStaticAutoGeneratedAPI):
    """Same check, using the inplace variant ``ceil_``."""
    def executed_paddle_api(self, x):
        return x.ceil_()
class TestStaticFloorAPI(TestStaticAutoGeneratedAPI):
    """Static-graph check for ``floor`` vs ``np.floor``."""
    def executed_paddle_api(self, x):
        return x.floor()
    def executed_numpy_api(self, x):
        return np.floor(x)
class TestStaticInplaceFloorAPI(TestStaticFloorAPI):
    """Same check, using the inplace variant ``floor_``."""
    def executed_paddle_api(self, x):
        return x.floor_()
class TestStaticExpAPI(TestStaticAutoGeneratedAPI):
    """Static-graph check for ``exp`` vs ``np.exp``."""
    def set_np_compare_func(self):
        # exp produces non-integer floats; allow tolerance-based comparison.
        self.np_compare = np.allclose
    def executed_paddle_api(self, x):
        return x.exp()
    def executed_numpy_api(self, x):
        return np.exp(x)
class TestStaticInplaceExpAPI(TestStaticExpAPI):
    """Same check, using the inplace variant ``exp_``."""
    def executed_paddle_api(self, x):
        return x.exp_()
class TestStaticReciprocalAPI(TestStaticAutoGeneratedAPI):
    """Static-graph check for ``reciprocal`` vs ``np.reciprocal``."""
    def executed_paddle_api(self, x):
        return x.reciprocal()
    def executed_numpy_api(self, x):
        return np.reciprocal(x)
class TestStaticInplaceReciprocalAPI(TestStaticReciprocalAPI):
    """Same check, using the inplace variant ``reciprocal_``."""
    def executed_paddle_api(self, x):
        return x.reciprocal_()
class TestStaticRoundAPI(TestStaticAutoGeneratedAPI):
    """Static-graph check for ``round`` vs ``np.round``."""
    def executed_paddle_api(self, x):
        return x.round()
    def executed_numpy_api(self, x):
        return np.round(x)
class TestStaticInplaceRoundAPI(TestStaticRoundAPI):
    """Same check, using the inplace variant ``round_``."""
    def executed_paddle_api(self, x):
        return x.round_()
class TestStaticSqrtAPI(TestStaticAutoGeneratedAPI):
    """Static-graph check for ``sqrt`` vs ``np.sqrt``."""
    def init_data(self):
        # Non-negative inputs only: sqrt of a negative value is NaN.
        self.dtype = 'float32'
        self.shape = [10, 20]
        self.np_x = np.random.uniform(0, 5, self.shape).astype(self.dtype)
    def set_np_compare_func(self):
        # Float results: compare with tolerance.
        self.np_compare = np.allclose
    def executed_paddle_api(self, x):
        return x.sqrt()
    def executed_numpy_api(self, x):
        return np.sqrt(x)
class TestStaticInplaceSqrtAPI(TestStaticSqrtAPI):
    """Same check, using the inplace variant ``sqrt_``."""
    def executed_paddle_api(self, x):
        return x.sqrt_()
class TestStaticRsqrtAPI(TestStaticSqrtAPI):
    """Static-graph check for ``rsqrt`` (reciprocal square root)."""
    def executed_paddle_api(self, x):
        return x.rsqrt()
    def executed_numpy_api(self, x):
        return 1 / np.sqrt(x)
class TestStaticInplaceRsqrtAPI(TestStaticRsqrtAPI):
    """Same check, using the inplace variant ``rsqrt_``."""
    def executed_paddle_api(self, x):
        return x.rsqrt_()
# In dygraph mode, inplace strategy will be used in Inplace APIs.
class TestDygraphAutoGeneratedAPI(unittest.TestCase):
    """Compare a paddle dygraph-mode unary API against its NumPy equivalent.

    Default operator is ``ceil``; subclasses override the two
    ``executed_*`` hooks (and optionally ``init_data`` /
    ``set_np_compare_func``) to test other element-wise operators.
    """
    def setUp(self):
        # Dygraph (eager) mode: here the ``op_`` variants really do run inplace.
        paddle.disable_static()
        self.init_data()
        self.set_np_compare_func()
    def init_data(self):
        self.dtype = 'float32'
        self.shape = [10, 20]
        self.np_x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
    def set_np_compare_func(self):
        # Exact comparison is fine for integer-valued results such as ceil.
        self.np_compare = np.array_equal
    def executed_paddle_api(self, x):
        return x.ceil()
    def executed_numpy_api(self, x):
        return np.ceil(x)
    def test_api(self):
        x = paddle.to_tensor(self.np_x, dtype=self.dtype)
        out = self.executed_paddle_api(x)
        # Only the returned value is checked; the input may legitimately be
        # mutated by the inplace subclasses.
        self.assertTrue(
            self.np_compare(out.numpy(), self.executed_numpy_api(self.np_x))
        )
class TestDygraphInplaceAutoGeneratedAPI(TestDygraphAutoGeneratedAPI):
    """Same check, using the inplace variant ``ceil_``."""
    def executed_paddle_api(self, x):
        return x.ceil_()
class TestDygraphFloorAPI(TestDygraphAutoGeneratedAPI):
    """Dygraph check for ``floor`` vs ``np.floor``."""
    def executed_paddle_api(self, x):
        return x.floor()
    def executed_numpy_api(self, x):
        return np.floor(x)
class TestDygraphInplaceFloorAPI(TestDygraphFloorAPI):
    """Same check, using the inplace variant ``floor_``."""
    def executed_paddle_api(self, x):
        return x.floor_()
class TestDygraphExpAPI(TestDygraphAutoGeneratedAPI):
    """Dygraph check for ``exp`` vs ``np.exp``."""
    def executed_paddle_api(self, x):
        return x.exp()
    def executed_numpy_api(self, x):
        return np.exp(x)
    def set_np_compare_func(self):
        # Float results: compare with tolerance.
        self.np_compare = np.allclose
class TestDygraphInplaceExpAPI(TestDygraphExpAPI):
    """Same check, using the inplace variant ``exp_``."""
    def executed_paddle_api(self, x):
        return x.exp_()
class TestDygraphReciprocalAPI(TestDygraphAutoGeneratedAPI):
    """Dygraph check for ``reciprocal`` vs ``np.reciprocal``."""
    def executed_paddle_api(self, x):
        return x.reciprocal()
    def executed_numpy_api(self, x):
        return np.reciprocal(x)
class TestDygraphInplaceReciprocalAPI(TestDygraphReciprocalAPI):
    """Same check, using the inplace variant ``reciprocal_``."""
    def executed_paddle_api(self, x):
        return x.reciprocal_()
class TestDygraphRoundAPI(TestDygraphAutoGeneratedAPI):
    """Dygraph check for ``round`` vs ``np.round``."""
    def executed_paddle_api(self, x):
        return x.round()
    def executed_numpy_api(self, x):
        return np.round(x)
class TestDygraphInplaceRoundAPI(TestDygraphRoundAPI):
    """Same check, using the inplace variant ``round_``."""
    def executed_paddle_api(self, x):
        return x.round_()
class TestDygraphSqrtAPI(TestDygraphAutoGeneratedAPI):
    """Dygraph check for ``sqrt`` vs ``np.sqrt``."""
    def init_data(self):
        # Non-negative inputs only: sqrt of a negative value is NaN.
        self.dtype = 'float32'
        self.shape = [10, 20]
        self.np_x = np.random.uniform(0, 100, self.shape).astype(self.dtype)
    def set_np_compare_func(self):
        # Float results: compare with tolerance.
        self.np_compare = np.allclose
    def executed_paddle_api(self, x):
        return x.sqrt()
    def executed_numpy_api(self, x):
        return np.sqrt(x)
class TestDygraphInplaceSqrtAPI(TestDygraphSqrtAPI):
    """Same check, using the inplace variant ``sqrt_``."""
    def executed_paddle_api(self, x):
        return x.sqrt_()
class TestDygraphRsqrtAPI(TestDygraphSqrtAPI):
    """Dygraph check for ``rsqrt`` (reciprocal square root)."""
    def executed_paddle_api(self, x):
        return x.rsqrt()
    def executed_numpy_api(self, x):
        return 1.0 / np.sqrt(x)
class TestDygraphInplaceRsqrtAPI(TestDygraphRsqrtAPI):
    """Same check, using the inplace variant ``rsqrt_``."""
    def executed_paddle_api(self, x):
        return x.rsqrt_()
if __name__ == "__main__":
    # Run every unittest case defined in this file.
    unittest.main()
|
{
"content_hash": "5d53a73edf865e7be87aa2ddd2bca50c",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 80,
"avg_line_length": 25.41509433962264,
"alnum_prop": 0.6580549368968077,
"repo_name": "luotao1/Paddle",
"id": "d8f8b5dbc72ce1a98915a55e3db39fefa77ac6b5",
"size": "7348",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_inplace_auto_generated_apis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
}
|
'''NeuroM neurom morphology analysis package

Examples:

    Load a neuron

    >>> import neurom as nm
    >>> nrn = nm.load_neuron('some/data/path/morph_file.swc')

    Obtain some morphometrics using the get function

    >>> ap_seg_len = nm.get('segment_lengths', nrn, neurite_type=nm.APICAL_DENDRITE)
    >>> ax_sec_len = nm.get('section_lengths', nrn, neurite_type=nm.AXON)

    Load neurons from a directory. This loads all SWC, HDF5 or NeuroLucida .asc\
 files it finds and returns a list of neurons

    >>> import numpy as np  # For mean value calculation
    >>> nrns = nm.load_neurons('some/data/directory')
    >>> for nrn in nrns:
    ...     print 'mean section length', np.mean(nm.get('section_lengths', nrn))

    Apply a function to a selection of neurites in a neuron or population.
    This example gets the number of points in each axon in a neuron population

    >>> import neurom as nm
    >>> filter = lambda n : n.type == nm.AXON
    >>> mapping = lambda n : len(n.points)
    >>> n_points = [n for n in nm.iter_neurites(nrns, mapping, filter)]
'''
import logging as _logging
from .version import VERSION as __version__
from .core import iter_neurites, iter_sections, graft_neuron, iter_segments, NeuriteType
from .core.dataformat import COLS
from .core.types import NEURITES as NEURITE_TYPES
from .io.utils import load_neuron, load_neurons, NeuronLoader
from .fst import get
# Convenience aliases so callers can write e.g. ``nm.AXON`` instead of
# ``nm.NeuriteType.axon``.
APICAL_DENDRITE = NeuriteType.apical_dendrite
BASAL_DENDRITE = NeuriteType.basal_dendrite
AXON = NeuriteType.axon
SOMA = NeuriteType.soma
ANY_NEURITE = NeuriteType.all
# prevent 'No handlers could be found for logger ...' errors
# https://pythonhosted.org/logutils/libraries.html
_logging.getLogger(__name__).addHandler(_logging.NullHandler())
|
{
"content_hash": "a1d9b7d87029b983d938eca42c41ca7d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 88,
"avg_line_length": 35.08,
"alnum_prop": 0.7109464082098061,
"repo_name": "eleftherioszisis/NeuroM",
"id": "cbae5e84e1bb19fb6ba536b744a5cbc04329d552",
"size": "3448",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neurom/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "697203"
},
{
"name": "Jupyter Notebook",
"bytes": "2132229"
},
{
"name": "Python",
"bytes": "570902"
}
],
"symlink_target": ""
}
|
from calvin.actor.actor import Actor, ActionResult, condition, guard, manage
class Alternate(Actor):
    """
    Alternating between two streams of tokens

    Inputs:
      token_1 : first token stream
      token_2 : second token stream
    Outputs:
      token : resulting token stream
    """

    @manage(['token_one'])
    def init(self):
        # True -> the next token is taken from port token_1.
        self.token_one = True

    def is_even(self, input):
        # Guard: it is token_1's turn.
        return self.token_one

    def is_odd(self, input):
        # Guard: it is token_2's turn.
        return not self.token_one

    @condition(['token_1'], ['token'])
    @guard(is_even)
    def port_one(self, input):
        # Forward the token and hand the turn to token_2.
        result = ActionResult(production=(input, ))
        self.token_one = False
        return result

    @condition(['token_2'], ['token'])
    @guard(is_odd)
    def port_two(self, input):
        # Forward the token and hand the turn back to token_1.
        result = ActionResult(production=(input, ))
        self.token_one = True
        return result

    action_priority = (port_one, port_two)
# Self-test vectors (presumably consumed by the Calvin actor-store test
# harness): each case feeds the listed input tokens and expects the
# alternately-merged output stream.
test_set = [
    {
        'in': {'token_1': [1, 2], 'token_2': ['a', 'b']},
        'out': {'token': [1, 'a', 2, 'b']}
    },
    {
        'in': {'token_1': [1]},
        'out': {'token': [1]}
    }
]
|
{
"content_hash": "af32a419cfdb46014d09ba52c9fb2b76",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 76,
"avg_line_length": 24.21276595744681,
"alnum_prop": 0.5272407732864675,
"repo_name": "josrolgil/exjobbCalvin",
"id": "93b57fa4d010999d7fdd21ab80562212dd40f8e4",
"size": "1743",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "calvin/actorstore/systemactors/std/Alternate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1727"
},
{
"name": "HTML",
"bytes": "7958"
},
{
"name": "JavaScript",
"bytes": "59355"
},
{
"name": "Python",
"bytes": "1579174"
},
{
"name": "Shell",
"bytes": "12920"
}
],
"symlink_target": ""
}
|
"""
Source: http://docs.python.org/3/library/datetime.html → "Example tzinfo classes"
Idea: http://stackoverflow.com/a/2071364/183995
"""
from datetime import datetime as dt, tzinfo, timedelta
import time as _time
ZERO = timedelta(0)
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
    """tzinfo implementation mirroring the platform's local timezone.

    UTC offsets come from the module-level STDOFFSET/DSTOFFSET constants;
    whether DST applies to a given naive datetime is decided by round-tripping
    it through the C library via ``time.mktime``/``time.localtime``.
    """

    def utcoffset(self, dt):
        return DSTOFFSET if self._isdst(dt) else STDOFFSET

    def dst(self, dt):
        return DSTDIFF if self._isdst(dt) else ZERO

    def tzname(self, dt):
        return _time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # Build a time-tuple with tm_isdst=0 and let localtime() tell us
        # whether DST was actually in effect at that instant.
        timetuple = (dt.year, dt.month, dt.day,
                     dt.hour, dt.minute, dt.second,
                     dt.weekday(), 0, 0)
        local = _time.localtime(_time.mktime(timetuple))
        return local.tm_isdst > 0
# Module-level singleton representing the machine's local timezone.
Local = LocalTimezone()
|
{
"content_hash": "b531c028a2261532f75c4506262d2ff7",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 85,
"avg_line_length": 23.42222222222222,
"alnum_prop": 0.5986717267552182,
"repo_name": "HesselTjeerdsma/Cyber-Physical-Pacman-Game",
"id": "1e38e4bb52acc0ae2733b8239571058847879bb9",
"size": "1081",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Algor/flask/lib/python2.7/site-packages/requestlogger/timehacks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "144418"
},
{
"name": "Batchfile",
"bytes": "24"
},
{
"name": "C",
"bytes": "527696"
},
{
"name": "C++",
"bytes": "274346"
},
{
"name": "CSS",
"bytes": "79630"
},
{
"name": "Fortran",
"bytes": "14949"
},
{
"name": "JavaScript",
"bytes": "28328"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "30670952"
},
{
"name": "Roff",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "9697"
},
{
"name": "TeX",
"bytes": "1628"
}
],
"symlink_target": ""
}
|
from flask import Blueprint, session, render_template, abort, request, jsonify
import mysql.connector
from handlers.moment import publicMoments, userMoments, listMoments
from handlers.watch import allWatch
from handlers.like import userLike
from handlers.comment import userComments
import secret
# Blueprint grouping all feed/list endpoints under /lists/*.
lists_routes = Blueprint('lists_routes', __name__, template_folder = 'templates')
# MySQL connection parameters are kept out of the repository in `secret`.
config = secret.mysql()
#read most recent public moments
#return 0 for error
@lists_routes.route('/lists/readPublic', methods = ['GET', 'POST'])
def readPublic():
    """Return the newest page of public moments as JSON ('0' on error)."""
    # Only POST requests are served; anything else is a 404.
    if request.method != 'POST':
        abort(404)
    cnx = mysql.connector.connect(**config)
    try:
        # Offset 0 -> first (newest) page of public moments.
        recent = publicMoments(0, cnx)
    finally:
        cnx.close()
    if recent == '0':
        return '0'
    return jsonify(recent)
#Load more public moments
#return 0 for error
@lists_routes.route('/lists/loadPublic', methods = ['GET', 'POST'])
def loadPublic():
    """Return the next page of public moments (pages of 20) as JSON."""
    # Only POST requests are served; anything else is a 404.
    if request.method != 'POST':
        abort(404)
    # 'times' counts pages already shown; 20 moments per page.
    offset = request.json['times'] * 20
    cnx = mysql.connector.connect(**config)
    try:
        page = publicMoments(offset, cnx)
    finally:
        cnx.close()
    if page == '0':
        return '0'
    return jsonify(page)
#read most recent moment on watch list
#return 0 for error
#return 1 for didn't watch a pet
@lists_routes.route('/lists/readWatch', methods = ['GET', 'POST'])
def readWatch():
    """Return [moments, watched_pet_ids] for a user as JSON.

    Returns '0' on a handler error, '1' when the user watches no pets,
    and 404s anything that is not a POST.
    """
    if request.method == 'POST':
        userId = request.json['id']
        cnx = mysql.connector.connect(**config)
        try:
            #get all pets id one user watched
            lists = allWatch(userId, cnx)
            if lists == '0':
                return '0'
            #didn't watch a pet return 1
            elif len(lists) == 0:
                return '1'
            #get recent 20 moment from pet list
            new = userMoments(lists, 0, cnx)
        finally:
            cnx.close()
        if new == '0':
            return '0'
        else:
            # The watch list is returned too so the client can page further
            # via /lists/loadWatch without re-querying it.
            return jsonify([new, lists])
    else:
        abort(404)
#load more moment on watch list
#return 0 for error
@lists_routes.route('/lists/loadWatch', methods = ['GET', 'POST'])
def loadWatch():
    """Return the next page of moments for an already-fetched watch list."""
    # Only POST requests are served; anything else is a 404.
    if request.method != 'POST':
        abort(404)
    pet_ids = request.json['list']
    page_no = request.json['load']
    cnx = mysql.connector.connect(**config)
    try:
        # Pages of 20 moments each.
        page = userMoments(pet_ids, page_no * 20, cnx)
    finally:
        cnx.close()
    if page == '0':
        return '0'
    return jsonify(page)
#load 20 love moment for one user
#return 0 for error
@lists_routes.route('/lists/readLove', methods = ['GET', 'POST'])
def readLove():
    """Return one page (20) of moments the user has liked, '0' on error."""
    if request.method == 'POST':
        id = request.json['id']
        # 'load' counts pages already shown; 20 entries per page.
        load = request.json['load'] * 20
        cnx = mysql.connector.connect(**config)
        try:
            #get all love moments id
            love = userLike(id, load, cnx)
            if love == '0':
                return '0'
            elif len(love) == 0:
                moments = []
            else:
                # First column of each row is the liked moment's id.
                loves = [l[0] for l in love]
                moments = listMoments(loves, cnx)
        finally:
            cnx.close()
        if moments == '0':
            return '0'
        else:
            return jsonify(moments)
    else:
        abort(404)
#load 20 moment where user leave a comment
#return 0 for error
@lists_routes.route('/lists/readComment', methods = ['GET', 'POST'])
def readComment():
    """Return one page (20) of moments the user commented on, '0' on error."""
    if request.method == 'POST':
        id = request.json['id']
        # 'load' counts pages already shown; 20 entries per page.
        load = request.json['load'] * 20
        cnx = mysql.connector.connect(**config)
        try:
            comment = userComments(id, load, cnx)
            if comment == '0':
                return '0'
            elif len(comment) == 0:
                moments = []
            else:
                # First column of each row is the commented moment's id.
                comments = [c[0] for c in comment]
                moments = listMoments(comments, cnx)
        finally:
            cnx.close()
        if moments == '0':
            return '0'
        else:
            return jsonify(moments)
    else:
        abort(404)
|
{
"content_hash": "46bace1d72d91eefc75cbe8e054367b1",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 81,
"avg_line_length": 29.64625850340136,
"alnum_prop": 0.5378614043139055,
"repo_name": "byn9826/Thousand-Day",
"id": "ed1f6080ef162afaeb6174f50a08ee7af81efd61",
"size": "4400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/lists.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "16241"
},
{
"name": "Python",
"bytes": "161765"
}
],
"symlink_target": ""
}
|
import pytest
import github3
from .helper import (UnitHelper, UnitIteratorHelper, create_url_helper,
                     create_example_data_helper)
# Builds full GitHub API URLs rooted at the example 'octocat' user.
url_for = create_url_helper(
    'https://api.github.com/users/octocat'
)
# Canned user JSON payload (presumably loaded from a users_example fixture
# file) shared by the test classes below.
get_users_example_data = create_example_data_helper('users_example')
example_data = get_users_example_data()
class TestUserIterators(UnitIteratorHelper):
    """Test User methods that return iterators.

    Each test advances the iterator once and asserts that exactly one GET
    was issued against the expected URL with default pagination params.
    """

    described_class = github3.users.User
    example_data = example_data.copy()

    def test_events(self):
        """Test the request to retrieve a user's events."""
        i = self.instance.events()
        self.get_next(i)

        self.session.get.assert_called_once_with(
            url_for('events'),
            params={'per_page': 100},
            headers={}
        )

    def test_followers(self):
        """Test the request to retrieve follwers."""
        f = self.instance.followers()
        self.get_next(f)

        self.session.get.assert_called_once_with(
            url_for('followers'),
            params={'per_page': 100},
            headers={}
        )

    def test_following(self):
        """Test the request to retrieve users a user is following."""
        i = self.instance.following()
        self.get_next(i)

        self.session.get.assert_called_once_with(
            url_for('following'),
            params={'per_page': 100},
            headers={}
        )

    def test_keys(self):
        """Test the request to retrieve a user's public keys."""
        i = self.instance.keys()
        self.get_next(i)

        self.session.get.assert_called_once_with(
            url_for('keys'),
            params={'per_page': 100},
            headers={}
        )

    def test_organization_events(self):
        """Test the request to retrieve a user's organization events."""
        i = self.instance.organization_events('org-name')
        self.get_next(i)

        self.session.get.assert_called_once_with(
            url_for('events/orgs/org-name'),
            params={'per_page': 100},
            headers={}
        )

    def test_organization_events_requires_an_org(self):
        """Test that organization_events will ignore empty org names."""
        # An empty org yields an empty iterator rather than a request.
        i = self.instance.organization_events(None)

        with pytest.raises(StopIteration):
            next(i)

    def test_organizations(self):
        """Test the request to retrieve the orgs a user belongs to."""
        i = self.instance.organizations()
        self.get_next(i)

        self.session.get.assert_called_once_with(
            url_for('orgs'),
            params={'per_page': 100},
            headers={}
        )

    def test_received_events(self):
        """Test the request to retrieve the events a user receives."""
        i = self.instance.received_events()
        self.get_next(i)

        self.session.get.assert_called_once_with(
            url_for('received_events'),
            params={'per_page': 100},
            headers={}
        )

    def test_received_events_public_only(self):
        """Test the public request to retrieve the events a user received."""
        i = self.instance.received_events(True)
        self.get_next(i)

        self.session.get.assert_called_once_with(
            url_for('received_events/public'),
            params={'per_page': 100},
            headers={}
        )

    def test_starred_repositories(self):
        """Test the request to retrieve a user's starred repos."""
        i = self.instance.starred_repositories()
        self.get_next(i)

        # The star media type is required to include starred_at timestamps.
        self.session.get.assert_called_once_with(
            url_for('starred'),
            params={'per_page': 100},
            headers={
                'Accept': 'application/vnd.github.v3.star+json'
            }
        )

    def test_subscriptions(self):
        """Test the request to retrieve a user's subscriptions."""
        i = self.instance.subscriptions()
        self.get_next(i)

        self.session.get.assert_called_once_with(
            url_for('subscriptions'),
            params={'per_page': 100},
            headers={}
        )
class TestUsersRequiresAuth(UnitHelper):
    """Test that ensure certain methods on the User class requires auth."""

    described_class = github3.users.User
    example_data = example_data.copy()

    def after_setup(self):
        """Disable authentication on sessions."""
        self.session.has_auth.return_value = False

    def test_organization_events(self):
        """Test that #organization_events requires authentication."""
        # With auth disabled above, the call must raise instead of request.
        with pytest.raises(github3.GitHubError):
            self.instance.organization_events('foo')
|
{
"content_hash": "67e8d1d1f6a158f19f87af815d402fec",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 77,
"avg_line_length": 29.556962025316455,
"alnum_prop": 0.582441113490364,
"repo_name": "ueg1990/github3.py",
"id": "fad34bbf9ff383955ff4f96d2ee31dc2b939257a",
"size": "4670",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/test_users.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "97717"
},
{
"name": "Makefile",
"bytes": "362"
},
{
"name": "Python",
"bytes": "686011"
}
],
"symlink_target": ""
}
|
__author__ = 'Ming'
import threading
import socket
import serial
import cv2
import numpy as np
import math
import sys
# distance data measured by ultrasonic sensor
# NOTE(review): sensor_data is never read or updated anywhere in this module
# -- presumably a leftover from a version that also streamed sensor readings.
sensor_data =b""
class NeuralNetwork(object):
    """Thin wrapper around a pre-trained OpenCV MLP classifier."""

    def __init__(self):
        # Load the multi-layer perceptron trained offline.
        self.annmodel = cv2.ml.ANN_MLP_load('mlp_mlp2.xml')

    def predict(self, samples):
        """Return the index of the highest-scoring output class per sample."""
        _, response = self.annmodel.predict(samples)
        return response.argmax(-1)  # find max
class Car_Control(object):
    # Sends steering commands to the car over a serial link.
    def __init__(self):
        # NOTE(review): COM port and baud rate are hard-coded (Windows 'com3').
        self.serial_port = serial.Serial('com3', 115200, timeout=1)
    def steer(self, prediction):
        # Encode the predicted class as a single command byte (class * 10).
        # NOTE(review): chr() requires an int here -- this relies on
        # `prediction` (argmax result) coercing to one; confirm with caller.
        self.serial_port.write(chr(prediction*10).encode())
        print('', prediction)
class VideoDateHandle(object):
    # create neural network
    def __init__(self):
        # NOTE(review): the constructor does all the work -- it blocks on
        # accept() waiting for the camera client, then runs handle() forever.
        self.model = NeuralNetwork()
        self.my_car = Car_Control()
        self.server_socket = socket.socket()
        self.server_socket.bind(('192.168.191.1', 8080))
        self.server_socket.listen(0)
        self.connection = self.server_socket.accept()[0].makefile('rb')
        self.handle()
    def handle(self):
        # Decode an MJPEG stream: accumulate bytes and split frames on the
        # JPEG start (ff d8) / end (ff d9) markers.
        stream_bytes = b' '
        et1 = cv2.getTickCount()
        # stream video frames one by one
        try:
            while True:
                stream_bytes += self.connection.read(1024)
                first = stream_bytes.find(b'\xff\xd8')
                last = stream_bytes.find(b'\xff\xd9')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last+2]
                    stream_bytes = stream_bytes[last+2:]
                    gray = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), 0)
                    image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), -1)
                    # lower half of the image
                    half_gray = gray[50:170, :]
                    # NOTE(review): half_img is never used afterwards.
                    half_img = image[50:170, :]
                    half_gray = cv2.GaussianBlur(half_gray,(15, 15),0)
                    half_gray = cv2.threshold(half_gray, 100, 255, cv2.THRESH_BINARY)[1]
                    # reshape image
                    # 120 rows x 320 cols = 38400 features per sample.
                    image_array = half_gray.reshape(1, 38400).astype(np.float32)
                    # neural network makes prediction
                    prediction = self.model.predict(image_array)
                    # et2 = cv2.getTickCount()
                    # time1 = (et2 - et1)/ cv2.getTickFrequency()
                    # print("time1:",time1)
                    # et1 = cv2.getTickCount()
                    cv2.putText(half_gray,"{}".format(prediction), (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
                    cv2.imshow('image', half_gray)
                    self.my_car.steer(prediction)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                    # NOTE(review): destroyAllWindows() runs on *every* frame
                    # here, not after the loop -- looks unintentional; confirm.
                    cv2.destroyAllWindows()
        finally:
            self.connection.close()
            self.server_socket.close()
if __name__ == '__main__':
    # Construction blocks: waits for the camera client, then streams and
    # steers until 'q' is pressed in the preview window.
    VideoDateHandle()
|
{
"content_hash": "d0ae226e8b02623db1ea80cb6537adfe",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 123,
"avg_line_length": 31.08421052631579,
"alnum_prop": 0.534371825262445,
"repo_name": "lurenlym/BPnetwork_smartcar",
"id": "fc4dbde5183cf3908d446bad8fa2f5b92d4a0f11",
"size": "2953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "computer/computer_mydriver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6784"
},
{
"name": "C++",
"bytes": "1013"
},
{
"name": "Python",
"bytes": "14549"
}
],
"symlink_target": ""
}
|
import os
import logging
import subprocess
def spawn(args, **kwargs):
    """Spawn a subprocess and return its ``subprocess.Popen`` handle.

    Parameters
    ----------
    args:
        Command and arguments, as accepted by ``subprocess.Popen``.
    **kwargs:
        Extra ``Popen`` keyword arguments.  ``cwd`` defaults to this
        module's directory and ``bufsize`` is always forced to -1
        (fully buffered).

    Returns
    -------
    The started ``Popen`` object, or ``None`` if the process could not
    be spawned (the failure is logged, matching the original contract).
    """
    if 'cwd' not in kwargs:
        kwargs['cwd'] = os.path.dirname(os.path.abspath(__file__))
    kwargs['bufsize'] = -1
    if os.name == 'nt':
        # Keep a console window from flashing up on Windows.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        kwargs['startupinfo'] = startupinfo
    try:
        return subprocess.Popen(args, **kwargs)
    except (OSError, ValueError) as error:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # and programming errors are no longer silently swallowed.
        logging.error(
            'Your operating system denied the spawn of %s process (%s)',
            args[0], error
        )
        return None
|
{
"content_hash": "15a98fade473f89bcb45a939e12d553f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 25.2,
"alnum_prop": 0.5984126984126984,
"repo_name": "j5shi/ST3_Config",
"id": "04c3e7f93726fcbe1554d3562be39e6df0cbbbc5",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Packages/Anaconda/anaconda_server/process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1194072"
}
],
"symlink_target": ""
}
|
import logging
from paramiko import client as pmclient
import time
class SSHSession(object):
    """Password-authenticated SSH connection built on paramiko.

    Unknown host keys are auto-accepted (AutoAddPolicy), so this is only
    suitable for trusted networks.
    """
    def __init__(self, hostname, username, password, logger=None):
        self.logger = logger or logging
        self.client = ssh = pmclient.SSHClient()
        # No known_hosts verification: accept whatever key the host offers.
        ssh.set_missing_host_key_policy(pmclient.AutoAddPolicy())
        self.logger.debug("SSHClient: Connecting to %s with %s and password", hostname, username)
        ssh.connect(hostname, username=username, password=password, look_for_keys=False)
    def execute(self, command, inputs=None, read_stdout=False, *args, **kwargs):
        """Run *command* remotely and return its exit code.

        If *inputs* is given it is written to the remote stdin, which is
        then closed so the command sees EOF.  With ``read_stdout=True`` the
        return value is a ``(rc, stdout_bytes)`` tuple instead.
        """
        stdin, stdout, stderr = self.client.exec_command(command, *args, **kwargs)
        if inputs:
            stdin.channel.sendall(inputs)
            stdin.channel.shutdown_write()
        # Poll until the remote command finishes.
        # NOTE(review): recv_exit_status() below already blocks until exit,
        # so this busy-wait looks redundant -- confirm before removing.
        while True:
            if stdout.channel.exit_status_ready():
                break
            time.sleep(0.001)
        rc = stdout.channel.recv_exit_status()
        if read_stdout:
            return rc, stdout.read()
        else:
            return rc
    def ping(self):
        # Cheap liveness probe: run a no-op remote command.
        self.execute('echo')
    def close(self):
        """Close the underlying SSH client (safe to call more than once)."""
        if self.client:
            self.logger.debug("Shutting down SSH session")
            self.client.close()
            self.client = None
self.client = None
class SFTPSession(object):
    """SFTP session, optionally piggybacking on an existing SSHSession.

    If *ssh* is not supplied, hostname/username/password must all be given
    and a private SSHSession is created (and closed again by ``close()``).
    """
    def __init__(self, hostname=None, username=None, password=None, initial_path=None, logger=None, ssh=None):
        # Either a ready SSH session or full credentials are required.
        assert ssh or (hostname and username and password)
        # Remember whether we own (and therefore must close) the SSH session.
        self.__own_ssh = not bool(ssh)
        ssh = self.__ssh_client = ssh or SSHSession(hostname, username, password, logger=logger)
        self.logger = logger or logging
        # BUG FIX: was `logger.debug(...)`, which crashed with AttributeError
        # whenever the default logger=None was used.
        self.logger.debug("SFTP: Opening session")
        self.client = sftp = ssh.client.open_sftp()
        if initial_path:
            self.logger.debug("remote cd: %s", initial_path)
            sftp.chdir(initial_path)
    def ping(self):
        """Cheap liveness check: stat the current remote directory."""
        self.client.stat('.')
    def close(self):
        """Close the SFTP channel, and the SSH session if we created it."""
        if self.client:
            self.logger.debug("Shutting down SFTP session")
            self.client.close()
            self.client = None
        if self.__own_ssh and self.__ssh_client:
            self.__ssh_client.close()
            # BUG FIX: was `self._ssh_client = None`, a different (unmangled)
            # attribute -- the real `__ssh_client` reference was never cleared,
            # so repeated close() calls would close the SSH session again.
            self.__ssh_client = None
|
{
"content_hash": "c75d5dc80159ae525a1fa63166ec00c9",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 110,
"avg_line_length": 29.105263157894736,
"alnum_prop": 0.6030741410488246,
"repo_name": "kfsone/tinker",
"id": "93351d962b325c4e457a88da77e281626a7f8afd",
"size": "2212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/sync/sftpsession.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "630"
},
{
"name": "C#",
"bytes": "5936"
},
{
"name": "C++",
"bytes": "73586"
},
{
"name": "CMake",
"bytes": "612"
},
{
"name": "Dockerfile",
"bytes": "110"
},
{
"name": "Go",
"bytes": "10709"
},
{
"name": "Jupyter Notebook",
"bytes": "53097"
},
{
"name": "Python",
"bytes": "232081"
},
{
"name": "Rust",
"bytes": "743"
},
{
"name": "Shell",
"bytes": "1897"
},
{
"name": "Vim script",
"bytes": "1706"
}
],
"symlink_target": ""
}
|
import os
import sys
import subprocess
import glob
# Swift object-store container names used for pipeline input and output.
swift_container_input = "anders-test"
swift_container_output = "anders-test"

# OpenStack credentials are read from the environment (e.g. a sourced RC
# file); a KeyError here means the environment is not set up.
os_password = os.environ["OS_PASSWORD"]
os_username = os.environ["OS_PASSWORD" == "" and "" or "OS_USERNAME"]
try:
    # Step 1: PeakPickerHiRes over every file under testdata/.
    # NOTE(review): each step below interpolates the password into a command
    # run with shell=True; a password containing shell metacharacters would
    # break (or inject into) the command line -- consider passing credentials
    # through the subprocess environment instead. TODO confirm with owners.
    # This command is example of providing all parameters as one environment variable named "envparams"
    dockerCommand1 = 'docker run -e "envparams=infile={0},outfile={1},swift_container_input={2},swift_container_output={3},OS_AUTH_URL=https://identity1.citycloud.com:5000/v3/,OS_AUTH_VERSION=3,OS_TENANT_NAME=UU_KTH_PhenoMeNal,OS_TENANT_ID=17bcdf88f1fd40de85f53b5038722681,OS_REGION_NAME=Lon1,OS_DOMAIN_ID=08ba9f88ca5647b0ad4d651698eef3d0,OS_USER_DOMAIN_ID=08ba9f88ca5647b0ad4d651698eef3d0,OS_USERNAME={4},OS_PASSWORD={5}" farmbio/mtbls233-openms /bin/OpenMS.py PeakPickerHiRes -in {{infile}} -out {{outfile}} -ini /params_MTBLS233/PPparam.ini'
    # first list all files in container (to be processed one by one in loop)
    swiftlist = subprocess.check_output(["swift", "list", "anders-test", "-p", "testdata"])
    for aFile in swiftlist.splitlines():
        print aFile
        inputfile = aFile
        outputfile = "/MTBLS233-POP/pp_out/" + os.path.basename(aFile)
        command = dockerCommand1.format(inputfile, outputfile, swift_container_input, swift_container_output, os_username, os_password)
        stdout = subprocess.check_output( command, shell=True )
        print "stdout: " + stdout

    # Step 2: FeatureFinderMetabo over every peak-picked file.
    # This command is example of providing parameters as separate environment variables
    dockerCommand2 = 'docker run -e "infile={0}" -e "outfile={1}" -e "swift_container_input={2}" -e "swift_container_output={3}" -e "OS_AUTH_URL=https://identity1.citycloud.com:5000/v3/" -e "OS_AUTH_VERSION=3" -e "OS_TENANT_NAME=UU_KTH_PhenoMeNal" -e "OS_TENANT_ID=17bcdf88f1fd40de85f53b5038722681" -e "OS_REGION_NAME=Lon1" -e "OS_DOMAIN_ID=08ba9f88ca5647b0ad4d651698eef3d0" -e "OS_USER_DOMAIN_ID=08ba9f88ca5647b0ad4d651698eef3d0" -e "OS_USERNAME={4}" -e "OS_PASSWORD={5}" farmbio/mtbls233-openms /bin/OpenMS.py FeatureFinderMetabo -in {{infile}} -out {{outfile}} -ini /params_MTBLS233/FFparam.ini'
    swiftlist = subprocess.check_output(["swift", "list", "anders-test", "-p", "MTBLS233-POP/pp_out/"])
    for aFile in swiftlist.splitlines():
        print aFile
        inputfile = aFile
        outputfile = "/MTBLS233-POP/ff_out/" + os.path.basename(aFile) + ".featureXML"
        command = dockerCommand2.format(inputfile, outputfile, swift_container_input, swift_container_output, os_username, os_password)
        stdout = subprocess.check_output( command, shell=True )
        print "stdout: " + stdout

    # Step 3: FeatureLinkerUnlabeledQT links all featureXML files into one
    # consensusXML (runs once on the whole ff_out/ prefix, no loop).
    dockerCommand3 = 'docker run -e "infile={0}" -e "outfile={1}" -e "swift_container_input={2}" -e "swift_container_output={3}" -e "OS_AUTH_URL=https://identity1.citycloud.com:5000/v3/" -e "OS_AUTH_VERSION=3" -e "OS_TENANT_NAME=UU_KTH_PhenoMeNal" -e "OS_TENANT_ID=17bcdf88f1fd40de85f53b5038722681" -e "OS_REGION_NAME=Lon1" -e "OS_DOMAIN_ID=08ba9f88ca5647b0ad4d651698eef3d0" -e "OS_USER_DOMAIN_ID=08ba9f88ca5647b0ad4d651698eef3d0" -e "OS_USERNAME={4}" -e "OS_PASSWORD={5}" farmbio/mtbls233-openms /bin/OpenMS.py FeatureLinkerUnlabeledQT -in {{infile}} -out {{outfile}} -ini /params_MTBLS233/FLparam.ini'
    inputfile = "MTBLS233-POP/ff_out/"
    outputfile = "/MTBLS233-POP/fl_out/" + "MTBLS233-POP.consensusXML"
    command = dockerCommand3.format(inputfile, outputfile, swift_container_input, swift_container_output, os_username, os_password)
    stdout = subprocess.check_output( command, shell=True )
    print "stdout: " + stdout

    # Step 4: FileFilter over the linked consensus file.
    dockerCommand4 = 'docker run -e "infile={0}" -e "outfile={1}" -e "swift_container_input={2}" -e "swift_container_output={3}" -e "OS_AUTH_URL=https://identity1.citycloud.com:5000/v3/" -e "OS_AUTH_VERSION=3" -e "OS_TENANT_NAME=UU_KTH_PhenoMeNal" -e "OS_TENANT_ID=17bcdf88f1fd40de85f53b5038722681" -e "OS_REGION_NAME=Lon1" -e "OS_DOMAIN_ID=08ba9f88ca5647b0ad4d651698eef3d0" -e "OS_USER_DOMAIN_ID=08ba9f88ca5647b0ad4d651698eef3d0" -e "OS_USERNAME={4}" -e "OS_PASSWORD={5}" farmbio/mtbls233-openms /bin/OpenMS.py FileFilter -in {{infile}} -out {{outfile}} -ini /params_MTBLS233/FileFparam.ini'
    swiftlist = subprocess.check_output(["swift", "list", "anders-test", "-p", "MTBLS233-POP/fl_out/"])
    for aFile in swiftlist.splitlines():
        print aFile
        inputfile = aFile
        outputfile = "/MTBLS233-POP/filef_out/" + "MTBLS233-POP.consensusXML"
        command = dockerCommand4.format(inputfile, outputfile, swift_container_input, swift_container_output, os_username, os_password)
        stdout = subprocess.check_output( command, shell=True )
        print "stdout: " + stdout

    # Step 5: TextExporter converts the filtered consensus file to CSV.
    dockerCommand5 = 'docker run -e "infile={0}" -e "outfile={1}" -e "swift_container_input={2}" -e "swift_container_output={3}" -e "OS_AUTH_URL=https://identity1.citycloud.com:5000/v3/" -e "OS_AUTH_VERSION=3" -e "OS_TENANT_NAME=UU_KTH_PhenoMeNal" -e "OS_TENANT_ID=17bcdf88f1fd40de85f53b5038722681" -e "OS_REGION_NAME=Lon1" -e "OS_DOMAIN_ID=08ba9f88ca5647b0ad4d651698eef3d0" -e "OS_USER_DOMAIN_ID=08ba9f88ca5647b0ad4d651698eef3d0" -e "OS_USERNAME={4}" -e "OS_PASSWORD={5}" farmbio/mtbls233-openms /bin/OpenMS.py TextExporter -in {{infile}} -out {{outfile}} -ini /params_MTBLS233/TEparam.ini'
    swiftlist = subprocess.check_output(["swift", "list", "anders-test", "-p", "MTBLS233-POP/filef_out/"])
    for aFile in swiftlist.splitlines():
        print aFile
        inputfile = aFile
        outputfile = "/MTBLS233-POP/te_out/" + "Result.csv"
        command = dockerCommand5.format(inputfile, outputfile, swift_container_input, swift_container_output, os_username, os_password)
        stdout = subprocess.check_output( command, shell=True )
        print "stdout: " + stdout
except Exception as err:
    print "Exception formated:" + format(err)
|
{
"content_hash": "b86ec2b3490d73b50e0b246e6eabd9c8",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 602,
"avg_line_length": 78.39189189189189,
"alnum_prop": 0.7160834338907085,
"repo_name": "phnmnl/MTBLS233-POP",
"id": "811f87075757f190af07e7386bfdad70813bf1c6",
"size": "5819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "container-openms/test/demo_runner.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "242652"
},
{
"name": "Python",
"bytes": "10234"
},
{
"name": "R",
"bytes": "11206"
},
{
"name": "Shell",
"bytes": "1361"
}
],
"symlink_target": ""
}
|
from subprocess import call
import os
import argparse

# Parse the host port to publish; argparse supplies the default itself, so
# the manual ``args.port == None`` check (which should have been ``is None``
# anyway) is no longer needed.
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", default="8080",
                    help="port to start service on (default: 8080)")
args = parser.parse_args()

# Launch the service container, mapping host port -> container port 8080.
call(["docker", "run", "-i", "-p", args.port + ":8080", "-t", "wut"])
|
{
"content_hash": "b330631f3edc9e1036c7d84d5476ae4f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 21.066666666666666,
"alnum_prop": 0.6455696202531646,
"repo_name": "WebUtilityKit/api-server",
"id": "5bed21771ba3f1ffa0e1a4c858792d3d490744d8",
"size": "335",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "start.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "1119746"
},
{
"name": "JavaScript",
"bytes": "3422"
},
{
"name": "Python",
"bytes": "5253"
},
{
"name": "Shell",
"bytes": "7081"
}
],
"symlink_target": ""
}
|
import random
import string
from ..base import Factory
class RandomLengthStringFactory(Factory):
    """Factory yielding pseudo-random ASCII-letter strings of random length.

    Each call returns ``prefix + <random letters> + suffix`` where the random
    part is between ``min_chars`` and ``max_chars`` characters long.

    :param min_chars: minimum length of the random part (default 3).
    :param max_chars: maximum length of the random part (default 100).
    :param prefix: fixed string prepended to every result.
    :param suffix: fixed string appended to every result.
    :raises TypeError: if min_chars or max_chars is not an integer.
    """

    MIN_CHAR_DEFAULT = 3
    MAX_CHAR_DEFAULT = 100

    def __init__(self, min_chars=None, max_chars=None, prefix=None, suffix=None):
        super(RandomLengthStringFactory, self).__init__()
        # BUG FIX: compare against None so an explicit 0 is not silently
        # replaced by the default (the old truthiness test treated 0 as
        # "not provided").
        self._min_chars = min_chars if min_chars is not None else self.MIN_CHAR_DEFAULT
        self._max_chars = max_chars if max_chars is not None else self.MAX_CHAR_DEFAULT
        self._prefix = prefix if prefix else ''
        self._suffix = suffix if suffix else ''
        # isinstance() is the idiomatic type check (type(...) != int rejects
        # int subclasses for no reason).
        if not isinstance(self._min_chars, int):
            raise TypeError("min_chars needs to be an integer")
        if not isinstance(self._max_chars, int):
            raise TypeError("max_chars needs to be an integer")

    def __call__(self):
        """Return one freshly generated random string."""
        length = random.randint(self._min_chars, self._max_chars)
        # ``range`` (not the Python-2-only ``xrange``) keeps this importable
        # under Python 3 while behaving identically under Python 2.
        body = ''.join(random.choice(string.ascii_letters) for _ in range(length))
        return self._prefix + body + self._suffix
class HashHexDigestFactory(Factory):
    """
    Returns on each iteration the result of the hash `hash_class`.hexdigest(), generated
    from a pseudo random string.

    :param hash_class: Any hash class from the hashlib package, like hashlib.md5

    Examples,
    >> for i in HashHexDigestFactory(hashlib.md5).generate(3):
    .. print i
    aaaa6305d730ca70eae904ca47e427c8
    d172baa4019279f3f78a624f2a0b3e2b
    78cd377dc9421cd4252d8110f9acb7c4
    >> for i in HashHexDigestFactory(hashlib.sha224).generate(3):
    .. print i
    8dfd75184b6b5f9be73050dc084a8a3ebcf4c45fc5ca334df911c7c5
    ee1822b3cd7f58eb81bd37b7e5933d73a62578a2c060e7e4808569d0
    3c2ecb8fd519795f77620614ed5b45ccd611a12aa9d355683ac791d9
    """
    # DOC FIX: removed the stale ``:param element_amount:`` entry -- the
    # constructor takes no such parameter (the count goes to generate()).

    _MAX_VALUE_LENGTH = 100
    _MIN_VALUE_LENGTH = 3

    def __init__(self, hash_class):
        super(HashHexDigestFactory, self).__init__()
        self._hash_class = hash_class

    def __call__(self):
        """Return the hex digest of one freshly generated random string."""
        length = random.randint(self._MIN_VALUE_LENGTH, self._MAX_VALUE_LENGTH)
        random_string = ''.join(random.choice(string.ascii_letters) for _ in range(length))
        # BUG FIX: hashlib constructors require bytes on Python 3; encoding
        # ASCII letters is byte-identical on Python 2, so behaviour there is
        # unchanged. Also ``range`` instead of the Python-2-only ``xrange``.
        return self._hash_class(random_string.encode('ascii')).hexdigest()
|
{
"content_hash": "d22d6421606e3ffe7d204d2030f9d79b",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 94,
"avg_line_length": 36.15625,
"alnum_prop": 0.6806395851339672,
"repo_name": "arieb/python-testdata",
"id": "fc02d9c9e775b5c83ce403d1e719f62e6fe97894",
"size": "2314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testdata/factories/strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33800"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from . import common
from .. import rw
from .call_continue import CallContinueMessage
from .types import Types
class CallResponseContinueMessage(CallContinueMessage):
    """A continuation frame of a call response that spans multiple frames."""

    message_type = Types.CALL_RES_CONTINUE

    def __init__(self, flags=0, checksum=None, args=None, id=0):
        super(CallResponseContinueMessage, self).__init__(
            flags, checksum, args, id)

    def fragment(self, space_left):
        # The continuation frame carries the same flags and checksum; the
        # base class splits the args across the remaining frame space.
        continuation = CallResponseContinueMessage(
            flags=self.flags,
            checksum=self.checksum,
        )
        return super(CallResponseContinueMessage, self).fragment(
            space_left, continuation)
# Reader/writer that (de)serializes CallResponseContinueMessage frames
# according to the field layout noted on each line.
call_res_c_rw = rw.instance(
    CallResponseContinueMessage,
    ("flags", rw.number(1)),  # flags:1
    ("checksum", common.checksum_rw),  # csumtype:1 (csum:4){0, 1}
    ("args", rw.args(rw.number(2))),  # [arg1~2, arg2~2, arg3~2]
)
|
{
"content_hash": "fb8561f7880bcb40508f32d12b6cf4d6",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 79,
"avg_line_length": 29.27777777777778,
"alnum_prop": 0.6299810246679317,
"repo_name": "uber/tchannel-python",
"id": "f01c47d922c72f979e08b72a5b2d4f74c876670c",
"size": "2157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tchannel/messages/call_response_continue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "597"
},
{
"name": "Makefile",
"bytes": "3621"
},
{
"name": "Python",
"bytes": "776222"
},
{
"name": "Shell",
"bytes": "1484"
},
{
"name": "Thrift",
"bytes": "13128"
}
],
"symlink_target": ""
}
|
# BUG FIX: render_template was used below without being imported.
from flask import Blueprint, render_template

bp = Blueprint('personal_access_token.web', __name__)


@bp.before_request
def before_request():
    # NOTE(review): Blueprint objects have no ``app`` attribute by default;
    # this relies on it being attached elsewhere (e.g. by the extension's
    # init_app) -- confirm before relying on it.
    bp.app.call_before_request_funcs()


@bp.route('/')
def index():
    """Render the personal-access-token management page."""
    return render_template('personal_access_token/index.html')
|
{
"content_hash": "e9caed28e66887f4af563acb08fa2ff3",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 23.272727272727273,
"alnum_prop": 0.7109375,
"repo_name": "soasme/flask-personal-access-token",
"id": "6061726c59c4ba897363c3d3da2484f55f17a666",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_personal_access_token/web.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1502"
},
{
"name": "JavaScript",
"bytes": "1923"
},
{
"name": "Python",
"bytes": "9683"
}
],
"symlink_target": ""
}
|
from django import forms
class ConfirmationWidget(forms.TextInput):
template_name = "account/widgets/confirmation.html"
def __init__(self, phrase, **kwargs):
self.phrase = phrase
super(ConfirmationWidget, self).__init__(**kwargs)
def get_context(self, *args):
context = super(ConfirmationWidget, self).get_context(*args)
context["widget"].update({"phrase": self.phrase})
return context
|
{
"content_hash": "11eb290b52f40833f9c5bfb792a0efd7",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 68,
"avg_line_length": 31.571428571428573,
"alnum_prop": 0.6628959276018099,
"repo_name": "fin/froide",
"id": "caa3e6889e3916afbe8185e7f85c7de4a49f3410",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "froide/account/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "302838"
},
{
"name": "JavaScript",
"bytes": "47357"
},
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "1706123"
},
{
"name": "SCSS",
"bytes": "39397"
},
{
"name": "TypeScript",
"bytes": "57910"
},
{
"name": "Vue",
"bytes": "218866"
}
],
"symlink_target": ""
}
|
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from openstack.common.gettextutils import _
"""
import copy
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
# Cache of available-language lists, keyed by translation domain
# (see get_available_languages()).
_AVAILABLE_LANGUAGES = {}

# FIXME(dhellmann): Remove this when moving to oslo.i18n.
# When True, _() returns lazily-translated Message objects instead of
# immediately translated strings (see enable_lazy()).
USE_LAZY = False
class TranslatorFactory(object):
    """Create translator functions for a single message catalog domain."""

    def __init__(self, domain, localedir=None):
        """Establish a set of translation functions for the domain.

        :param domain: Name of translation domain,
                       specifying a message catalog.
        :type domain: str
        :param localedir: Directory with translation catalogs.  When omitted,
                          taken from the ``<DOMAIN>_LOCALEDIR`` environment
                          variable.
        :type localedir: str
        """
        # DOC FIX: the old docstring described a ``lazy`` parameter that
        # does not exist; laziness is controlled by the module-global
        # USE_LAZY instead.
        self.domain = domain
        if localedir is None:
            localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
        self.localedir = localedir

    def _make_translation_func(self, domain=None):
        """Return a new translation function ready for use.

        Takes into account whether or not lazy translation is being
        done.

        The domain can be specified to override the default from the
        factory, but the localedir from the factory is always used
        because we assume the log-level translation catalogs are
        installed in the same directory as the main application
        catalog.
        """
        if domain is None:
            domain = self.domain
        # fallback=True means a missing catalog yields identity translation
        # rather than raising.
        t = gettext.translation(domain,
                                localedir=self.localedir,
                                fallback=True)
        # Use the appropriate method of the translation object based
        # on the python version.
        m = t.gettext if six.PY3 else t.ugettext

        def f(msg):
            """oslo.i18n.gettextutils translation function."""
            # USE_LAZY is consulted per call, so enable_lazy() takes effect
            # even for translators created earlier.
            if USE_LAZY:
                return Message(msg, domain=domain)
            return m(msg)
        return f

    @property
    def primary(self):
        "The default translation function."
        return self._make_translation_func()

    def _make_log_translation_func(self, level):
        """Return a translator bound to the '<domain>-log-<level>' catalog."""
        return self._make_translation_func(self.domain + '-log-' + level)

    @property
    def log_info(self):
        "Translate info-level log messages."
        return self._make_log_translation_func('info')

    @property
    def log_warning(self):
        "Translate warning-level log messages."
        return self._make_log_translation_func('warning')

    @property
    def log_error(self):
        "Translate error-level log messages."
        return self._make_log_translation_func('error')

    @property
    def log_critical(self):
        "Translate critical-level log messages."
        return self._make_log_translation_func('critical')
# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.

# Create the global translation functions for the 'brickclient' domain.
_translators = TranslatorFactory('brickclient')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level (I=info, W=warning, E=error, C=critical).
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical

# NOTE(dhellmann): End of globals that will move to the application's
# integration module.
def enable_lazy():
    """Switch the module-level _() into lazy-translation mode.

    After this call, translators built by TranslatorFactory return
    Message objects (translated only when rendered) instead of eagerly
    translated strings.  Useful for projects that import ``_`` directly
    rather than going through gettextutils.install().
    """
    global USE_LAZY
    USE_LAZY = True
def install(domain):
    """Install a domain-specific _() function into builtins.

    Behaves like gettext.install(), except that the locale directory may
    be overridden with a translation-domain-specific environment variable
    (e.g. NOVA_LOCALEDIR).  Lazy translation still requires a separate
    call to enable_lazy().

    :param domain: the translation domain
    """
    from six import moves
    factory = TranslatorFactory(domain)
    moves.builtins.__dict__['_'] = factory.primary
class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """

    def __new__(cls, msgid, msgtext=None, params=None,
                domain='brickclient', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        # Keep enough state on the instance to re-translate later:
        # the original id, the catalog domain, and any % parameters.
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg

    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.

        :returns: the translated message in unicode
        """

        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message

        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)

        translated_message = translated_message % translated_params

        return translated_message

    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        """Look up the translation of *msgid* in *domain* for the locale.

        Falls back to the system default locale (or en_US) when no locale
        is given, and to the untranslated msgid when no catalog is found.
        """
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]

        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext

        translated_message = translator(msgid)
        return translated_message

    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing  we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded

    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
          those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
          translated, it will be used as it was when the Message was created
        """
        if other is None:
            # '%s' % None is valid in Python, so wrap None in a tuple.
            params = (other,)
        elif isinstance(other, dict):
            # Merge the dictionaries
            # Copy each item in case one does not support deep copy.
            params = {}
            if isinstance(self.params, dict):
                for key, val in self.params.items():
                    params[key] = self._copy_param(val)
            for key, val in other.items():
                params[key] = self._copy_param(val)
        else:
            params = self._copy_param(other)
        return params

    def _copy_param(self, param):
        """Return a snapshot of *param*, degrading to unicode if uncopyable."""
        try:
            return copy.deepcopy(param)
        except Exception:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)

    def __add__(self, other):
        # Concatenation would lose the msgid/params needed to re-translate,
        # so it is rejected outright.
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)

    def __radd__(self, other):
        return self.__add__(other)

    if six.PY2:
        # Defined only on Python 2, where logging str()s log records.
        def __str__(self):
            # NOTE(luisg): Logging in python 2.6 tries to str() log records,
            # and it expects specifically a UnicodeError in order to proceed.
            msg = _('Message objects do not support str() because they may '
                    'contain non-ascii characters. '
                    'Please use unicode() or translate() instead.')
            raise UnicodeError(msg)
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    The result is computed once per domain and cached; callers always get
    a fresh copy of the cached list.

    :param domain: the domain to get languages for
    :returns: list of locale identifiers, starting with 'en_US'
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()

    # IDIOM FIX: a named def instead of assigning a lambda (PEP 8 E731).
    def find(x):
        # gettext.find() returns the catalog path, or None when the locale
        # has no catalog installed for this domain.
        return gettext.find(domain,
                            localedir=os.environ.get(localedir),
                            languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()

    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    #     https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    for (locale_, alias) in six.iteritems(aliases):
        if locale_ in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.

    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
                           default system locale will be used
    :returns: the translated object in unicode, or the original object if
              it could not be translated
    """
    candidate = obj
    if not isinstance(candidate, Message):
        # Coerce to unicode first; plain strings stay plain and fall
        # through to the untranslated return below.
        candidate = six.text_type(obj)
    if isinstance(candidate, Message):
        # Only genuine Message objects carry the state needed to translate.
        return candidate.translate(desired_locale)
    return obj
def _translate_args(args, desired_locale=None):
    """Translates all the translatable elements of the given arguments object.

    This method is used for translating the translatable values in method
    arguments which include values of tuples or dictionaries.
    If the object is not a tuple or a dictionary the object itself is
    translated if it is translatable.

    If the locale is None the object is translated to the system locale.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
                           default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(item, desired_locale) for item in args)
    if isinstance(args, dict):
        result = {}
        for (key, value) in six.iteritems(args):
            result[key] = translate(value, desired_locale)
        return result
    return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.

    Takes a locale and a target logging.Handler; each LogRecord is
    translated (it must carry Message objects, not plain strings) and then
    forwarded to the target.  Declarative logging.conf configuration:

        [handlers]
        keys = translatedlog, translator

        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context

        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)

    If the specified locale is not available in the system, the handler
    logs in the default locale.
    """

    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): Extending MemoryHandler (with capacity=0 so nothing
        # is ever buffered) is what lets logging.conf hand us a ``target``
        # handler declaratively -- only MemoryHandler's config parsing
        # accepts a target.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale

    def setFormatter(self, fmt):
        # Formatting happens in the target handler, so delegate.
        self.target.setFormatter(fmt)

    def emit(self, record):
        # Translation mutates the record in place; stash the original
        # message and args and restore them afterwards so other handlers
        # see the untranslated record.
        saved_msg = record.msg
        saved_args = record.args

        try:
            self._translate_and_log_record(record)
        finally:
            record.msg = saved_msg
            record.args = saved_args

    def _translate_and_log_record(self, record):
        record.msg = translate(record.msg, self.locale)

        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one))
        record.args = _translate_args(record.args, self.locale)

        self.target.emit(record)
|
{
"content_hash": "07941796dac7890e9346b5311705dcdb",
"timestamp": "",
"source": "github",
"line_count": 463,
"max_line_length": 79,
"avg_line_length": 37.54427645788337,
"alnum_prop": 0.6367140309497785,
"repo_name": "e0ne/python-brickagentclient",
"id": "98d5a738740f28c1592eb5de40ebcae60caff9fa",
"size": "18039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brickclient/openstack/common/gettextutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "255253"
},
{
"name": "Shell",
"bytes": "6954"
}
],
"symlink_target": ""
}
|
from libqtile.config import Key, Screen, Group, Drag, Click
from libqtile.lazy import lazy
from libqtile import layout, bar, widget
from typing import List # noqa: F401
mod = "mod4"  # the Super/Windows key

keys = [
    # Switch between windows in current stack pane
    Key([mod], "k", lazy.layout.down()),
    Key([mod], "j", lazy.layout.up()),

    # Move windows up or down in current stack
    Key([mod, "control"], "k", lazy.layout.shuffle_down()),
    Key([mod, "control"], "j", lazy.layout.shuffle_up()),

    # Switch window focus to other pane(s) of stack
    Key([mod], "space", lazy.layout.next()),

    # Swap panes of split stack
    Key([mod, "shift"], "space", lazy.layout.rotate()),

    # Toggle between split and unsplit sides of stack.
    # Split = all windows displayed
    # Unsplit = 1 window displayed, like Max layout, but still with
    # multiple stack panes
    Key([mod, "shift"], "Return", lazy.layout.toggle_split()),
    # Launch a terminal
    Key([mod], "Return", lazy.spawn("alacritty")),

    # Toggle between different layouts as defined below
    Key([mod], "Tab", lazy.next_layout()),
    Key([mod], "w", lazy.window.kill()),

    Key([mod, "control"], "r", lazy.restart()),
    Key([mod, "control"], "q", lazy.shutdown()),
    Key([mod], "r", lazy.spawncmd()),
]

# One workspace (group) per letter; the letter doubles as its hotkey below.
groups = [Group(i) for i in "asdfuiop"]

for i in groups:
    keys.extend([
        # mod1 + letter of group = switch to group
        Key([mod], i.name, lazy.group[i.name].toscreen()),

        # mod1 + shift + letter of group = switch to & move focused window to group
        Key([mod, "shift"], i.name, lazy.window.togroup(i.name, switch_group=True)),
        # Or, use below if you prefer not to switch to that group.
        # # mod1 + shift + letter of group = move focused window to group
        # Key([mod, "shift"], i.name, lazy.window.togroup(i.name)),
    ])

layouts = [
    layout.Max(),
    layout.Stack(num_stacks=2),
    # Try more layouts by unleashing below layouts.
    # layout.Bsp(),
    # layout.Columns(),
    layout.Matrix(),
    # layout.MonadTall(),
    # layout.MonadWide(),
    # layout.RatioTile(),
    # layout.Tile(),
    # layout.TreeTab(),
    # layout.VerticalTile(),
    # layout.Zoomy(),
]

# Defaults applied to every widget unless overridden per widget.
widget_defaults = dict(
    font='mono',
    fontsize=16,
    padding=3,
    border_width=0,
    margin_x=0,
    margin_y=0,
    line_width=3,
)
extension_defaults = widget_defaults.copy()
class Tango(widget.base.ThreadedPollText):
    """Qtile bar widget polling a single Tango device attribute.

    Displays ``label: value unit`` (configurable via ``format``) for the
    attribute named by *attr_name*.  Connection errors are shown in-line
    and the connection is retried on the next poll.
    """

    orientations = widget.base.ORIENTATION_HORIZONTAL
    defaults = [
        ("format", "{label}: {value}{unit}", "Formatting for field names."),
    ]

    def __init__(self, attr_name, **config):
        self._attr_name = attr_name
        self._attr = None  # AttributeProxy, created lazily on first poll
        self._config = None  # label/unit dict, fetched together with the proxy
        super().__init__(**config)
        self.add_defaults(Tango.defaults)

    def poll(self):
        # Imported lazily so the config file still loads on machines
        # without PyTango installed -- TODO confirm that is the intent.
        import tango
        if self._attr is None:
            try:
                attr = tango.AttributeProxy(self._attr_name)
                config = attr.get_config()
                self._config = dict(label=config.label, unit=config.unit)
                # BUG FIX: cache the proxy only after its config has been
                # fetched.  Previously a get_config() failure left
                # self._attr set but self._config missing, so every later
                # poll crashed with AttributeError instead of retrying.
                self._attr = attr
            except tango.DevFailed as err:
                return str(err)
        try:
            value = '{:.3f}'.format(self._attr.read().value)
        except tango.DevFailed:
            # Read failures are transient; show a placeholder this cycle.
            value = '-----'
        self._config['value'] = value
        return self.format.format(**self._config)
# Single screen with a bottom status bar: layout/group indicators on the
# left, window title in the middle, system monitors and clock on the right.
screens = [
    Screen(
        bottom=bar.Bar(
            [
                widget.CurrentLayout(),
                widget.GroupBox(font='sans', fontsize=12, highlight_method='block'),
                widget.Prompt(),
                widget.WindowName(),
                widget.Sep(foreground='AAAAAA'),
                # Tango('controls05:10000/sys/tg_test/1/double_scalar'),
                # widget.Sep(foreground='AAAAAA'),
                widget.Net(),
                widget.NetGraph(
                    graph_color='FF2222',
                    fill_color='AA2222'
                ),
                widget.Sep(foreground='AAAAAA'),
                widget.DF(),
                widget.CPU(),
                widget.CPUGraph(
                    graph_color='22FF22',
                    fill_color='22AA22',
                ),
                widget.Sep(foreground='AAAAAA'),
                widget.Memory(format='Mem: {MemUsed}MB'),
                widget.MemoryGraph(),
                widget.Sep(foreground='AAAAAA'),
                # Battery states rendered with emoji (smiling/thumbs-down/
                # hot-face/thumbs-up) prefixed by the battery glyph.
                widget.Battery(
                    full_char='\U0001F970',
                    discharge_char='\U0001F44E',
                    empty_char='\U0001F976',
                    charge_char='\U0001F44D',
                    format='\U0001F50B{char}{percent:2.0%}'
                ),
                widget.Sep(foreground='AAAAAA'),
                widget.Systray(),
                widget.Sep(foreground='AAAAAA'),
                widget.Clock(format='%Y-%m-%d %H:%M'),
                widget.QuickExit(
                    default_text='[\U0001F6AA]',
                    countdown_format='[{}s]'
                ),
            ],
            24,  # bar height in pixels
            background='888888',
            opacity=1
        ),
    ),
]
# Drag floating layouts.
# mod + left-drag moves a floating window, mod + right-drag resizes it,
# mod + middle-click raises it.
mouse = [
    Drag([mod], "Button1", lazy.window.set_position_floating(),
         start=lazy.window.get_position()),
    Drag([mod], "Button3", lazy.window.set_size_floating(),
         start=lazy.window.get_size()),
    Click([mod], "Button2", lazy.window.bring_to_front())
]
# Remaining top-level qtile configuration knobs.
dgroups_key_binder = None
dgroups_app_rules = []  # type: List
main = None
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
# Windows matching these rules always float instead of tiling.
floating_layout = layout.Floating(float_rules=[
    # Run the utility of `xprop` to see the wm class and name of an X client.
    {'wmclass': 'confirm'},
    {'wmclass': 'dialog'},
    {'wmclass': 'download'},
    {'wmclass': 'error'},
    {'wmclass': 'file_progress'},
    {'wmclass': 'notification'},
    {'wmclass': 'splash'},
    {'wmclass': 'toolbar'},
    {'wmclass': 'confirmreset'},  # gitk
    {'wmclass': 'makebranch'},  # gitk
    {'wmclass': 'maketag'},  # gitk
    {'wname': 'branchdialog'},  # gitk
    {'wname': 'pinentry'},  # GPG key password entry
    {'wmclass': 'ssh-askpass'},  # ssh-askpass
])
auto_fullscreen = True
focus_on_window_activation = "smart"
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, GitHub issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
wmname = "LG3D"
|
{
"content_hash": "9cfba0783b831cecfa4fcd31274c7337",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 84,
"avg_line_length": 32.737864077669904,
"alnum_prop": 0.5631672597864769,
"repo_name": "tiagocoutinho/tc",
"id": "5702d8a4ab664198e8de2f225c8d554d42195a8b",
"size": "6744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".config/qtile/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "4030"
},
{
"name": "Python",
"bytes": "1513"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
# Module under test
from bokeh import models
from bokeh.model import Model
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def all_descriptors():
    """Yield ``(model_name, property_descriptor)`` pairs for every property
    declared directly (not inherited) on each Model subclass in bokeh.models."""
    for attr_name in dir(models):
        candidate = getattr(models, attr_name)
        try:
            is_model = issubclass(candidate, Model)
        except TypeError:
            # dir() also yields non-class attributes; skip them.
            continue
        if not is_model:
            continue
        for prop_name in candidate.properties(with_bases=False):
            yield (attr_name, getattr(candidate, prop_name))
@pytest.mark.parametrize("name, descriptor", list(all_descriptors()))
@pytest.mark.unit
def test_default_values(name, descriptor):
    """Every declared property default must validate against its own type."""
    prop = descriptor.property
    message = "%s.%s has an invalid default value" % (name, descriptor.name)
    assert prop.is_valid(prop._raw_default()) is True, message
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
{
"content_hash": "ca0675e1913ee8c27544a95549d26d08",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 111,
"avg_line_length": 32.679245283018865,
"alnum_prop": 0.3608545034642032,
"repo_name": "timsnyder/bokeh",
"id": "42ed5c1dbb73c8b46ed96b4213eb94cdfde21ba6",
"size": "2236",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/models/tests/test_defaults.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "24877"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "54062"
},
{
"name": "JavaScript",
"bytes": "27797"
},
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "PowerShell",
"bytes": "713"
},
{
"name": "Python",
"bytes": "3827067"
},
{
"name": "Roff",
"bytes": "495"
},
{
"name": "Shell",
"bytes": "9953"
},
{
"name": "TypeScript",
"bytes": "2145262"
}
],
"symlink_target": ""
}
|
from testtools import TestCase
from mock import Mock
from troveclient import users
from troveclient import base
"""
Unit tests for users.py
"""
class UserTest(TestCase):
    """Tests for the users.User resource wrapper."""

    def setUp(self):
        super(UserTest, self).setUp()
        # Stub __init__ so a User can be built without a manager/info dict;
        # the original is restored in tearDown.
        self._saved_init = users.User.__init__
        users.User.__init__ = Mock(return_value=None)
        self.user = users.User()

    def tearDown(self):
        super(UserTest, self).tearDown()
        users.User.__init__ = self._saved_init

    def test___repr__(self):
        self.user.name = "user-1"
        self.assertEqual('<User: user-1>', repr(self.user))
class UsersTest(TestCase):
    """Tests for the users.Users manager (create/delete/list REST calls)."""
    def setUp(self):
        super(UsersTest, self).setUp()
        # Stub __init__ so the manager builds without a real API handle,
        # then attach a fully mocked client.  base.getid is patched so any
        # instance argument resolves to the literal "instance1".
        # Everything is restored in tearDown.
        self.orig__init = users.Users.__init__
        users.Users.__init__ = Mock(return_value=None)
        self.users = users.Users()
        self.users.api = Mock()
        self.users.api.client = Mock()
        self.orig_base_getid = base.getid
        base.getid = Mock(return_value="instance1")
    def tearDown(self):
        super(UsersTest, self).tearDown()
        users.Users.__init__ = self.orig__init
        base.getid = self.orig_base_getid
    def _get_mock_method(self):
        # Build a mock HTTP verb that records the url/body it was called
        # with (inspected later via self._url / self._body) and returns the
        # canned response object self._resp.
        self._resp = Mock()
        self._body = None
        self._url = None
        def side_effect_func(url, body=None):
            self._body = body
            self._url = url
            return (self._resp, body)
        return Mock(side_effect=side_effect_func)
    def _build_fake_user(self, name, hostname=None, password=None,
                         databases=None):
        # Minimal user dict in the wire format expected by the API.
        return {'name': name,
                'password': password if password else 'password',
                'host': hostname,
                'databases': databases if databases else [],
                }
    def test_create(self):
        self.users.api.client.post = self._get_mock_method()
        self._resp.status = 200
        user = self._build_fake_user('user1')
        self.users.create(23, [user])
        self.assertEqual('/instances/23/users', self._url)
        self.assertEqual({"users": [user]}, self._body)
        # Even if host isn't supplied originally,
        # the default is supplied.
        del user['host']
        self.users.create(23, [user])
        self.assertEqual('/instances/23/users', self._url)
        user['host'] = '%'
        self.assertEqual({"users": [user]}, self._body)
        # If host is supplied, of course it's put into the body.
        user['host'] = '127.0.0.1'
        self.users.create(23, [user])
        self.assertEqual({"users": [user]}, self._body)
        # Make sure that response of 400 is recognized as an error.
        user['host'] = '%'
        self._resp.status = 400
        self.assertRaises(Exception, self.users.create, 12, [user])
    def test_delete(self):
        self.users.api.client.delete = self._get_mock_method()
        self._resp.status = 200
        self.users.delete(27, 'user1')
        self.assertEqual('/instances/27/users/user1', self._url)
        # A 400 response must surface as an exception.
        self._resp.status = 400
        self.assertRaises(Exception, self.users.delete, 34, 'user1')
    def test__list(self):
        # resource_class is stubbed to the identity so _list returns the raw
        # values pulled out of the response body.
        def side_effect_func(self, val):
            return val
        key = 'key'
        body = Mock()
        body.get = Mock(return_value=[{'href': 'http://test.net/test_file',
                                       'rel': 'next'}])
        body.__getitem__ = Mock(return_value=["test-value"])
        resp = Mock()
        resp.status = 200
        self.users.resource_class = Mock(side_effect=side_effect_func)
        self.users.api.client.get = Mock(return_value=(resp, body))
        self.assertEqual(["test-value"], self.users._list('url', key).items)
        # A response without a body must raise.
        self.users.api.client.get = Mock(return_value=(resp, None))
        self.assertRaises(Exception, self.users._list, 'url', None)
    def test_list(self):
        # _list is stubbed to echo the path so we can assert URL construction
        # (instance id comes from the patched base.getid => "instance1").
        def side_effect_func(path, user, limit, marker):
            return path
        self.users._list = Mock(side_effect=side_effect_func)
        self.assertEqual('/instances/instance1/users', self.users.list(1))
|
{
"content_hash": "748aab0ee0d98a9a0b5db46e52945856",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 76,
"avg_line_length": 32.42857142857143,
"alnum_prop": 0.5758688203622124,
"repo_name": "neumerance/deploy",
"id": "0fc32f683da11c2934722ebdff6541cc65a7b43c",
"size": "4086",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/troveclient/tests/test_users.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49399"
},
{
"name": "CSS",
"bytes": "769836"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Erlang",
"bytes": "31042"
},
{
"name": "JavaScript",
"bytes": "642626"
},
{
"name": "PHP",
"bytes": "3858"
},
{
"name": "Perl",
"bytes": "386749"
},
{
"name": "Python",
"bytes": "23358678"
},
{
"name": "Racket",
"bytes": "28441"
},
{
"name": "Ruby",
"bytes": "453"
},
{
"name": "Shell",
"bytes": "29414"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:2b2e2fd1-b225-11e4-b26c-14109fe53921')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
    raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
import _cvc as _ImportedBinding__cvc
import _cbc as _ImportedBinding__cbc
import _cac as _ImportedBinding__cac
# NOTE: All namespace declarations are reserved within the binding
# Register this module's namespace and the imported cvc/cac/cbc namespaces
# so PyXB can resolve type and element bindings across them.
Namespace = pyxb.namespace.NamespaceForURI('http://www.w3.org/ns/corevocabulary/AggregateComponents', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
_Namespace_cvc = _ImportedBinding__cvc.Namespace
_Namespace_cvc.configureCategories(['typeBinding', 'elementBinding'])
_Namespace_cac = _ImportedBinding__cac.Namespace
_Namespace_cac.configureCategories(['typeBinding', 'elementBinding'])
_Namespace_cbc = _ImportedBinding__cbc.Namespace
_Namespace_cbc.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
    """Parse the given XML and use the document element to create a
    Python instance.

    @param xml_text An XML document.  This should be data (Python 2
    str or Python 3 bytes), or a text (Python 2 unicode or Python 3
    str) in the L{pyxb._InputEncoding} encoding.

    @keyword default_namespace The L{pyxb.Namespace} instance to use as the
    default namespace where there is no default namespace in scope.
    If unspecified or C{None}, the namespace of the module containing
    this function will be used.

    @keyword location_base: An object to be recorded as the base of all
    L{pyxb.utils.utility.Location} instances associated with events and
    objects handled by the parser.  You might pass the URI from which
    the document was obtained.
    """
    # Honor a globally-selected non-SAX parsing style via the DOM path.
    if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
        dom = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
    handler = saxer.getContentHandler()
    xmld = xml_text
    # The SAX parser wants bytes; encode text input with the module encoding.
    if isinstance(xmld, _six.text_type):
        xmld = xmld.encode(pyxb._InputEncoding)
    saxer.parse(io.BytesIO(xmld))
    instance = handler.rootObject()
    return instance
def CreateFromDOM (node, default_namespace=None):
    """Create a Python instance from the given DOM node.

    The node tag must correspond to an element declaration in this module.

    @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
    ns = default_namespace if default_namespace is not None else Namespace.fallbackNamespace()
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, ns)
# Complex type {http://www.w3.org/ns/corevocabulary/AggregateComponents}CompanyActivityType with content type ELEMENT_ONLY
class CompanyActivityType (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {http://www.w3.org/ns/corevocabulary/AggregateComponents}CompanyActivityType
    with content type ELEMENT_ONLY.  Auto-generated PyXB binding; do not edit by hand."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'CompanyActivityType')
    _XSDLocation = pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 42, 3)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}ActivityCode uses Python identifier ActivityCode
    __ActivityCode = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'ActivityCode'), 'ActivityCode', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CompanyActivityType_httpwww_w3_orgnscorevocabularyBasicComponentsActivityCode', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 24, 3), )
    ActivityCode = property(__ActivityCode.value, __ActivityCode.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}ActivityDescription uses Python identifier ActivityDescription
    __ActivityDescription = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'ActivityDescription'), 'ActivityDescription', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CompanyActivityType_httpwww_w3_orgnscorevocabularyBasicComponentsActivityDescription', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 25, 3), )
    ActivityDescription = property(__ActivityDescription.value, __ActivityDescription.set, None, None)
    _ElementMap.update({
        __ActivityCode.name() : __ActivityCode,
        __ActivityDescription.name() : __ActivityDescription
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', 'CompanyActivityType', CompanyActivityType)
# Complex type {http://www.w3.org/ns/corevocabulary/AggregateComponents}CvaddressType with content type ELEMENT_ONLY
class CvaddressType (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {http://www.w3.org/ns/corevocabulary/AggregateComponents}CvaddressType
    with content type ELEMENT_ONLY.  Auto-generated PyXB binding; do not edit by hand."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'CvaddressType')
    _XSDLocation = pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 90, 3)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}AdminunitFirstline uses Python identifier AdminunitFirstline
    __AdminunitFirstline = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'AdminunitFirstline'), 'AdminunitFirstline', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvaddressType_httpwww_w3_orgnscorevocabularyBasicComponentsAdminunitFirstline', True, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 26, 3), )
    AdminunitFirstline = property(__AdminunitFirstline.value, __AdminunitFirstline.set, None, None)
    _ElementMap.update({
        __AdminunitFirstline.name() : __AdminunitFirstline
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', 'CvaddressType', CvaddressType)
# Complex type {http://www.w3.org/ns/corevocabulary/AggregateComponents}CvbusinessType with content type ELEMENT_ONLY
class CvbusinessType (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {http://www.w3.org/ns/corevocabulary/AggregateComponents}CvbusinessType
    with content type ELEMENT_ONLY.  Auto-generated PyXB binding; do not edit by hand."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'CvbusinessType')
    _XSDLocation = pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 120, 3)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://www.w3.org/ns/corevocabulary/AggregateComponents}BusinessAddress uses Python identifier BusinessAddress
    __BusinessAddress = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'BusinessAddress'), 'BusinessAddress', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvbusinessType_httpwww_w3_orgnscorevocabularyAggregateComponentsBusinessAddress', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 27, 3), )
    BusinessAddress = property(__BusinessAddress.value, __BusinessAddress.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/AggregateComponents}CompanyActivity uses Python identifier CompanyActivity
    __CompanyActivity = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'CompanyActivity'), 'CompanyActivity', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvbusinessType_httpwww_w3_orgnscorevocabularyAggregateComponentsCompanyActivity', True, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 29, 3), )
    CompanyActivity = property(__CompanyActivity.value, __CompanyActivity.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}AlternativeName uses Python identifier AlternativeName
    __AlternativeName = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'AlternativeName'), 'AlternativeName', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvbusinessType_httpwww_w3_orgnscorevocabularyBasicComponentsAlternativeName', True, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 27, 3), )
    AlternativeName = property(__AlternativeName.value, __AlternativeName.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}CompanyStatusCode uses Python identifier CompanyStatusCode
    __CompanyStatusCode = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'CompanyStatusCode'), 'CompanyStatusCode', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvbusinessType_httpwww_w3_orgnscorevocabularyBasicComponentsCompanyStatusCode', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 28, 3), )
    CompanyStatusCode = property(__CompanyStatusCode.value, __CompanyStatusCode.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}CompanyTypeCode uses Python identifier CompanyTypeCode
    __CompanyTypeCode = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'CompanyTypeCode'), 'CompanyTypeCode', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvbusinessType_httpwww_w3_orgnscorevocabularyBasicComponentsCompanyTypeCode', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 29, 3), )
    CompanyTypeCode = property(__CompanyTypeCode.value, __CompanyTypeCode.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}LegalID uses Python identifier LegalID
    __LegalID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'LegalID'), 'LegalID', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvbusinessType_httpwww_w3_orgnscorevocabularyBasicComponentsLegalID', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 37, 3), )
    LegalID = property(__LegalID.value, __LegalID.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}LegalName uses Python identifier LegalName
    __LegalName = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'LegalName'), 'LegalName', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvbusinessType_httpwww_w3_orgnscorevocabularyBasicComponentsLegalName', True, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 38, 3), )
    LegalName = property(__LegalName.value, __LegalName.set, None, None)
    _ElementMap.update({
        __BusinessAddress.name() : __BusinessAddress,
        __CompanyActivity.name() : __CompanyActivity,
        __AlternativeName.name() : __AlternativeName,
        __CompanyStatusCode.name() : __CompanyStatusCode,
        __CompanyTypeCode.name() : __CompanyTypeCode,
        __LegalID.name() : __LegalID,
        __LegalName.name() : __LegalName
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', 'CvbusinessType', CvbusinessType)
# Complex type {http://www.w3.org/ns/corevocabulary/AggregateComponents}CvlocationType with content type ELEMENT_ONLY
class CvlocationType (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {http://www.w3.org/ns/corevocabulary/AggregateComponents}CvlocationType
    with content type ELEMENT_ONLY.  Auto-generated PyXB binding; do not edit by hand."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'CvlocationType')
    _XSDLocation = pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 247, 3)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2}Address uses Python identifier Address
    __Address = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cac, 'Address'), 'Address', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvlocationType_urnoasisnamesspecificationublschemaxsdCommonAggregateComponents_2Address', True, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonAggregateComponents-2.1.xsd', 45, 3), )
    Address = property(__Address.value, __Address.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}GeographicID uses Python identifier GeographicID
    __GeographicID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'GeographicID'), 'GeographicID', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvlocationType_httpwww_w3_orgnscorevocabularyBasicComponentsGeographicID', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 31, 3), )
    GeographicID = property(__GeographicID.value, __GeographicID.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}GeographicName uses Python identifier GeographicName
    __GeographicName = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'GeographicName'), 'GeographicName', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvlocationType_httpwww_w3_orgnscorevocabularyBasicComponentsGeographicName', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 32, 3), )
    GeographicName = property(__GeographicName.value, __GeographicName.set, None, None)
    _ElementMap.update({
        __Address.name() : __Address,
        __GeographicID.name() : __GeographicID,
        __GeographicName.name() : __GeographicName
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', 'CvlocationType', CvlocationType)
# Complex type {http://www.w3.org/ns/corevocabulary/AggregateComponents}CvpersonType with content type ELEMENT_ONLY
class CvpersonType (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {http://www.w3.org/ns/corevocabulary/AggregateComponents}CvpersonType
    with content type ELEMENT_ONLY.  Auto-generated PyXB binding; do not edit by hand."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'CvpersonType')
    _XSDLocation = pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 309, 3)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2}Person uses Python identifier Person
    __Person = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cac, 'Person'), 'Person', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvpersonType_urnoasisnamesspecificationublschemaxsdCommonAggregateComponents_2Person', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonAggregateComponents-2.1.xsd', 406, 3), )
    Person = property(__Person.value, __Person.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/AggregateComponents}BirthPlaceCvlocation uses Python identifier BirthPlaceCvlocation
    __BirthPlaceCvlocation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'BirthPlaceCvlocation'), 'BirthPlaceCvlocation', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvpersonType_httpwww_w3_orgnscorevocabularyAggregateComponentsBirthPlaceCvlocation', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 26, 3), )
    BirthPlaceCvlocation = property(__BirthPlaceCvlocation.value, __BirthPlaceCvlocation.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/AggregateComponents}CitizenshipJurisdiction uses Python identifier CitizenshipJurisdiction
    __CitizenshipJurisdiction = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'CitizenshipJurisdiction'), 'CitizenshipJurisdiction', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvpersonType_httpwww_w3_orgnscorevocabularyAggregateComponentsCitizenshipJurisdiction', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 28, 3), )
    CitizenshipJurisdiction = property(__CitizenshipJurisdiction.value, __CitizenshipJurisdiction.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/AggregateComponents}DeathPlaceCvlocation uses Python identifier DeathPlaceCvlocation
    __DeathPlaceCvlocation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'DeathPlaceCvlocation'), 'DeathPlaceCvlocation', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvpersonType_httpwww_w3_orgnscorevocabularyAggregateComponentsDeathPlaceCvlocation', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 34, 3), )
    DeathPlaceCvlocation = property(__DeathPlaceCvlocation.value, __DeathPlaceCvlocation.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/AggregateComponents}ResidencyJurisdiction uses Python identifier ResidencyJurisdiction
    __ResidencyJurisdiction = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'ResidencyJurisdiction'), 'ResidencyJurisdiction', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvpersonType_httpwww_w3_orgnscorevocabularyAggregateComponentsResidencyJurisdiction', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 39, 3), )
    ResidencyJurisdiction = property(__ResidencyJurisdiction.value, __ResidencyJurisdiction.set, None, None)
    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}DeathDate uses Python identifier DeathDate
    __DeathDate = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'DeathDate'), 'DeathDate', '__httpwww_w3_orgnscorevocabularyAggregateComponents_CvpersonType_httpwww_w3_orgnscorevocabularyBasicComponentsDeathDate', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 30, 3), )
    DeathDate = property(__DeathDate.value, __DeathDate.set, None, None)
    _ElementMap.update({
        __Person.name() : __Person,
        __BirthPlaceCvlocation.name() : __BirthPlaceCvlocation,
        __CitizenshipJurisdiction.name() : __CitizenshipJurisdiction,
        __DeathPlaceCvlocation.name() : __DeathPlaceCvlocation,
        __ResidencyJurisdiction.name() : __ResidencyJurisdiction,
        __DeathDate.name() : __DeathDate
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', 'CvpersonType', CvpersonType)
# Complex type {http://www.w3.org/ns/corevocabulary/AggregateComponents}JurisdictionType with content type ELEMENT_ONLY
class JurisdictionType (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for the core-vocabulary ``JurisdictionType`` complex type.

    Element-only content declaring a ``cvc:JurisdictionID`` element and a
    ``cbc:Name`` element; the content-model automaton is attached later in
    this module (``_BuildAutomaton_5``).
    """
    # No simple-type definition: content is element-only, not simple content.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'JurisdictionType')
    # Source location of the type declaration in the generating schema.
    _XSDLocation = pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 423, 3)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.w3.org/ns/corevocabulary/BasicComponents}JurisdictionID uses Python identifier JurisdictionID
    __JurisdictionID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cvc, 'JurisdictionID'), 'JurisdictionID', '__httpwww_w3_orgnscorevocabularyAggregateComponents_JurisdictionType_httpwww_w3_orgnscorevocabularyBasicComponentsJurisdictionID', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 36, 3), )
    JurisdictionID = property(__JurisdictionID.value, __JurisdictionID.set, None, None)

    # Element {urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2}Name uses Python identifier Name
    __Name = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(_Namespace_cbc, 'Name'), 'Name', '__httpwww_w3_orgnscorevocabularyAggregateComponents_JurisdictionType_urnoasisnamesspecificationublschemaxsdCommonBasicComponents_2Name', False, pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonBasicComponents-2.1.xsd', 475, 3), )
    Name = property(__Name.value, __Name.set, None, None)

    # Register the element declarations on the class-level maps.
    _ElementMap.update({
        __JurisdictionID.name() : __JurisdictionID,
        __Name.name() : __Name
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', 'JurisdictionType', JurisdictionType)
# Module-level bindings for the top-level element declarations of
# CoreVocabularyAggregateComponents.xsd.  Each element object is registered
# with the namespace under its local name so documents can be created and
# resolved by element name.
BirthPlaceCvlocation = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'BirthPlaceCvlocation'), CvlocationType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 26, 3))
Namespace.addCategoryObject('elementBinding', BirthPlaceCvlocation.name().localName(), BirthPlaceCvlocation)

BusinessAddress = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'BusinessAddress'), _ImportedBinding__cac.AddressType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 27, 3))
Namespace.addCategoryObject('elementBinding', BusinessAddress.name().localName(), BusinessAddress)

CitizenshipJurisdiction = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'CitizenshipJurisdiction'), JurisdictionType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 28, 3))
Namespace.addCategoryObject('elementBinding', CitizenshipJurisdiction.name().localName(), CitizenshipJurisdiction)

CompanyActivity = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'CompanyActivity'), CompanyActivityType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 29, 3))
Namespace.addCategoryObject('elementBinding', CompanyActivity.name().localName(), CompanyActivity)

Cvaddress = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Cvaddress'), CvaddressType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 30, 3))
Namespace.addCategoryObject('elementBinding', Cvaddress.name().localName(), Cvaddress)

Cvbusiness = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Cvbusiness'), CvbusinessType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 31, 3))
Namespace.addCategoryObject('elementBinding', Cvbusiness.name().localName(), Cvbusiness)

Cvlocation = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Cvlocation'), CvlocationType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 32, 3))
Namespace.addCategoryObject('elementBinding', Cvlocation.name().localName(), Cvlocation)

Cvperson = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Cvperson'), CvpersonType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 33, 3))
Namespace.addCategoryObject('elementBinding', Cvperson.name().localName(), Cvperson)

DeathPlaceCvlocation = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'DeathPlaceCvlocation'), CvlocationType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 34, 3))
Namespace.addCategoryObject('elementBinding', DeathPlaceCvlocation.name().localName(), DeathPlaceCvlocation)

HealthCareProviderCvaddress = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'HealthCareProviderCvaddress'), CvaddressType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 35, 3))
Namespace.addCategoryObject('elementBinding', HealthCareProviderCvaddress.name().localName(), HealthCareProviderCvaddress)

HealthInsuranceOrganizationCvaddress = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'HealthInsuranceOrganizationCvaddress'), CvaddressType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 36, 3))
Namespace.addCategoryObject('elementBinding', HealthInsuranceOrganizationCvaddress.name().localName(), HealthInsuranceOrganizationCvaddress)

Jurisdiction = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Jurisdiction'), JurisdictionType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 37, 3))
Namespace.addCategoryObject('elementBinding', Jurisdiction.name().localName(), Jurisdiction)

RequestingParty = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'RequestingParty'), _ImportedBinding__cac.PartyType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 38, 3))
Namespace.addCategoryObject('elementBinding', RequestingParty.name().localName(), RequestingParty)

ResidencyJurisdiction = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ResidencyJurisdiction'), JurisdictionType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 39, 3))
Namespace.addCategoryObject('elementBinding', ResidencyJurisdiction.name().localName(), ResidencyJurisdiction)

# Deferred element declarations for CompanyActivityType, added once all
# referenced types exist at module level.
CompanyActivityType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'ActivityCode'), _ImportedBinding__cvc.ActivityCodeType, scope=CompanyActivityType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 24, 3)))
CompanyActivityType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'ActivityDescription'), _ImportedBinding__cvc.ActivityDescriptionType, scope=CompanyActivityType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 25, 3)))
def _BuildAutomaton ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton
    del _BuildAutomaton
    import pyxb.utils.fac as fac

    # Content-model automaton for CompanyActivityType: two states, one per
    # child element, visited in sequence.  No occurrence counters are needed.
    counters = set()
    states = []

    # State 0: cvc:ActivityCode (initial; final_update=None).
    code_use = pyxb.binding.content.ElementUse(CompanyActivityType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'ActivityCode')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 54, 9))
    code_state = fac.State(code_use, is_initial=True, final_update=None, is_unordered_catenation=False)
    states.append(code_state)

    # State 1: cvc:ActivityDescription (accepting: empty final_update set).
    desc_use = pyxb.binding.content.ElementUse(CompanyActivityType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'ActivityDescription')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 71, 9))
    desc_state = fac.State(desc_use, is_initial=False, final_update=set(), is_unordered_catenation=False)
    states.append(desc_state)

    # ActivityCode advances to ActivityDescription; the description state
    # has no outgoing transitions.
    code_state._set_transitionSet([fac.Transition(desc_state, [])])
    desc_state._set_transitionSet([])
    return fac.Automaton(states, counters, False, containing_state=None)
CompanyActivityType._Automaton = _BuildAutomaton()
# Deferred element declaration for CvaddressType (cvc:AdminunitFirstline).
CvaddressType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'AdminunitFirstline'), _ImportedBinding__cvc.AdminunitFirstlineType, scope=CvaddressType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 26, 3)))
def _BuildAutomaton_ ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_
    del _BuildAutomaton_
    import pyxb.utils.fac as fac

    # Content-model automaton for CvaddressType: a single state for
    # cvc:AdminunitFirstline with a min=0/max=None occurrence counter and a
    # self-loop, so the element may appear any number of times.
    repeat_counter = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 102, 9))
    counters = {repeat_counter}

    elt_use = pyxb.binding.content.ElementUse(CvaddressType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'AdminunitFirstline')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 102, 9))
    only_state = fac.State(elt_use, is_initial=True, final_update={fac.UpdateInstruction(repeat_counter, False)}, is_unordered_catenation=False)
    states = [only_state]

    # Self-loop: each repeated occurrence updates the counter.
    only_state._set_transitionSet([fac.Transition(only_state, [fac.UpdateInstruction(repeat_counter, True)])])
    return fac.Automaton(states, counters, True, containing_state=None)
CvaddressType._Automaton = _BuildAutomaton_()
# Deferred element declarations for CvbusinessType: two elements from this
# module's namespace and five from the core-vocabulary basic components.
CvbusinessType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'BusinessAddress'), _ImportedBinding__cac.AddressType, scope=CvbusinessType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 27, 3)))
CvbusinessType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'CompanyActivity'), CompanyActivityType, scope=CvbusinessType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 29, 3)))
CvbusinessType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'AlternativeName'), _ImportedBinding__cvc.AlternativeNameType, scope=CvbusinessType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 27, 3)))
CvbusinessType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'CompanyStatusCode'), _ImportedBinding__cvc.CompanyStatusCodeType, scope=CvbusinessType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 28, 3)))
CvbusinessType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'CompanyTypeCode'), _ImportedBinding__cvc.CompanyTypeCodeType, scope=CvbusinessType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 29, 3)))
CvbusinessType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'LegalID'), _ImportedBinding__cvc.LegalIDType, scope=CvbusinessType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 37, 3)))
CvbusinessType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'LegalName'), _ImportedBinding__cvc.LegalNameType, scope=CvbusinessType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 38, 3)))
def _BuildAutomaton_2 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_2
    del _BuildAutomaton_2
    import pyxb.utils.fac as fac
    # Builds the FAC (finite automaton with counters) content model for
    # CvbusinessType; stored as CvbusinessType._Automaton below.  One counter
    # per element position whose occurrence is bounded (min/max as declared).
    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 148, 9))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 164, 9))
    counters.add(cc_1)
    cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 180, 9))
    counters.add(cc_2)
    cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 196, 9))
    counters.add(cc_3)
    cc_4 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 212, 9))
    counters.add(cc_4)
    cc_5 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 228, 9))
    counters.add(cc_5)
    states = []
    # st_0: cvc:LegalID (initial state of the sequence).
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(CvbusinessType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'LegalID')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 132, 9))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: cvc:LegalName (gated by cc_0).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(CvbusinessType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'LegalName')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 148, 9))
    st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # st_2: cvc:AlternativeName (gated by cc_1).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(CvbusinessType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'AlternativeName')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 164, 9))
    st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # st_3: cvc:CompanyStatusCode (gated by cc_2).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(CvbusinessType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'CompanyStatusCode')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 180, 9))
    st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    # st_4: cvc:CompanyTypeCode (gated by cc_3).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_3, False))
    symbol = pyxb.binding.content.ElementUse(CvbusinessType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'CompanyTypeCode')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 196, 9))
    st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    # st_5: CompanyActivity from this module's namespace (gated by cc_4).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_4, False))
    symbol = pyxb.binding.content.ElementUse(CvbusinessType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'CompanyActivity')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 212, 9))
    st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_5)
    # st_6: BusinessAddress from this module's namespace (gated by cc_5).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_5, False))
    symbol = pyxb.binding.content.ElementUse(CvbusinessType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'BusinessAddress')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 228, 9))
    st_6 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_6)
    # Transition sets: from each state the automaton may advance to any later
    # state in the sequence; a transition back to the same state re-enters it
    # with an UpdateInstruction(cc, True) on that state's counter.
    transitions = []
    transitions.append(fac.Transition(st_1, [
        ]))
    transitions.append(fac.Transition(st_2, [
        ]))
    transitions.append(fac.Transition(st_3, [
        ]))
    transitions.append(fac.Transition(st_4, [
        ]))
    transitions.append(fac.Transition(st_5, [
        ]))
    transitions.append(fac.Transition(st_6, [
        ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_2._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_2, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_2, False) ]))
    st_3._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_3, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_3, False) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_3, False) ]))
    st_4._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_4, True) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_4, False) ]))
    st_5._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_5, True) ]))
    st_6._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
CvbusinessType._Automaton = _BuildAutomaton_2()
# Deferred element declarations for CvlocationType: a UBL cac:Address plus
# the core-vocabulary geographic identifier and name.
CvlocationType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cac, 'Address'), _ImportedBinding__cac.AddressType, scope=CvlocationType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonAggregateComponents-2.1.xsd', 45, 3)))
CvlocationType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'GeographicID'), _ImportedBinding__cvc.GeographicIDType, scope=CvlocationType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 31, 3)))
CvlocationType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'GeographicName'), _ImportedBinding__cvc.GeographicNameType, scope=CvlocationType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 32, 3)))
def _BuildAutomaton_3 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_3
    del _BuildAutomaton_3
    import pyxb.utils.fac as fac
    # Builds the FAC content model for CvlocationType; stored as
    # CvlocationType._Automaton below.  All three states are initial, i.e.
    # the content may start at any of the three elements.
    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 259, 9))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 275, 9))
    counters.add(cc_1)
    cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 291, 9))
    counters.add(cc_2)
    states = []
    # st_0: cvc:GeographicID (gated by cc_0, min=0/max=1).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(CvlocationType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'GeographicID')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 259, 9))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: cvc:GeographicName (gated by cc_1, min=0/max=1).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(CvlocationType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'GeographicName')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 275, 9))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # st_2: cac:Address (gated by cc_2, min=0/max=unbounded).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(CvlocationType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cac, 'Address')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 291, 9))
    st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # Transitions: self-loops update the state's own counter; forward
    # transitions move on to a later element position.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_2, True) ]))
    st_2._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
CvlocationType._Automaton = _BuildAutomaton_3()
# Deferred element declarations for CvpersonType: a UBL cac:Person, four
# elements from this module's namespace, and the core-vocabulary DeathDate.
CvpersonType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cac, 'Person'), _ImportedBinding__cac.PersonType, scope=CvpersonType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonAggregateComponents-2.1.xsd', 406, 3)))
CvpersonType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'BirthPlaceCvlocation'), CvlocationType, scope=CvpersonType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 26, 3)))
CvpersonType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'CitizenshipJurisdiction'), JurisdictionType, scope=CvpersonType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 28, 3)))
CvpersonType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'DeathPlaceCvlocation'), CvlocationType, scope=CvpersonType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 34, 3)))
CvpersonType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ResidencyJurisdiction'), JurisdictionType, scope=CvpersonType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 39, 3)))
CvpersonType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'DeathDate'), _ImportedBinding__cvc.DeathDateType, scope=CvpersonType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 30, 3)))
def _BuildAutomaton_4 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_4
    del _BuildAutomaton_4
    import pyxb.utils.fac as fac
    # Builds the FAC content model for CvpersonType; stored as
    # CvpersonType._Automaton below.  Every state is marked initial, and
    # only st_5 (cac:Person) carries a non-None final_update.
    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 321, 9))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 337, 9))
    counters.add(cc_1)
    cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 354, 9))
    counters.add(cc_2)
    cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 371, 9))
    counters.add(cc_3)
    cc_4 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 388, 9))
    counters.add(cc_4)
    states = []
    # st_0: cvc:DeathDate (final_update=None).
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CvpersonType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'DeathDate')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 321, 9))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: ResidencyJurisdiction (final_update=None).
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CvpersonType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ResidencyJurisdiction')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 337, 9))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # st_2: CitizenshipJurisdiction (final_update=None).
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CvpersonType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'CitizenshipJurisdiction')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 354, 9))
    st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # st_3: DeathPlaceCvlocation (final_update=None).
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CvpersonType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'DeathPlaceCvlocation')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 371, 9))
    st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    # st_4: BirthPlaceCvlocation (final_update=None).
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CvpersonType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'BirthPlaceCvlocation')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 388, 9))
    st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    # st_5: cac:Person (accepting: empty final_update set).
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(CvpersonType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cac, 'Person')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 405, 9))
    st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_5)
    # Transitions: self-loops update the state's own counter; forward
    # transitions skip ahead to any later element position; st_5 is terminal.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_2, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_2, False) ]))
    st_2._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_3, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_3, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_3, False) ]))
    st_3._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_4, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_4, False) ]))
    st_4._set_transitionSet(transitions)
    transitions = []
    st_5._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
CvpersonType._Automaton = _BuildAutomaton_4()
# Deferred element declarations for JurisdictionType (cvc:JurisdictionID and
# UBL cbc:Name), added once all referenced types exist at module level.
JurisdictionType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cvc, 'JurisdictionID'), _ImportedBinding__cvc.JurisdictionIDType, scope=JurisdictionType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyBasicComponents.xsd', 36, 3)))
JurisdictionType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(_Namespace_cbc, 'Name'), _ImportedBinding__cbc.NameType, scope=JurisdictionType, location=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/common/UBL-CommonBasicComponents-2.1.xsd', 475, 3)))
def _BuildAutomaton_5 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_5
del _BuildAutomaton_5
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 451, 9))
counters.add(cc_0)
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(JurisdictionType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cvc, 'JurisdictionID')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 435, 9))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(JurisdictionType._UseForTag(pyxb.namespace.ExpandedName(_Namespace_cbc, 'Name')), pyxb.utils.utility.Location('/Users/radu/Projects/esens/edocument/profiles/e_confirmation/xsd/request/CoreVocabularyAggregateComponents.xsd', 451, 9))
st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_1, [
]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
JurisdictionType._Automaton = _BuildAutomaton_5()
|
{
"content_hash": "8b0508c74c82a38acbbdb96abdd641bc",
"timestamp": "",
"source": "github",
"line_count": 822,
"max_line_length": 465,
"avg_line_length": 70.00973236009732,
"alnum_prop": 0.7716167373323138,
"repo_name": "getodacu/eSENS-eDocument",
"id": "8201259248d4f9696cff7e5fe4c64a644aa3a0eb",
"size": "57817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiles/e_confirmation/xb_request/_cva.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8407933"
},
{
"name": "Shell",
"bytes": "293"
}
],
"symlink_target": ""
}
|
"""Load the iris data set, keep the two petal features, and produce a
standardized 70/30 train/test split (seeded for reproducibility)."""
from sklearn import datasets
import numpy as np
# BUG FIX: ``sklearn.cross_validation`` was deprecated in scikit-learn 0.18
# and removed in 0.20; ``train_test_split`` lives in
# ``sklearn.model_selection``.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Columns 2 and 3 are petal length and petal width.
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target

# Hold out 30% for testing; random_state fixes the shuffle so runs are
# repeatable.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0)

# Standardize using statistics estimated on the training set only, then
# apply the same transform to the test set (avoids information leakage).
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
|
{
"content_hash": "2d8116f988f1e94db01a83553f3ae8ac",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 88,
"avg_line_length": 22.31578947368421,
"alnum_prop": 0.7287735849056604,
"repo_name": "finger563/learningTF",
"id": "aaedb3c0f879431556b867f26dcf421e98a54ea0",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/ch3/scikit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19815"
}
],
"symlink_target": ""
}
|
from dal.test import case, stories
from dal.test.utils import OwnedFixtures
from dal_select2.test import Select2Story
from .models import TModel
class AdminLinkedDataTest(Select2Story,
                          case.AdminMixin,
                          case.OptionMixin,
                          case.AutocompleteTestCase):
    """Autocomplete suggestions must follow the value currently entered in
    the ``owner`` field, both on the main admin form and inside inline
    formsets."""

    field_name = 'test'
    inline_related_name = 'inline_test_models'
    model = TModel

    def setUp(self):
        super(AdminLinkedDataTest, self).setUp()
        # Install the owned fixtures the first time through.
        if not getattr(self, 'fixtures', None):
            self.fixtures = OwnedFixtures()
            self.fixtures.install_fixtures(self.model)
        self.get(url=self.get_modeladmin_url('add'))
        self.prefix = ''

    def set_owner(self, value):
        """Set the (possibly prefixed) owner input's value via JavaScript."""
        script = 'document.querySelector("[name=%s]").value = %s' % (
            self.prefix + 'owner', value)
        self.browser.execute_script(script)

    def test_filter_options(self, story=None):
        """Suggestions start unfiltered, then narrow to the chosen owner."""
        if story is None:
            story = stories.SelectOption(self)
        story.toggle_autocomplete()
        # With no owner selected, every instance is suggested.
        story.assert_suggestion_labels_are(
            self.model.objects.values_list('name', flat=True)
        )
        # Selecting each owner must restrict suggestions to that owner.
        for owner in (self.fixtures.test, self.fixtures.other):
            self.set_owner(owner.pk)
            story.refresh_autocomplete()
            story.assert_suggestion_labels_are(
                self.model.objects.filter(
                    owner=owner
                ).values_list('name', flat=True)
            )

    def test_filter_option_in_first_inline(self):
        self.prefix = '%s-%s-' % (self.inline_related_name, 0)
        self.test_filter_options(
            stories.InlineSelectOption(self, inline_number=0))

    def test_can_select_option_in_first_extra_inline(self):
        story = stories.InlineSelectOption(self, inline_number=3)
        self.prefix = '%s-%s-' % (self.inline_related_name, 3)
        self.test_filter_options(story)
|
{
"content_hash": "cdef234aa489e71d6b0f8a30223fba3d",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 65,
"avg_line_length": 31.28985507246377,
"alnum_prop": 0.6016674386289949,
"repo_name": "yourlabs/django-autocomplete-light",
"id": "b45fb6586eb41fc27e5cef10dd22c9c5e7d91704",
"size": "2159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_project/linked_data/test_functional.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11205"
},
{
"name": "HTML",
"bytes": "5709"
},
{
"name": "JavaScript",
"bytes": "27379"
},
{
"name": "Python",
"bytes": "210537"
},
{
"name": "Shell",
"bytes": "1950"
}
],
"symlink_target": ""
}
|
from .node import TrieNode
class CarbonIndex:
    """Trie-based metric-name index for carbon-cache instances.

    Metric names are separator-delimited paths; each path component maps to
    one trie node, and the node for the final component is a leaf.
    """

    def __init__(self, name='carbon_index', sep='.'):
        self.name = name
        self.root = TrieNode('root', is_leaf=False)
        self.sep = sep

    def insert(self, metric):
        """
        Insert a plain (non-wildcard) metric name into the index.
        args: metric name.
        """
        self._insert(self.root, metric.split(self.sep))

    def has_metric(self, metric):
        """
        Return True iff *metric* was inserted as a complete name.
        args: metric.
        """
        node = self.root
        for part in metric.split(self.sep):
            if not node.has_child(part):
                return False
            node = node.get(part)
        return node.is_leaf

    def delete(self, metric):
        """
        Remove a metric from the index.
        args: metric, should not include wildcards.
        return: True/False.
        """
        return self._delete(self.root, metric.split(self.sep))

    def expand_query(self, query):
        """
        Expand a wildcard query into matching leaf names.
        """
        return self.root.expand_query(query)

    def expand_pattern(self, pattern):
        """
        Expand a wildcard query pattern.
        Unlike expand_query, this may also return BranchNode results.
        """
        return self.root.expand_pattern(pattern)

    def _insert(self, parent, metric_parts):
        # Iterative walk: create any missing branch nodes, then attach the
        # final component as a leaf.
        if not metric_parts:
            return
        node = parent
        for part in metric_parts[:-1]:
            if not node.get(part):
                node.add(TrieNode(part, is_leaf=False))
            node = node.get(part)
        node.add(TrieNode(metric_parts[-1]))

    def _delete(self, node, parts):
        # Recursive removal; prunes branch nodes emptied by the deletion.
        if not parts:
            # End of the path: clear the leaf flag if it was set.
            if not node.is_leaf:
                return False
            node.is_leaf = False
            return True
        if not node.has_child(parts[0]):
            return False
        child = node.get(parts[0])
        removed = self._delete(child, parts[1:])
        if child.count() == 0 and not child.is_leaf:
            node.delete(child.name)
        return removed
|
{
"content_hash": "b1b71416993e77670f258a702f0e8378",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 67,
"avg_line_length": 27.31111111111111,
"alnum_prop": 0.532546786004882,
"repo_name": "yunstanford/carbon-index",
"id": "14623327cc5e9eed4a0b4ff9f678a1984d6cfda2",
"size": "2458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "carbon_index/index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25504"
}
],
"symlink_target": ""
}
|
import os
import sys
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit import tests # noqa
EXAMPLE_MODULE = 'neutron.tests.unit.tests.example.dir.example_module'
class ImportModulesRecursivelyTestCase(base.BaseTestCase):

    def test_object_modules(self):
        """import_modules_recursively reports and actually imports the
        example module, even when it was not previously loaded."""
        # Make sure the example module is not already imported.
        sys.modules.pop(EXAMPLE_MODULE, None)
        imported = tools.import_modules_recursively(
            os.path.dirname(tests.__file__))
        # The helper returns the module name...
        self.assertIn(EXAMPLE_MODULE, imported)
        # ...and leaves the module loaded in sys.modules.
        self.assertIn(EXAMPLE_MODULE, sys.modules)
|
{
"content_hash": "52824d614af504f50bfefe9f3a0cfd7f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 28.80952380952381,
"alnum_prop": 0.7024793388429752,
"repo_name": "wolverineav/neutron",
"id": "1ab3e939fda9e31a8cb89662ae52ed20895a21b1",
"size": "1178",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/tests/test_tools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7688704"
},
{
"name": "Shell",
"bytes": "14690"
}
],
"symlink_target": ""
}
|
import requests
import xmltodict
from workitem import Workitem
# Suppress the warnings for now
requests.packages.urllib3.disable_warnings()
class IDS(object):
    '''
    A class to encapsulate the work needed to send REST calls to the IBM
    Devops Service RTC backend.
    '''

    def __init__(self, url, user, password):
        '''
        :param url: base URL of the RTC server (no trailing slash)
        :param user: Jazz user name
        :param password: Jazz password
        '''
        self.base_url = url
        self.session = self.create_session(user, password)

    def create_session(self, jazz_user, jazz_password):
        '''
        Create an authenticated requests Session against the Jazz backend.
        :raises Exception: if the credentials are rejected (401) or the
            server answers with any other unexpected status code.
        '''
        session = requests.Session()
        # NOTE(review): certificate verification is deliberately disabled;
        # the urllib3 warnings are suppressed at module import time.
        session.verify = False
        session.allow_redirects = True
        session.headers = {'accept': 'application/json'}
        session.auth = (jazz_user, jazz_password)
        # Request an authenticated resource to validate the credentials.
        auth_uri = "/authenticated/identity"
        response = session.get(self.base_url + auth_uri, verify=False)
        if response.status_code == 200:
            return session
        elif response.status_code == 401:
            raise Exception("Failed to login! Make sure your username and password are correct.")
        else:
            raise Exception("Unknown error during session create. Response code: %s" % response.status_code)

    def get(self, url):
        '''Perform a GET for *url* relative to the configured base URL.'''
        return self.session.get(self.base_url + url, verify=False)

    def get_work_items(self, **filters):
        '''
        Get a work item's information
        :param filters: A series of key value pairs to filter on
        :return: list of Workitems or None
        '''
        filter_string = ""
        for key, val in filters.iteritems():
            # NOTE(review): multiple filters are concatenated with no
            # separator, producing e.g. [a='1'b='2']; this only looks valid
            # for a single filter -- verify against the RTC reportable REST
            # API before passing more than one.
            filter_string += "%s='%s'" % (key, val)
        # BUG FIX: the field list used to be written as one string literal
        # with in-string line continuations, which embedded the source
        # indentation into the request URL.  Build it cleanly instead.
        fields = "|".join([
            "*",
            "owner/name",
            "state/name",
            "projectArea/name",
            "type/name",
            "comments/*",
            "comments/creator/*",
            "stringComplexity/*",
            "category/*",
            "creator/*",
            "tags/*",
            "priority/*",
            "severity/*",
        ])
        url = "/rpt/repository/workitem?fields=workitem/workItem[%s]/(%s)" % (
            filter_string, fields)
        try:
            response = self.get(url)
        except requests.exceptions.ReadTimeout:
            # BUG FIX: this used to return the string "Request timed out :(",
            # which violates the documented "list of Workitems or None"
            # contract and made get_work_item_by_id() index into the string.
            return None
        if response.status_code != 200:
            return None
        output = xmltodict.parse(response.text)
        if "workItem" not in output["workitem"]:
            return None
        # xmltodict collapses a single <workItem> element to a dict rather
        # than a one-element list; normalise both shapes.
        raw = output["workitem"]["workItem"]
        if not isinstance(raw, list):
            raw = [raw]
        return [Workitem(wi) for wi in raw]

    def get_work_item_by_id(self, wi_id):
        '''
        Retrieves a single work item based off of the supplied ID
        :param wi_id: The work item ID number
        :return: Workitem or None
        '''
        work_items = self.get_work_items(id=wi_id)
        if work_items:
            return work_items[0]
        return None

    def get_work_items_by_owner(self, wi_owner):
        '''
        Retrieves a list of work items owned by the supplied name
        :param wi_owner: The name of the owner to filter for
        :return: List of Workitems or None
        '''
        owner_filter = {"owner/name": wi_owner}
        return self.get_work_items(**owner_filter)
|
{
"content_hash": "2d6e58f1f4b9b157a0f894f265aaae2d",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 108,
"avg_line_length": 33.73529411764706,
"alnum_prop": 0.5646614356291776,
"repo_name": "jroyal/pyIDS",
"id": "7fda86fc841e28bf1b28666f5dc43588d07d2930",
"size": "3441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyIDS/ids.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6431"
}
],
"symlink_target": ""
}
|
"""S3 file system implementation for accessing files on AWS S3."""
# pytype: skip-file
from apache_beam.io.aws import s3io
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import CompressedFile
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystem import FileMetadata
from apache_beam.io.filesystem import FileSystem
__all__ = ['S3FileSystem']
class S3FileSystem(FileSystem):
  """An S3 `FileSystem` implementation for accessing files on AWS S3.

  All operations delegate to :mod:`apache_beam.io.aws.s3io` and translate
  failures into ``BeamIOError``.
  """
  # Maximum number of paths handled per batch operation (copy/delete).
  CHUNK_SIZE = s3io.MAX_BATCH_OPERATION_SIZE
  S3_PREFIX = 's3://'

  def __init__(self, pipeline_options):
    """Initializes a connection to S3.

    Connection configuration is done by passing pipeline options.
    See :class:`~apache_beam.options.pipeline_options.S3Options`.
    """
    super().__init__(pipeline_options)
    self._options = pipeline_options

  @classmethod
  def scheme(cls):
    """URI scheme for the FileSystem
    """
    return 's3'

  def join(self, basepath, *paths):
    """Join two or more pathname components for the filesystem

    Args:
      basepath: string path of the first component of the path
      paths: path components to be added

    Returns: full path after combining all of the components
    """
    if not basepath.startswith(S3FileSystem.S3_PREFIX):
      raise ValueError('Basepath %r must be S3 path.' % basepath)
    path = basepath
    for p in paths:
      # Normalize to exactly one '/' between components.
      path = path.rstrip('/') + '/' + p.lstrip('/')
    return path

  def split(self, path):
    """Splits the given path into two parts.

    Splits the path into a pair (head, tail) such that tail contains the last
    component of the path and head contains everything up to that.
    Head will include the S3 prefix ('s3://').

    Args:
      path: path as a string

    Returns:
      a pair of path components as strings.
    """
    path = path.strip()
    if not path.startswith(S3FileSystem.S3_PREFIX):
      raise ValueError('Path %r must be S3 path.' % path)
    prefix_len = len(S3FileSystem.S3_PREFIX)
    # Search for the last '/' after the scheme prefix only.
    last_sep = path[prefix_len:].rfind('/')
    if last_sep >= 0:
      last_sep += prefix_len
    if last_sep > 0:
      return (path[:last_sep], path[last_sep + 1:])
    elif last_sep < 0:
      return (path, '')
    else:
      # NOTE(review): appears unreachable -- last_sep is either -1 or at
      # least prefix_len after the adjustment above.
      raise ValueError('Invalid path: %s' % path)

  def mkdirs(self, path):
    """Recursively create directories for the provided path.

    Args:
      path: string path of the directory structure that should be created

    Raises:
      IOError: if leaf directory already exists.
    """
    # S3 has no real directories, so there is nothing to create.
    pass

  def has_dirs(self):
    """Whether this FileSystem supports directories."""
    return False

  def _list(self, dir_or_prefix):
    """List files in a location.

    Listing is non-recursive, for filesystems that support directories.

    Args:
      dir_or_prefix: (string) A directory or location prefix (for filesystems
        that don't have directories).

    Returns:
      Generator of ``FileMetadata`` objects.

    Raises:
      ``BeamIOError``: if listing fails, but not if no files were found.
    """
    try:
      for path, size in \
          s3io.S3IO(options=self._options).list_prefix(dir_or_prefix).items():
        yield FileMetadata(path, size)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("List operation failed", {dir_or_prefix: e})

  def _path_open(
      self,
      path,
      mode,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    """Helper functions to open a file in the provided mode.
    """
    compression_type = FileSystem._get_compression_type(path, compression_type)
    mime_type = CompressionTypes.mime_type(compression_type, mime_type)
    raw_file = s3io.S3IO(options=self._options).open(
        path, mode, mime_type=mime_type)
    if compression_type == CompressionTypes.UNCOMPRESSED:
      return raw_file
    # Wrap the raw stream so callers transparently (de)compress.
    return CompressedFile(raw_file, compression_type=compression_type)

  def create(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    """Returns a write channel for the given file path.

    Args:
      path: string path of the file object to be written to the system
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    return self._path_open(path, 'wb', mime_type, compression_type)

  def open(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    """Returns a read channel for the given file path.

    Args:
      path: string path of the file object to be written to the system
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    return self._path_open(path, 'rb', mime_type, compression_type)

  def copy(self, source_file_names, destination_file_names):
    """Recursively copy the file tree from the source to the destination

    Args:
      source_file_names: list of source file objects that needs to be copied
      destination_file_names: list of destination of the new object

    Raises:
      ``BeamIOError``: if any of the copy operations fail
    """
    if not len(source_file_names) == len(destination_file_names):
      message = 'Unable to copy unequal number of sources and destinations'
      raise BeamIOError(message)
    src_dest_pairs = list(zip(source_file_names, destination_file_names))
    return s3io.S3IO(options=self._options).copy_paths(src_dest_pairs)

  def rename(self, source_file_names, destination_file_names):
    """Rename the files at the source list to the destination list.

    Source and destination lists should be of the same size.

    Args:
      source_file_names: List of file paths that need to be moved
      destination_file_names: List of destination_file_names for the files

    Raises:
      ``BeamIOError``: if any of the rename operations fail
    """
    if not len(source_file_names) == len(destination_file_names):
      message = 'Unable to rename unequal number of sources and destinations'
      raise BeamIOError(message)
    src_dest_pairs = list(zip(source_file_names, destination_file_names))
    results = s3io.S3IO(options=self._options).rename_files(src_dest_pairs)
    # Collect only the pairs that actually failed.
    exceptions = {(src, dest): error
                  for (src, dest, error) in results if error is not None}
    if exceptions:
      raise BeamIOError("Rename operation failed", exceptions)

  def exists(self, path):
    """Check if the provided path exists on the FileSystem.

    Args:
      path: string path that needs to be checked.

    Returns: boolean flag indicating if path exists
    """
    try:
      return s3io.S3IO(options=self._options).exists(path)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("exists() operation failed", {path: e})

  def size(self, path):
    """Get size of path on the FileSystem.

    Args:
      path: string path in question.

    Returns: int size of path according to the FileSystem.

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    try:
      return s3io.S3IO(options=self._options).size(path)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("size() operation failed", {path: e})

  def last_updated(self, path):
    """Get UNIX Epoch time in seconds on the FileSystem.

    Args:
      path: string path of file.

    Returns: float UNIX Epoch time

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    try:
      return s3io.S3IO(options=self._options).last_updated(path)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("last_updated operation failed", {path: e})

  def checksum(self, path):
    """Fetch checksum metadata of a file on the
    :class:`~apache_beam.io.filesystem.FileSystem`.

    Args:
      path: string path of a file.

    Returns: string containing checksum

    Raises:
      ``BeamIOError``: if path isn't a file or doesn't exist.
    """
    try:
      return s3io.S3IO(options=self._options).checksum(path)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("Checksum operation failed", {path: e})

  def delete(self, paths):
    """Deletes files or directories at the provided paths.

    Directories will be deleted recursively.

    Args:
      paths: list of paths that give the file objects to be deleted
    """
    results = s3io.S3IO(options=self._options).delete_paths(paths)
    # Keep only the paths whose deletion actually failed.
    exceptions = {
        path: error
        for (path, error) in results.items() if error is not None
    }
    if exceptions:
      raise BeamIOError("Delete operation failed", exceptions)
|
{
"content_hash": "93fc3cd8e8884dcc6acc1123f089fe98",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 79,
"avg_line_length": 32.107142857142854,
"alnum_prop": 0.6737486095661847,
"repo_name": "axbaretto/beam",
"id": "8a5e94e3fc7644e1195ae1bdc0577da4f7a75050",
"size": "9775",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/aws/s3filesystem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
import re
logging.basicConfig(level=logging.DEBUG)

# Generate the v2 DLP client library from the protos via artman/GAPIC.
gapic = gcp.GAPICGenerator()
v2_library = gapic.ruby_library(
    'dlp', 'v2', config_path='/google/privacy/dlp/artman_dlp_v2.yaml',
    artman_output_name='google-cloud-ruby/google-cloud-dlp'
)

# Copy the generated sources and metadata into this repository; the gemspec
# is merged so manually maintained fields survive regeneration.
s.copy(v2_library / 'lib')
s.copy(v2_library / 'test')
s.copy(v2_library / 'README.md')
s.copy(v2_library / 'LICENSE')
s.copy(v2_library / '.gitignore')
s.copy(v2_library / '.yardopts')
s.copy(v2_library / 'google-cloud-dlp.gemspec', merge=ruby.merge_gemspec)
# https://github.com/googleapis/gapic-generator/issues/2242
def escape_braces(match, _expr=re.compile(
        '^([^`]*(`[^`]*`[^`]*)*)([^`#\\$\\\\])\\{([\\w,]+)\\}')):
    """Backslash-escape literal ``{...}`` expressions in a comment block.

    ``match`` is an ``re.Match`` whose group 0 is the comment text to fix.
    Repeatedly rewrites the first ``{word}`` (or ``{a,b}``) whose preceding
    character is not one of `` ` # $ \\ `` -- and that is not inside an
    inline backtick span -- into ``\\\\{word}``, until none remain
    (presumably so the Ruby documentation tooling does not treat the braces
    specially; see the linked issue).

    PERF: the pattern is compiled once at import time (bound as a default
    argument) instead of on every invocation, since this callback runs once
    per matched comment block across all generated files.
    """
    content = match.group(0)
    while True:
        content, count = _expr.subn('\\1\\3\\\\\\\\{\\4}', content)
        if count == 0:
            return content
# Apply the brace-escaping fix (see escape_braces above) to comment lines
# in all generated Ruby sources.
s.replace(
    'lib/google/cloud/**/*.rb',
    '\n(\\s+)#[^\n]*[^\n#\\$\\\\]\\{[\\w,]+\\}',
    escape_braces)

# https://github.com/googleapis/gapic-generator/issues/2243
# Mark the generated *_stub accessor as @private in the docs.
s.replace(
    'lib/google/cloud/dlp/*/*_client.rb',
    '(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
    '\\1\\2# @private\n\\2\\3')

# https://github.com/googleapis/gapic-generator/issues/2279
# Ensure a blank line follows the license / generated-code header block.
s.replace(
    'lib/**/*.rb',
    '\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
    '\\1\n\\6')

# https://github.com/googleapis/gapic-generator/issues/2323
# Point links at the renamed googleapis GitHub organisation.
s.replace(
    [
        'lib/**/*.rb',
        'README.md'
    ],
    'https://github\\.com/GoogleCloudPlatform/google-cloud-ruby',
    'https://github.com/googleapis/google-cloud-ruby'
)
s.replace(
    [
        'lib/**/*.rb',
        'README.md'
    ],
    'https://googlecloudplatform\\.github\\.io/google-cloud-ruby',
    'https://googleapis.github.io/google-cloud-ruby'
)
|
{
"content_hash": "026d7fb485d227d6964cb207ac867654",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 116,
"avg_line_length": 32.175,
"alnum_prop": 0.6398601398601399,
"repo_name": "quartzmo/gcloud-ruby",
"id": "5cf194d8f1031f4139e8bd3a830cad69ae0acf6e",
"size": "2574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-cloud-dlp/synth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "20395"
},
{
"name": "Ruby",
"bytes": "562364"
}
],
"symlink_target": ""
}
|
import numpy as np
from .. import smooth as sm
from numpy.testing import assert_equal
import copy
import math
def test_smooth():
    """smooth() must accept an empty mapping and leave it unmodified."""
    x = {}
    # FIX: the return value was previously bound to an unused variable
    # ``y``; only the call's success and the input's invariance are under
    # test here.
    sm.smooth(x)
    assert_equal(len(x), 0)
|
{
"content_hash": "dbd44a2b67c1dda61c4a6d9203090c46",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 38,
"avg_line_length": 18,
"alnum_prop": 0.7111111111111111,
"repo_name": "berkeley-stat159/project-zeta",
"id": "a175c1cfc60d0ff841d5c1b04cded9a6025d5239",
"size": "180",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "code/utils/tests/test_smooth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "7263"
},
{
"name": "Python",
"bytes": "450467"
},
{
"name": "TeX",
"bytes": "39084"
}
],
"symlink_target": ""
}
|
""" Sahana Eden Automated Test - HRM003 Create Staff Training
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
import unittest
from tests.web2unittest import SeleniumUnitTest
from selenium.common.exceptions import NoSuchElementException
from s3 import s3_debug
from tests import *
#import unittest, re, time
import time
class CreateStaffTraining(SeleniumUnitTest):
    def test_hrm003_create_staff_training(self):
        """
        @case: HRM003
        @description: Create a Staff Training
            * Create Course
            * Create Training Event
        @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
        @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
        @ToDo:
            * Add Staff Participants
        """
        # FIX: removed the unused local ``browser = self.browser``.
        # Create the course the training event will reference.
        self.login(account="admin", nexturl="hrm/course/create")
        self.create("hrm_course",
                    [("code", "32329408"),
                     ("name", "Emergency First Aid"),
                     ]
                    )
        # Schedule a training event for that course.
        self.login(account="admin", nexturl="hrm/training_event/create")
        self.create("hrm_training_event",
                    [("course_id", "Emergency First Aid", "option"),
                     ("start_date", "2012-08-01"),
                     ("hours", "12"),
                     ("site_id", "AP Zone (Office)", "option"),
                     ("comments", "Testing comments"),
                     ]
                    )
|
{
"content_hash": "08b0169677b954d50a48050b90711eb3",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 110,
"avg_line_length": 38.36363636363637,
"alnum_prop": 0.5883547731888964,
"repo_name": "snpabilonia/rgims",
"id": "3a6d5327e442080c6db47d2bc037f3620cd3aa6c",
"size": "2954",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "modules/tests/hrm/create_staff_training.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1082340"
},
{
"name": "JavaScript",
"bytes": "14415870"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "23003354"
},
{
"name": "Shell",
"bytes": "697"
},
{
"name": "XSLT",
"bytes": "1303725"
}
],
"symlink_target": ""
}
|
"""Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import functools
import pickle
import tempfile
import unittest
import test.test_support
import test.string_tests
import test.buffer_tests
# Select the check_bytes_warnings decorator at import time, based on
# whether the interpreter was started with -b/-bb (sys.flags.bytes_warning).
if sys.flags.bytes_warning:
    def check_bytes_warnings(func):
        # Run *func* while asserting that a BytesWarning is emitted.
        @functools.wraps(func)
        def wrapper(*args, **kw):
            with test.test_support.check_warnings(('', BytesWarning)):
                return func(*args, **kw)
        return wrapper
else:
    # no-op
    def check_bytes_warnings(func):
        return func
class Indexable:
    """Minimal object implementing the ``__index__`` protocol.

    Lets the tests verify that bytes/bytearray accept arbitrary
    index-like objects wherever an integer is expected.
    """

    def __init__(self, value=0):
        self.value = value

    def __index__(self):
        return self.value
class BaseBytesTest(unittest.TestCase):
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxint])
self.assertRaises(IndexError, lambda: b[sys.maxint+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxint])
self.assertRaises(IndexError, lambda: b[-sys.maxint-1])
self.assertRaises(IndexError, lambda: b[-sys.maxint-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, self.type2test, -1)
self.assertEqual(self.type2test('0', 'ascii'), b'0')
self.assertEqual(self.type2test(b'0'), b'0')
self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
# allowed in 2.x
#self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxint])
self.assertRaises(ValueError, self.type2test, [sys.maxint+1])
self.assertRaises(ValueError, self.type2test, [10**100])
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.assertTrue(b2 != b3)
self.assertTrue(b1 <= b2)
self.assertTrue(b1 <= b3)
self.assertTrue(b1 < b3)
self.assertTrue(b1 >= b2)
self.assertTrue(b3 >= b2)
self.assertTrue(b3 > b2)
self.assertFalse(b1 != b2)
self.assertFalse(b2 == b3)
self.assertFalse(b1 > b2)
self.assertFalse(b1 > b3)
self.assertFalse(b1 >= b3)
self.assertFalse(b1 < b2)
self.assertFalse(b3 < b2)
self.assertFalse(b3 <= b2)
@check_bytes_warnings
def test_compare_to_str(self):
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character sizes
self.assertEqual(self.type2test(b"\0a\0b\0c") == u"abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == u"abc", False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == u"abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == u"abc", False)
self.assertEqual(self.type2test() == unicode(), False)
self.assertEqual(self.type2test() != unicode(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
#XXX: Jython doesn't support codepoints outside of the UTF-16 range even at
# parse time. Maybe someday we might push the error off to later, but for
# now I'm just commenting this whole test out.
# See http://bugs.jython.org/issue1836 for more.
# def test_encoding(self):
# sample = u"Hello world\n\u1234\u5678\u9abc\udef0"
# for enc in ("utf8", "utf16"):
# b = self.type2test(sample, enc)
# self.assertEqual(b, self.type2test(sample.encode(enc)))
# self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin1")
# b = self.type2test(sample, "latin1", "ignore")
# self.assertEqual(b, self.type2test(sample[:-4], "utf-8"))
    def test_decode(self):
        """decode() round-trips through utf8/utf16 and honors the errors
        argument both positionally and by keyword."""
        # NOTE(review): \d is not a recognized escape, so each "\def0"
        # below is a literal backslash followed by "def0", not a \udef0
        # code point.  Kept as-is to match the upstream test.
        sample = u"Hello world\n\u1234\u5678\u9abc\def0\def0"
        for enc in ("utf8", "utf16"):
            b = self.type2test(sample, enc)
            self.assertEqual(b.decode(enc), sample)
        # Bytes produced via latin1 contain \x80..\xff sequences that are
        # invalid UTF-8, so a strict utf8 decode must fail.
        sample = u"Hello world\n\x80\x81\xfe\xff"
        b = self.type2test(sample, "latin1")
        self.assertRaises(UnicodeDecodeError, b.decode, "utf8")
        self.assertEqual(b.decode("utf8", "ignore"), "Hello world\n")
        # Same call spelled with keyword arguments (in swapped order).
        self.assertEqual(b.decode(errors="ignore", encoding="utf8"),
                         "Hello world\n")
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + u"def")
self.assertRaises(TypeError, lambda: u"abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
self.assertRaises((OverflowError, MemoryError),
lambda: b * sys.maxsize)
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.assertIn(ord('a'), b)
self.assertIn(int(ord('a')), b)
self.assertNotIn(200, b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: u"a" in b)
for f in bytes, bytearray:
self.assertIn(f(b""), b)
self.assertIn(f(b"a"), b)
self.assertIn(f(b"b"), b)
self.assertIn(f(b"c"), b)
self.assertIn(f(b"ab"), b)
self.assertIn(f(b"bc"), b)
self.assertIn(f(b"abc"), b)
self.assertNotIn(f(b"ac"), b)
self.assertNotIn(f(b"d"), b)
self.assertNotIn(f(b"dab"), b)
self.assertNotIn(f(b"abd"), b)
    def test_fromhex(self):
        """fromhex() parses pairs of hex digits; ASCII spaces are allowed
        between byte pairs, anything else is rejected."""
        self.assertRaises(TypeError, self.type2test.fromhex)
        self.assertRaises(TypeError, self.type2test.fromhex, 1)
        self.assertEqual(self.type2test.fromhex(u''), self.type2test())
        b = bytearray([0x1a, 0x2b, 0x30, 0xca, 0xfe, 0xba, 0xbe]) # challenging signs
        # Digits are case-insensitive; spaces between pairs are ignored.
        self.assertEqual(self.type2test.fromhex(u'1a2B30CafEBabe'), b)
        self.assertEqual(self.type2test.fromhex(u' 1A 2B 30 CafeBabe '), b)
        self.assertEqual(self.type2test.fromhex(u'0000'), b'\0\0')
        # Odd digit counts, non-hex characters and non-space characters
        # (including NUL) all raise ValueError.
        self.assertRaises(ValueError, self.type2test.fromhex, u'a')
        self.assertRaises(ValueError, self.type2test.fromhex, u'rt')
        self.assertRaises(ValueError, self.type2test.fromhex, u'1a b cd')
        self.assertRaises(ValueError, self.type2test.fromhex, u'\x00')
        self.assertRaises(ValueError, self.type2test.fromhex, u'12 \x00 34')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
self.assertEqual(self.type2test(b".").join([b"ab", b"cd"]), b"ab.cd")
# XXX more...
def test_count(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
    def test_startswith(self):
        """startswith: true prefixes match; an empty instance matches no
        non-empty prefix."""
        b = self.type2test(b'hello')
        self.assertFalse(self.type2test().startswith(b"anything"))
        self.assertTrue(b.startswith(b"hello"))
        self.assertTrue(b.startswith(b"hel"))
        self.assertTrue(b.startswith(b"h"))
        self.assertFalse(b.startswith(b"hellow"))
        self.assertFalse(b.startswith(b"ha"))
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
def test_find(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
def test_rfind(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
def test_index(self):
b = self.type2test(b'world')
self.assertEqual(b.index(b'w'), 0)
self.assertEqual(b.index(b'orl'), 1)
self.assertRaises(ValueError, b.index, b'worm')
self.assertRaises(ValueError, b.index, b'ldo')
def test_rindex(self):
# XXX could be more rigorous
b = self.type2test(b'world')
self.assertEqual(b.rindex(b'w'), 0)
self.assertEqual(b.rindex(b'orl'), 1)
self.assertRaises(ValueError, b.rindex, b'worm')
self.assertRaises(ValueError, b.rindex, b'ldo')
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
def test_split_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.split(), [b'arf', b'barf'])
self.assertEqual(b.split(None), [b'arf', b'barf'])
self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
b = self.type2test(b)
if not test.test_support.is_jython:
self.assertEqual(b.split(), [b])
else:
# \x1c .. \x1f are whitespace Jython (which follows Java)
self.assertEqual(b.split(), [b'a', b'b'])
self.assertEqual(self.type2test(b' a bb c ').split(None, 0), [b'a bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 1), [b'a', b'bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 2), [b'a', b'bb', b'c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, u' ')
def test_split_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
if not test.test_support.is_jython:
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
else:
# \x1c .. \x1f are whitespace Jython
self.assertEqual(b.split(), [])
def test_rsplit(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.rsplit(b'w'), [b])
def test_rsplit_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.rsplit(), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 0), [b' a bb c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 1), [b' a bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 2), [b' a', b'bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, u' ')
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
if not test.test_support.is_jython:
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
else:
# \x1c .. \x1f are whitespace Jython
self.assertEqual(b.rsplit(), [])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
    def test_pickling(self):
        """Instances survive a pickle round-trip under every protocol,
        including empty and high-byte payloads."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
                b = self.type2test(b)
                ps = pickle.dumps(b, proto)
                q = pickle.loads(ps)
                self.assertEqual(b, q)
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
self.assertEqual(b.strip(), b'abc')
self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
self.assertRaises(TypeError, self.type2test(b'abc').strip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').lstrip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').rstrip, u'b')
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
def test_none_arguments(self):
# issue 11828
b = self.type2test(b'hello')
l = self.type2test(b'l')
h = self.type2test(b'h')
x = self.type2test(b'x')
o = self.type2test(b'o')
self.assertEqual(2, b.find(l, None))
self.assertEqual(3, b.find(l, -2, None))
self.assertEqual(2, b.find(l, None, -2))
self.assertEqual(0, b.find(h, None, None))
self.assertEqual(3, b.rfind(l, None))
self.assertEqual(3, b.rfind(l, -2, None))
self.assertEqual(2, b.rfind(l, None, -2))
self.assertEqual(0, b.rfind(h, None, None))
self.assertEqual(2, b.index(l, None))
self.assertEqual(3, b.index(l, -2, None))
self.assertEqual(2, b.index(l, None, -2))
self.assertEqual(0, b.index(h, None, None))
self.assertEqual(3, b.rindex(l, None))
self.assertEqual(3, b.rindex(l, -2, None))
self.assertEqual(2, b.rindex(l, None, -2))
self.assertEqual(0, b.rindex(h, None, None))
self.assertEqual(2, b.count(l, None))
self.assertEqual(1, b.count(l, -2, None))
self.assertEqual(1, b.count(l, None, -2))
self.assertEqual(0, b.count(x, None, None))
self.assertEqual(True, b.endswith(o, None))
self.assertEqual(True, b.endswith(o, -2, None))
self.assertEqual(True, b.endswith(l, None, -2))
self.assertEqual(False, b.endswith(x, None, None))
self.assertEqual(True, b.startswith(h, None))
self.assertEqual(True, b.startswith(l, -2, None))
self.assertEqual(True, b.startswith(h, None, -2))
self.assertEqual(False, b.startswith(x, None, None))
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
b = self.type2test(b'hello')
x = self.type2test(b'x')
self.assertRaisesRegexp(TypeError, r'\bfind\b', b.find,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brfind\b', b.rfind,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bindex\b', b.index,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brindex\b', b.rindex,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bcount\b', b.count,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bstartswith\b', b.startswith,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bendswith\b', b.endswith,
x, None, None, None)
    def test_translate(self):
        """translate() maps bytes through a 256-entry table and drops the
        bytes given in the delete argument; the source is untouched."""
        # adapted from AssortedBytesTest.test_translate
        b = self.type2test(b'hello')
        # Identity table built from Python 2 str characters, then 'o'
        # remapped to 'e'.  NOTE(review): the item assignment assumes
        # type2test is mutable (bytearray); it would raise on bytes.
        rosetta = self.type2test().join(map(chr,range(256)))
        rosetta[ord('o')] = ord('e')
        c = b.translate(rosetta, b'l')
        self.assertEqual(b, b'hello')  # source unchanged
        self.assertEqual(c, b'hee')    # 'l's deleted, 'o' mapped to 'e'
        # A None table means identity mapping: only deletion happens.
        c = b.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        c = b.translate(None, b'the larch')
        self.assertEqual(c, b'o')
        # A table of the wrong length (255 entries here) is rejected.
        stone = self.type2test(''.join(map(chr,range(1,256))))
        self.assertRaises(ValueError, b.translate, stone, b'short')
        # The delete argument itself must be bytes-like, never None.
        self.assertRaises(TypeError, b.translate, rosetta, None)
        self.assertRaises(TypeError, b.translate, None, None)
class ByteArrayTest(BaseBytesTest):
type2test = bytearray
def test_nohash(self):
self.assertRaises(TypeError, hash, bytearray())
def test_bytearray_api(self):
short_sample = b"Hello world\n"
sample = short_sample + b"\0"*(20 - len(short_sample))
tfn = tempfile.mktemp()
try:
# Prepare
with open(tfn, "wb") as f:
f.write(short_sample)
# Test readinto
with open(tfn, "rb") as f:
b = bytearray(20)
n = f.readinto(b)
self.assertEqual(n, len(short_sample))
# Python 2.x
b_sample = (ord(s) for s in sample)
self.assertEqual(list(b), list(b_sample))
# Test writing in binary mode
with open(tfn, "wb") as f:
f.write(b)
with open(tfn, "rb") as f:
self.assertEqual(f.read(), sample)
# Text mode is ambiguous; don't test
finally:
try:
os.remove(tfn)
except os.error:
pass
def test_reverse(self):
b = bytearray(b'hello')
self.assertEqual(b.reverse(), None)
self.assertEqual(b, b'olleh')
b = bytearray(b'hello1') # test even number of items
b.reverse()
self.assertEqual(b, b'1olleh')
b = bytearray()
b.reverse()
self.assertFalse(b)
def test_regexps(self):
def by(s):
return bytearray(map(ord, s))
b = by("Hello, world")
self.assertEqual(re.findall(r"\w+", b), [by("Hello"), by("world")])
def test_setitem(self):
b = bytearray([1, 2, 3])
b[1] = 100
self.assertEqual(b, bytearray([1, 100, 3]))
b[-1] = 200
self.assertEqual(b, bytearray([1, 100, 200]))
b[0] = Indexable(10)
self.assertEqual(b, bytearray([10, 100, 200]))
try:
b[3] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[-10] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[0] = 256
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = Indexable(-1)
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = None
self.fail("Didn't raise TypeError")
except TypeError:
pass
def test_delitem(self):
b = bytearray(range(10))
del b[0]
self.assertEqual(b, bytearray(range(1, 10)))
del b[-1]
self.assertEqual(b, bytearray(range(1, 9)))
del b[4]
self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))
def test_setslice(self):
b = bytearray(range(10))
self.assertEqual(list(b), list(range(10)))
b[0:5] = bytearray([1, 1, 1, 1, 1])
self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
del b[0:-5]
self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
b[0:0] = bytearray([0, 1, 2, 3, 4])
self.assertEqual(b, bytearray(range(10)))
b[-7:-3] = bytearray([100, 101])
self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
b[3:5] = [3, 4, 5, 6]
self.assertEqual(b, bytearray(range(10)))
b[3:0] = [42, 42, 42]
self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
L = list(range(255))
b = bytearray(L)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
b[start:stop:step] = data
self.assertEqual(b, bytearray(L))
del L[start:stop:step]
del b[start:stop:step]
self.assertEqual(b, bytearray(L))
    def test_setslice_trap(self):
        # This test verifies that we correctly handle assigning self
        # to a slice of self (the old Lambert Meertens trap).
        b = bytearray(range(256))
        b[8:] = b
        # The source must be snapshotted before the target is resized:
        # the result is the first 8 bytes followed by the full original.
        self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))
def test_iconcat(self):
b = bytearray(b"abc")
b1 = b
b += b"def"
self.assertEqual(b, b"abcdef")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
b += b"xyz"
self.assertEqual(b, b"abcdefxyz")
try:
b += u""
except TypeError:
pass
else:
self.fail("bytes += unicode didn't raise TypeError")
def test_irepeat(self):
b = bytearray(b"abc")
b1 = b
b *= 3
self.assertEqual(b, b"abcabcabc")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_irepeat_1char(self):
b = bytearray(b"x")
b1 = b
b *= 100
self.assertEqual(b, b"x"*100)
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_alloc(self):
b = bytearray()
alloc = b.__alloc__()
self.assertTrue(alloc >= 0)
seq = [alloc]
for i in range(100):
b += b"x"
alloc = b.__alloc__()
self.assertTrue(alloc >= len(b))
if alloc not in seq:
seq.append(alloc)
def test_extend(self):
orig = b'hello'
a = bytearray(orig)
a.extend(a)
self.assertEqual(a, orig + orig)
self.assertEqual(a[5:], orig)
a = bytearray(b'')
# Test iterators that don't have a __length_hint__
a.extend(map(ord, orig * 25))
a.extend(ord(x) for x in orig * 25)
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(iter(map(ord, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(list(map(ord, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
self.assertEqual(len(a), 0)
a = bytearray(b'')
a.extend([Indexable(ord('a'))])
self.assertEqual(a, b'a')
def test_remove(self):
b = bytearray(b'hello')
b.remove(ord('l'))
self.assertEqual(b, b'helo')
b.remove(ord('l'))
self.assertEqual(b, b'heo')
self.assertRaises(ValueError, lambda: b.remove(ord('l')))
self.assertRaises(ValueError, lambda: b.remove(400))
self.assertRaises(TypeError, lambda: b.remove(u'e'))
# remove first and last
b.remove(ord('o'))
b.remove(ord('h'))
self.assertEqual(b, b'e')
self.assertRaises(TypeError, lambda: b.remove(u'e'))
b.remove(Indexable(ord('e')))
self.assertEqual(b, b'')
def test_pop(self):
b = bytearray(b'world')
self.assertEqual(b.pop(), ord('d'))
self.assertEqual(b.pop(0), ord('w'))
self.assertEqual(b.pop(-2), ord('r'))
self.assertRaises(IndexError, lambda: b.pop(10))
self.assertRaises(IndexError, lambda: bytearray().pop())
# test for issue #6846
self.assertEqual(bytearray(b'\xff').pop(), 0xff)
def test_nosort(self):
self.assertRaises(AttributeError, lambda: bytearray().sort())
def test_append(self):
b = bytearray(b'hell')
b.append(ord('o'))
self.assertEqual(b, b'hello')
self.assertEqual(b.append(100), None)
b = bytearray()
b.append(ord('A'))
self.assertEqual(len(b), 1)
self.assertRaises(TypeError, lambda: b.append(u'o'))
b = bytearray()
b.append(Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_insert(self):
b = bytearray(b'msssspp')
b.insert(1, ord('i'))
b.insert(4, ord('i'))
b.insert(-2, ord('i'))
b.insert(1000, ord('i'))
self.assertEqual(b, b'mississippi')
# allowed in 2.x
#self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
b = bytearray()
b.insert(0, Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_copied(self):
# Issue 4348. Make sure that operations that don't mutate the array
# copy the bytes.
b = bytearray(b'abc')
self.assertFalse(b is b.replace(b'abc', b'cde', 0))
t = bytearray([i for i in range(256)])
x = bytearray(b'')
self.assertFalse(x is x.translate(t))
def test_partition_bytearray_doesnt_share_nullstring(self):
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
# Same for rpartition
b, c, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
c, b, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
def test_resize_forbidden(self):
# #4509: can't resize a bytearray when there are buffer exports, even
# if it wouldn't reallocate the underlying buffer.
# Furthermore, no destructive changes to the buffer may be applied
# before raising the error.
b = bytearray(range(10))
v = memoryview(b)
def resize(n):
b[1:-1] = range(n + 1, 2*n - 1)
resize(10)
orig = b[:]
self.assertRaises(BufferError, resize, 11)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 9)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 0)
self.assertEqual(b, orig)
# Other operations implying resize
self.assertRaises(BufferError, b.pop, 0)
self.assertEqual(b, orig)
self.assertRaises(BufferError, b.remove, b[1])
self.assertEqual(b, orig)
def delitem():
del b[1]
self.assertRaises(BufferError, delitem)
self.assertEqual(b, orig)
# deleting a non-contiguous slice
def delslice():
b[1:-1:2] = b""
self.assertRaises(BufferError, delslice)
self.assertEqual(b, orig)
def test_empty_bytearray(self):
# Issue #7561: operations on empty bytearrays could crash in many
# situations, due to a fragile implementation of the
# PyByteArray_AS_STRING() C macro.
self.assertRaises(ValueError, int, bytearray(b''))
class AssortedBytesTest(unittest.TestCase):
#
# Test various combinations of bytes and bytearray
#
    @check_bytes_warnings
    def test_repr_str(self):
        """str() and repr() agree for bytes/bytearray, escape
        non-printable bytes, and adapt quoting to embedded quotes."""
        for f in str, repr:
            self.assertEqual(f(bytearray()), "bytearray(b'')")
            self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
            self.assertEqual(f(bytearray([0, 1, 254, 255])),
                             "bytearray(b'\\x00\\x01\\xfe\\xff')")
            self.assertEqual(f(b"abc"), "b'abc'")
            # A lone single quote switches the repr to double quotes...
            self.assertEqual(f(b"'"), '''b"'"''') # '''
            # ...but with both quote kinds present, single quotes win and
            # the embedded single quote is backslash-escaped.
            self.assertEqual(f(b"'\""), r"""b'\'"'""") # '
def test_compare_bytes_to_bytearray(self):
self.assertEqual(b"abc" == bytes(b"abc"), True)
self.assertEqual(b"ab" != bytes(b"abc"), True)
self.assertEqual(b"ab" <= bytes(b"abc"), True)
self.assertEqual(b"ab" < bytes(b"abc"), True)
self.assertEqual(b"abc" >= bytes(b"ab"), True)
self.assertEqual(b"abc" > bytes(b"ab"), True)
self.assertEqual(b"abc" != bytes(b"abc"), False)
self.assertEqual(b"ab" == bytes(b"abc"), False)
self.assertEqual(b"ab" > bytes(b"abc"), False)
self.assertEqual(b"ab" >= bytes(b"abc"), False)
self.assertEqual(b"abc" < bytes(b"ab"), False)
self.assertEqual(b"abc" <= bytes(b"ab"), False)
self.assertEqual(bytes(b"abc") == b"abc", True)
self.assertEqual(bytes(b"ab") != b"abc", True)
self.assertEqual(bytes(b"ab") <= b"abc", True)
self.assertEqual(bytes(b"ab") < b"abc", True)
self.assertEqual(bytes(b"abc") >= b"ab", True)
self.assertEqual(bytes(b"abc") > b"ab", True)
self.assertEqual(bytes(b"abc") != b"abc", False)
self.assertEqual(bytes(b"ab") == b"abc", False)
self.assertEqual(bytes(b"ab") > b"abc", False)
self.assertEqual(bytes(b"ab") >= b"abc", False)
self.assertEqual(bytes(b"abc") < b"ab", False)
self.assertEqual(bytes(b"abc") <= b"ab", False)
def test_doc(self):
self.assertIsNotNone(bytearray.__doc__)
self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
self.assertIsNotNone(bytes.__doc__)
self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)
def test_from_bytearray(self):
sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
buf = memoryview(sample)
b = bytearray(buf)
self.assertEqual(b, bytearray(sample))
@check_bytes_warnings
def test_to_str(self):
self.assertEqual(str(b''), "b''")
self.assertEqual(str(b'x'), "b'x'")
self.assertEqual(str(b'\x80'), "b'\\x80'")
self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")
def test_literal(self):
tests = [
(b"Wonderful spam", "Wonderful spam"),
(br"Wonderful spam too", "Wonderful spam too"),
(b"\xaa\x00\000\200", "\xaa\x00\000\200"),
(br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
]
for b, s in tests:
self.assertEqual(b, bytearray(s, 'latin-1'))
for c in range(128, 256):
self.assertRaises(SyntaxError, eval,
'b"%s"' % chr(c))
def test_translate(self):
b = b'hello'
ba = bytearray(b)
rosetta = bytearray(range(0, 256))
rosetta[ord('o')] = ord('e')
c = b.translate(rosetta, b'l')
self.assertEqual(b, b'hello')
self.assertEqual(c, b'hee')
c = ba.translate(rosetta, b'l')
self.assertEqual(ba, b'hello')
self.assertEqual(c, b'hee')
c = b.translate(None, b'e')
self.assertEqual(c, b'hllo')
c = ba.translate(None, b'e')
self.assertEqual(c, b'hllo')
self.assertRaises(TypeError, b.translate, None, None)
self.assertRaises(TypeError, ba.translate, None, None)
def test_split_bytearray(self):
self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])
def test_rsplit_bytearray(self):
self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py, we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
                           test.buffer_tests.MixinBytesBufferCommonTests):
    """PEP 3137: methods of a mutable type must return new objects,
    never a reference to self."""

    def marshal(self, x):
        # Called by the mixin to build the type under test.
        return bytearray(x)

    def test_returns_new_copy(self):
        val = self.marshal(b'1234')
        # On immutable types these MAY return a reference to themselves
        # but on mutable types like bytearray they MUST return a new copy.
        for methname in ('zfill', 'rjust', 'ljust', 'center'):
            method = getattr(val, methname)
            newval = method(3)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            methname+' returned self on a mutable object')
        # The same expressions as before, but spelled as callables
        # instead of strings fed to eval() -- greppable and lint-friendly.
        derivations = (
            ('val.split()[0]', lambda v: v.split()[0]),
            ('val.rsplit()[0]', lambda v: v.rsplit()[0]),
            ('val.partition(".")[0]', lambda v: v.partition(".")[0]),
            ('val.rpartition(".")[2]', lambda v: v.rpartition(".")[2]),
            ('val.splitlines()[0]', lambda v: v.splitlines()[0]),
            ('val.replace("", "")', lambda v: v.replace("", "")),
        )
        for expr, derive in derivations:
            newval = derive(val)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            expr+' returned val on a mutable object')
class FixedStringTest(test.string_tests.BaseTest):
    """Reuse the generic string tests against byte strings.

    fixtype() encodes str fixtures to bytes; tests that only make sense
    for str are stubbed out below.
    """
    def fixtype(self, obj):
        # The generic fixtures are str; encode them so the tests operate
        # on the byte type under test.
        if isinstance(obj, str):
            return obj.encode("utf-8")
        return super(FixedStringTest, self).fixtype(obj)
    # Currently the bytes containment testing uses a single integer
    # value. This may not be the final design, but until then the
    # generic str containment test is not valid for bytes.
    def test_contains(self):
        pass
    def test_expandtabs(self):
        pass
    def test_upper(self):
        pass
    def test_lower(self):
        pass
    def test_hash(self):
        # XXX check this out
        pass
class ByteArrayAsStringTest(FixedStringTest):
    # Run the generic string tests with bytearray as the type under test.
    type2test = bytearray
class ByteArraySubclass(bytearray):
    # Trivial subclass used by ByteArraySubclassTest to check that
    # bytearray behavior survives subclassing.
    pass
class ByteArraySubclassTest(unittest.TestCase):
def test_basic(self):
self.assertTrue(issubclass(ByteArraySubclass, bytearray))
self.assertIsInstance(ByteArraySubclass(), bytearray)
a, b = b"abcd", b"efgh"
_a, _b = ByteArraySubclass(a), ByteArraySubclass(b)
# test comparison operators with subclass instances
self.assertTrue(_a == _a)
self.assertTrue(_a != _b)
self.assertTrue(_a < _b)
self.assertTrue(_a <= _b)
self.assertTrue(_b >= _a)
self.assertTrue(_b > _a)
self.assertTrue(_a is not a)
# test concat of subclass instances
self.assertEqual(a + b, _a + _b)
self.assertEqual(a + b, a + _b)
self.assertEqual(a + b, _a + b)
# test repeat
self.assertTrue(a*5 == _a*5)
def test_join(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
s1 = ByteArraySubclass(b"abcd")
s2 = bytearray().join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is bytearray, type(s2))
# Test reverse, calling join on subclass
s3 = s1.join([b"abcd"])
self.assertTrue(type(s3) is bytearray)
def test_pickle(self):
a = ByteArraySubclass(b"abcd")
a.x = 10
a.y = ByteArraySubclass(b"efgh")
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
b = pickle.loads(pickle.dumps(a, proto))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_copy(self):
a = ByteArraySubclass(b"abcd")
a.x = 10
a.y = ByteArraySubclass(b"efgh")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_init_override(self):
class subclass(bytearray):
def __init__(self, newarg=1, *args, **kwargs):
bytearray.__init__(self, *args, **kwargs)
x = subclass(4, source=b"abcd")
self.assertEqual(x, b"abcd")
x = subclass(newarg=4, source=b"abcd")
self.assertEqual(x, b"abcd")
def test_main():
    """Run the bytearray-related test cases under the regrtest driver.

    BytesTest, AssortedBytesTest and BytesAsStringTest are intentionally
    not run here (left disabled in the original suite).
    """
    test.test_support.run_unittest(
        ByteArrayTest,
        ByteArrayAsStringTest,
        ByteArraySubclassTest,
        BytearrayPEP3137Test)
# Allow running this test module directly, outside the regrtest framework.
if __name__ == "__main__":
    test_main()
|
{
"content_hash": "7c40d1033e4bc769d17fc5011a4d63c7",
"timestamp": "",
"source": "github",
"line_count": 1152,
"max_line_length": 96,
"avg_line_length": 38.68836805555556,
"alnum_prop": 0.5695662904709552,
"repo_name": "azoft-dev-team/imagrium",
"id": "19454fd4ffb85f62410b0d314e2016205ad4723d",
"size": "44569",
"binary": false,
"copies": "2",
"ref": "refs/heads/win",
"path": "env/Lib/test/test_bytes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22116"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "111703"
},
{
"name": "Java",
"bytes": "448343"
},
{
"name": "Python",
"bytes": "14076342"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Ruby",
"bytes": "5269"
},
{
"name": "Shell",
"bytes": "3193"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
from typing import *
from edb import errors
from edb.common import verutils
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
from edb.edgeql import parser as qlparser
from . import annos as s_anno
from . import delta as sd
from . import name as sn
from . import objects as so
from . import schema as s_schema
class ExtensionPackage(
    so.GlobalObject,
    s_anno.AnnotationSubject,
    qlkind=qltypes.SchemaObjectClass.EXTENSION_PACKAGE,
    data_safe=False,
):
    """An installable extension package: a named, versioned DDL script."""

    # Package version; compared with high significance when diffing schemas.
    version = so.SchemaField(
        verutils.Version,
        compcoef=0.9,
    )

    # The DDL script executed when the extension is created.
    script = so.SchemaField(
        str,
        compcoef=0.9,
    )

    @classmethod
    def get_schema_class_displayname(cls) -> str:
        return 'extension package'

    @classmethod
    def get_shortname_static(cls, name: sn.Name) -> sn.UnqualName:
        # Strip the specialization quals (e.g. version) from the full name.
        short = sn.shortname_from_fullname(name)
        return sn.UnqualName(short.name)

    @classmethod
    def get_displayname_static(cls, name: sn.Name) -> str:
        return cls.get_shortname_static(name).name
class Extension(
    so.Object,
    qlkind=qltypes.SchemaObjectClass.EXTENSION,
    data_safe=False,
):
    """An extension instantiated in a database from an ExtensionPackage."""

    # The concrete (name + version) package this extension was created from.
    package = so.SchemaField(
        ExtensionPackage,
    )
class ExtensionPackageCommandContext(
    sd.ObjectCommandContext[ExtensionPackage],
    s_anno.AnnotationSubjectCommandContext,
):
    """Command context for extension package DDL operations."""
    pass
class ExtensionPackageCommand(
    sd.GlobalObjectCommand[ExtensionPackage],
    s_anno.AnnotationSubjectCommand[ExtensionPackage],
    context_class=ExtensionPackageCommandContext,
):
    """Common base for CREATE/DROP EXTENSION PACKAGE commands."""

    @classmethod
    def _classname_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.NamedDDL,
        context: sd.CommandContext
    ) -> sn.UnqualName:
        # Mangle the version into the schema name so multiple versions of
        # the same package can coexist.
        assert isinstance(astnode, qlast.ExtensionPackageCommand)
        parsed_version = verutils.parse_version(astnode.version.value)
        mangled = sn.get_specialized_name(
            sn.UnqualName(astnode.name.name), 'pkg', str(parsed_version))
        return sn.UnqualName(mangled)

    @classmethod
    def _cmd_tree_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.DDLOperation,
        context: sd.CommandContext,
    ) -> sd.Command:
        # Only the stdlib build and the test harness may define packages.
        if not (context.stdmode or context.testmode):
            raise errors.UnsupportedFeatureError(
                'user-defined extension packages are not supported yet',
                context=astnode.context
            )
        return super()._cmd_tree_from_ast(schema, astnode, context)
class CreateExtensionPackage(
    ExtensionPackageCommand,
    sd.CreateObject[ExtensionPackage],
):
    """Implements CREATE EXTENSION PACKAGE."""

    astnode = qlast.CreateExtensionPackage

    @classmethod
    def _cmd_tree_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.DDLOperation,
        context: sd.CommandContext,
    ) -> CreateExtensionPackage:
        cmd = super()._cmd_tree_from_ast(schema, astnode, context)
        assert isinstance(cmd, CreateExtensionPackage)
        assert isinstance(astnode, qlast.CreateExtensionPackage)
        assert astnode.body.text is not None

        # Record version, raw script text and provenance flags on the command.
        cmd.set_attribute_value(
            'version', verutils.parse_version(astnode.version.value))
        cmd.set_attribute_value('script', astnode.body.text)
        cmd.set_attribute_value('builtin', context.stdmode)
        if not cmd.has_attribute_value('internal'):
            cmd.set_attribute_value('internal', False)
        return cmd

    def _apply_field_ast(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        node: qlast.DDLOperation,
        op: sd.AlterObjectProperty,
    ) -> None:
        assert isinstance(node, qlast.CreateExtensionPackage)
        if op.property == 'script':
            # Re-parse the stored script so it round-trips as a DDL block.
            parsed = cast(
                List[qlast.DDLOperation],
                qlparser.parse_block(op.new_value))
            node.body = qlast.NestedQLBlock(text=op.new_value, commands=parsed)
        elif op.property == 'version':
            node.version = qlast.StringConstant(value=str(op.new_value))
        else:
            super()._apply_field_ast(schema, context, node, op)
class DeleteExtensionPackage(
    ExtensionPackageCommand,
    sd.DeleteObject[ExtensionPackage],
):
    """Implements DROP EXTENSION PACKAGE."""

    astnode = qlast.DropExtensionPackage
class ExtensionCommandContext(
    sd.ObjectCommandContext[Extension],
):
    """Command context for extension DDL operations."""
    pass
class ExtensionCommand(
    sd.ObjectCommand[Extension],
    context_class=ExtensionCommandContext,
):
    """Common base for CREATE/DROP EXTENSION commands."""
    pass
class CreateExtension(
    ExtensionCommand,
    sd.CreateObject[Extension],
):
    """Implements CREATE EXTENSION."""

    astnode = qlast.CreateExtension

    @classmethod
    def _cmd_tree_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.DDLOperation,
        context: sd.CommandContext
    ) -> CreateExtension:
        assert isinstance(astnode, qlast.CreateExtension)
        cmd = super()._cmd_tree_from_ast(schema, astnode, context)
        assert isinstance(cmd, CreateExtension)
        # An explicit version pins a specific package; otherwise the newest
        # matching package is selected in canonicalize_attributes().
        if astnode.version is not None:
            parsed_version = verutils.parse_version(astnode.version.value)
            cmd.set_attribute_value('version', parsed_version)
        return cmd

    def canonicalize_attributes(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> s_schema.Schema:
        """Resolve the extension name (and optional version) to a concrete
        ExtensionPackage and store it in the 'package' attribute."""
        schema = super().canonicalize_attributes(schema, context)
        # Match packages whose short name equals the extension name.
        filters = [
            lambda schema, pkg: (
                pkg.get_shortname(schema) == self.classname
            )
        ]
        version = self.get_attribute_value('version')
        if version is not None:
            filters.append(
                lambda schema, pkg: pkg.get_version(schema) == version,
            )
            # 'version' is only a lookup key here, not a field of Extension.
            self.discard_attribute('version')
        pkgs = list(schema.get_objects(
            type=ExtensionPackage,
            extra_filters=filters,
        ))
        if not pkgs:
            # NOTE(review): both the extension and package names render the
            # same displayname here; they coincide because package shortnames
            # equal extension names — confirm this is intentional.
            if version is None:
                raise errors.SchemaError(
                    f'cannot create extension {self.get_displayname()!r}:'
                    f' extension package {self.get_displayname()!r} does'
                    f' not exist'
                )
            else:
                raise errors.SchemaError(
                    f'cannot create extension {self.get_displayname()!r}:'
                    f' extension package {self.get_displayname()!r} version'
                    f' {str(version)!r} does not exist'
                )
        # Prefer the highest matching package version.
        pkgs.sort(key=lambda pkg: pkg.get_version(schema), reverse=True)
        self.set_attribute_value('package', pkgs[0])
        return schema

    def _get_ast(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        *,
        parent_node: Optional[qlast.DDLOperation] = None,
    ) -> Optional[qlast.DDLOperation]:
        node = super()._get_ast(schema, context, parent_node=parent_node)
        assert isinstance(node, qlast.CreateExtension)
        # Render the resolved package version back into the AST so the DDL
        # round-trips deterministically.
        pkg = self.get_resolved_attribute_value(
            'package', schema=schema, context=context)
        node.version = qlast.StringConstant(
            value=str(pkg.get_version(schema))
        )
        return node
class DeleteExtension(
    ExtensionCommand,
    sd.DeleteObject[Extension],
):
    """Implements DROP EXTENSION."""

    astnode = qlast.DropExtension
|
{
"content_hash": "5f46819629c6145bd0ee50b6e34b714d",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 79,
"avg_line_length": 28.402298850574713,
"alnum_prop": 0.6251180358829084,
"repo_name": "edgedb/edgedb",
"id": "e8418fc22dfaea32037c3f862d604db22b8ae28b",
"size": "8094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edb/schema/extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cython",
"bytes": "372837"
},
{
"name": "JavaScript",
"bytes": "7481"
},
{
"name": "Makefile",
"bytes": "1159"
},
{
"name": "Python",
"bytes": "9860929"
},
{
"name": "Rust",
"bytes": "238373"
}
],
"symlink_target": ""
}
|
"""
Credit Card Validator - Takes in a credit card number from a
common credit card vendor (Visa, MasterCard, American Express,
Discoverer) and validates it to make sure that it is a valid
number (look into how credit cards use a checksum).
This program works with *most* credit card numbers.
Uses Luhn Algorithm (http://en.wikipedia.org/wiki/Luhn_algorithm).
1. From the rightmost digit, which is the check digit, moving
left, double the value of every second digit; if product of this
doubling operation is greater than 9 (e.g., 7 * 2 = 14), then
sum the digits of the products (e.g., 10: 1 + 0 = 1, 14: 1 + 4 = 5).
2. Add together doubled digits with the undoubled digits from the
original number.
3. If the total modulo 10 is equal to 0 (if the total ends in zero)
then the number is valid according to the Luhn formula; else it is
not valid.
"""
def is_luhn_valid(number):
    """Return True if *number* (a string of decimal digits) passes the
    Luhn checksum.

    Doubling starts from the second digit from the RIGHT — the check digit
    itself is never doubled. The previous implementation doubled every
    second digit counting from the LEFT, which only happens to be correct
    for even-length numbers (Visa/MasterCard) and rejected valid odd-length
    numbers such as 15-digit American Express cards.

    :param number: card number as a digit string (no spaces)
    :return: True if the Luhn checksum is satisfied
    """
    total = 0
    for i, digit in enumerate(int(ch) for ch in reversed(number)):
        if i % 2 == 1:
            digit *= 2
            if digit > 9:
                digit -= 9  # equivalent to summing the product's two digits
        total += digit
    return total % 10 == 0


if __name__ == '__main__':
    # raw_input is Python 2, matching the rest of this script.
    number = raw_input('Enter the credit card number to check: ').replace(' ', '')
    if not number.isdigit():
        raise Exception('Invalid credit card number. Make sure it\'s all '
                        'digits (with optional spaces in between).')
    if is_luhn_valid(number):
        print('The number is valid')
    else:
        print('The number is invalid')
|
{
"content_hash": "0b90a79cc099ce789360cc8d680044d9",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 116,
"avg_line_length": 39.292682926829265,
"alnum_prop": 0.6654252017380509,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "7bb0a39cfdcd8a093f4cf7577ab996d1aa5a9b2c",
"size": "1611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dataset/python/credit_card_validator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
}
|
"""
Script used interact with different tools in hots comp builder
Load and compare data from the iv_data database.
Provide amount of players, favorite heroes, and map name.
Output a map score based on input and build best comp between players.
"""
import datetime
import argparse
import json
import re
import os
from fuzzywuzzy import process
from string import strip
from sets import Set
from ivscrape import IcyVeinsScrape
class CompBuilder:
    """Interactive console driver for the HotS comp-building tools.

    Reads hero data scraped from Icy Veins (iv_data.json) and per-player
    favorite heroes (player_data.json); the constructor flags dispatch
    straight into one of the interactive modes (hero league, team league,
    player-database builder, or database rebuild).

    NOTE(review): this module is Python 2 (print statements, raw_input,
    xrange, the legacy ``sets`` module).
    """

    def __init__(self, build=None, hero_league=None, players_build=None,
                 map_name=None, team_league=None):
        #args
        self.map_name = map_name
        #files
        # Both JSON databases live next to this module on disk.
        self.db_path = os.path.dirname(os.path.realpath(__file__))
        self.db_file = os.path.join(self.db_path, "iv_data.json")
        self.player_db_path = os.path.dirname(os.path.realpath(__file__))
        self.player_db_file = os.path.join(
            self.player_db_path, "player_data.json")
        self.player_favs = {"players": {}}
        self.db = self.open_db()
        self.player_db = self.open_player_db()
        #args control
        # Each CLI flag runs its interactive mode directly from __init__.
        if self.map_name != None:
            self.accepted_map_list()
        if hero_league == True:
            self.hero_league()
        if players_build == True:
            self.build_player_db()
        if build == True:
            self.build_iv_db()
        if team_league == True:
            self.team_league()

    def player_pick(self, pcount):
        """Prompt for which of the known players are playing.

        Returns a list of "playerN" keys into the player database.
        """
        # List every known player together with their favorite heroes.
        for i in enumerate(sorted(self.player_db["players"]), start=1):
            print "{}) {}".format(i[0],i[1])
            x = self.player_db["players"][i[1]]
            print ", ".join(str(xx) for xx in x)
        players = []
        for x in xrange(0, int(pcount)):
            x = x - 1
            num = raw_input("Which Players are playing? >")
            # NOTE(review): the "[1-N]" character class only works for
            # single-digit player counts — verify if >9 players is possible.
            regex = "{}".format(len(self.player_db["players"].keys()))
            if re.match("^[1-{}]".format(regex), num):
                players.append("player"+str(num))
            elif num == "exit":
                break
            else:
                break
        return players

    def team_league(self):
        """ build comps between players fav heros

        Pairs up heroes that the database marks as synergizing, then (for
        three players) refines the pairs into three-hero combinations and
        prints those whose map score is at least 70%.
        """
        print len(self.player_db["players"])
        while True:
            pcount = raw_input("How many Players? >")
            # Either all known players play, or a subset is picked.
            if re.match("{}".format(len(self.player_db["players"])), pcount):
                players = list(self.player_db["players"])
                break
            elif re.match("^[2-{}]$".format(len(self.player_db["players"])), pcount):
                players = self.player_pick(pcount)
                break
            else:
                print "Player count must match the length of players in database "
                print "and players count must be between 2-5"
        players.sort()
        comps = []
        # For each player's favorite hero, look for synergy entries against
        # every other player's favorites (both directions).
        for i in xrange(len(players), 0, -1):
            pop = players.pop(i-1)
            for hero in self.player_db["players"][pop]:
                heroes = hero
                for others in players:
                    op_heroes = self.player_db["players"][others]
                    for oheroes in op_heroes:
                        if heroes in self.db["database"][oheroes][oheroes+"-synergize"]:
                            comps.append(((pop, others),(heroes, oheroes)))
                        if oheroes in self.db["database"][heroes][heroes+"-synergize"]:
                            comps.append(((others, pop),(oheroes, heroes)))
            players.append(pop)
        comps = Set(comps)
        three_man = []
        four_man = []
        # Combine synergy pairs that share a hero into larger candidates.
        for i in xrange(len(comps), 0, -1):
            l_comps = list(comps)
            #print i
            #print l_comps
            pop = l_comps.pop(i-1)
            #print pop[0]
            #print "main"
            if len(players) == 3:
                for l in l_comps: # 3 man comps
                    a,b,c,d, = pop[1][0], pop[1][1], l[1][0], l[1][1]
                    #print a,b,c,d
                    if (a == c or
                        a == d):
                        #if (a1 == c3 or a1 == d4) and (b2 == c3 or b2 == d4):
                        #print "could be good", pop, l
                        three_man.append((pop, l))
                    if (b == c or
                        b == d):
                        #if (a1 == c3 or a1 == d4) and (b2 == c3 or b2 == d4):
                        #print "could be good", pop, l
                        three_man.append((pop, l))
            if len(players) == 4:
                # NOTE(review): the 4-man path only prints candidates for
                # removal; four_man is never populated — incomplete feature?
                for l in l_comps: # 4 man comps
                    if (pop[0][0] == l[0][0] or
                        pop[0][0] == l[0][1] or
                        pop[0][1] == l[0][0] or
                        pop[0][1] == l[0][1]
                        ):
                        print "to be removed {}{}".format(pop, l)
            l_comps.append(pop)
        #refine three_man
        if len(players) == 3:
            three_comp = []
            three_final = Set()
            for t in three_man:
                a,b,c,d = t[0][0],t[0][1],t[1][0],t[1][1]
                a1,a2,c3,c4 = a[0], a[1], c[0], c[1]
                #print a,b,c,d
                #print a1,a2,c3,c4
                #print t
                # Drop pairings where both players overlap (same duo twice).
                # NOTE(review): three_man is mutated while being iterated —
                # entries after a removal may be skipped; confirm intent.
                if ((a1 == c3 or
                     a1 == c4) and
                    (a2 == c3 or
                     a2 == c4)):
                    three_man.remove(t)
                else:
                    p1 = [i for i in a]
                    p2 = [t for t in c]
                    h1 = [s for s in b]
                    h2 = [x for x in d]
                    pl = p1 + p2
                    he = h1 + h2
                    pl_set = Set(pl)
                    he_set = Set(he)
                    #ms = self.map_score_combo(he_set)
                    three_final.add(he_set)
            # Print combos of 3+ distinct heroes with a strong map score.
            for t in three_final:
                if len(t) > 2 and self.map_name is not None:
                    ms = self.map_score_combo(t)
                    if ms >= 70.0:
                        print "Three Man: {} MapScore:{:.2g}".format(', '.join(t), ms)

    def hero_league(self):
        """ looks for enemy team picks to see if your heros are
        counterd by their picks.
        ::todo:: remove from list if enemy picks hero
        ::todo:: print map score and picks before enemy picks
        ::todo:: enter bans
        ::todo:: enter allie picks to build comp
        """
        print "Which player are you?"
        # Show the known players and their favorite heroes.
        for i in enumerate(sorted(self.player_db["players"]), start=1):
            print "{}) {}".format(i[0],i[1])
            x = self.player_db["players"][i[1]]
            print ", ".join(str(xx) for xx in x)
        while True:
            y = raw_input("> ")
            if int(y) <= len(self.player_db["players"]) and int(y) > 0:
                player = self.player_db["players"]["player"+y]
                break
            else:
                continue
        # Main loop: each enemy pick prunes any of our favorites it counters.
        while True:
            print "Enter enemy team's hero picks or enter 'exit' to close."
            z = raw_input(" >")
            # Fuzzy-match the typed name against the hero database keys.
            choice = process.extractOne(z, self.db["database"].keys())
            # NOTE(review): removing from `player` while iterating it may
            # skip entries when two consecutive favorites are countered.
            for h in player:
                if choice[0] in self.db["database"][str(h)][str(h)+"-counter"]:
                    player.remove(h)
            print "You entered {}".format(choice[0])
            print "Your remaing picks!"
            if self.map_name != None:
                # With a map selected, show each remaining pick's map score.
                for i in player:
                    aa = str(i)
                    ms = self.map_score(str(i))
                    ms = (float(ms) / 5.0) * 100.0
                    print "Hero:{} MapScore:{}%".format(aa, ms)
            else:
                for i in player:
                    print "Hero:{}".format(str(i))
            if len(player) <= 0:
                print "All you favorite heros are counterd. Good luck!"
                break
            if z == "exit":
                break

    def map_score_combo(self, heroes):
        """get map score for multiple heroes

        Returns the combined score as a percentage of the maximum possible
        (5.0 points per hero).
        """
        h_count = len(heroes)
        total_ms = 0.0
        for h in heroes:
            ms = self.map_score(str(h))
            total_ms = total_ms + ms
        score = (float(total_ms) / (5.0 * float(h_count))) * 100.0
        return score

    def map_score(self, hero):
        """
        get map score for a single hero input.
        give hero spit of maps score.
        0.0 for None
        1.0 for weakmap
        3.0 for averagemap
        5.0 for strong map
        """
        x = hero
        y = self.map_name
        if y in self.db["database"][x][x+"-strong-maps"]:
            score = 5.0
        elif y in self.db["database"][x][x+"-average-maps"]:
            score = 3.0
        elif y in self.db["database"][x][x+"-weak-maps"]:
            score = 1.0
        else:
            score = 0.0
        return score

    def build_player_db(self):
        """ build player and favorite hero database

        Interactively collects each player's favorite heroes (fuzzy-matched
        against the hero database) and writes them to player_data.json.
        """
        #figure out regex
        while True:
            pcount = raw_input("How many Players? >")
            if re.match("^[1-5]$", pcount):
                break
            else:
                print "Player count must be between 1-5"
        hero_list = [h for h in self.db["database"]]
        hero_list.sort()
        player_list = ["player" + str(i + 1) for i in xrange(int(pcount))]
        # Print the hero roster in four columns.
        # NOTE(review): zip() truncates — up to 3 trailing heroes are not
        # displayed when the roster size isn't a multiple of 4.
        for a,b,c,d in zip(hero_list[::4],hero_list[1::4],hero_list[2::4], hero_list[3::4]):
            print "{:<30}{:<30}{:<30}{:<}".format(a,b,c,d)
        for p in player_list:
            p_favs = []
            while True:
                mes = "Type a hero name for {} or 'exit' to end. >".format(p)
                x = raw_input(mes)
                # Accept the fuzzy match only above a 50% confidence score.
                choice = process.extractOne(x, self.db["database"].keys())
                if choice[1] >= 50:
                    print "{} added to {} favorites".format(choice[0], p)
                    p_favs.append(choice[0])
                    self.player_favs["players"][p] = p_favs
                elif x == "exit":
                    break
                elif choice[1] < 50:
                    print "{} is not an accepted hero".format(x)
                    continue
                else:
                    break
        """
        for p in player_list:
            p_favs = []
            while True:
                mes = "Select a hero number for {} >".format(p)
                x = raw_input(mes)
                print "Enter 0 to move to next player or exit"
                if int(x) <= len(hero_list) and int(x) > 0:
                    p_favs.append(hero_list[int(x)- 1])
                    self.player_favs["players"][p] = p_favs
                elif int(x) == 0:
                    break
                else:
                    print "Number selected fell outside of hero range"
                    break
        """
        # Stamp and persist the collected favorites.
        self.player_favs["datebase created"] = "{:%Y-%m-%d %H:%M:%S}"\
            .format(datetime.datetime.now())
        with open(self.player_db_file, 'w') as f:
            json.dump(self.player_favs, f, indent=2, sort_keys=True)

    def build_iv_db(self):
        """ rebuild database

        Delegates to the IcyVeinsScrape helper to re-scrape and persist
        the hero database.
        """
        iv = IcyVeinsScrape()
        iv.get_hero_list()
        iv.build_db()
        iv.write_all_to_db()

    def open_player_db(self):
        """ open and write player db to variable
        """
        # NOTE(review): only warns when the file is missing; the subsequent
        # open() will still raise IOError — confirm intended behavior.
        if os.path.isfile(self.player_db_file) == False:
            print "player datebase file non-existent "\
                  "run -p option"
        with open(self.player_db_file) as f:
            db = json.load(f)
        return db

    def open_db(self):
        """ open and write iv database to variable
        """
        # NOTE(review): same missing-file caveat as open_player_db().
        if os.path.isfile(self.db_file) == False:
            print "hero database file non-existent "\
                  "run -b option"
        with open(self.db_file) as f:
            db = json.load(f)
        return db

    def accepted_map_list(self):
        """ get accepted map name if user inputs incorrect map name
        ::todo::change to use fuzzywuzzy

        If the map passed on the command line isn't in the database's
        map list, prompt the user to pick one by number.
        """
        if self.map_name not in self.db["database"]["map-list"]:
            print "Map name not accepted! Choose from map list"
            for i in enumerate(self.db["database"]["map-list"],
                               start=1):
                print "{}) {}".format(i[0], i[1])
            while True:
                x = raw_input(">")
                if int(x) <= len(self.db["database"]["map-list"])\
                        and int(x) > 0:
                    self.map_name = self.db["database"]["map-list"][int(x) -1]
                    break
            return self.map_name
        else:
            return self.map_name
def parser():
    """Build the command-line interface and parse sys.argv.

    Returns the parsed argparse.Namespace with one attribute per mode flag.
    """
    cli = argparse.ArgumentParser(
        description="Use options to choose different play styles")
    cli.add_argument(
        '-b', '--build', action='store_true',
        help="use this option to re-build the database from 'ivscrape'")
    cli.add_argument(
        '-H', '--hero_league', action='store_true',
        help="will launch in hero league mode, will remove heroes from "
             "favorite hero list as they are counterd by the enemy teams picks ")
    cli.add_argument(
        '-m', '--map_name',
        help="input map that is being played on "
             "will provide you with hero map score ")
    cli.add_argument(
        '-p', '--players_build', action='store_true',
        help=" will launch player database builder mode. Used to build "
             "player count and each players favorite heroes")
    cli.add_argument(
        '-t', '--team_league', action='store_true',
        help=" will launch team league mode, used to find best "
             "comps between players favorite heroes")
    return cli.parse_args()
def main():
    # Parse the CLI flags and hand them straight to CompBuilder, whose
    # constructor dispatches into the selected interactive mode.
    args = parser()
    cb = CompBuilder(args.build, args.hero_league, args.players_build,
                     args.map_name, args.team_league)


if __name__ == ("__main__"):
    main()
|
{
"content_hash": "1c7c190bb7e7e0f7914e1681748fa217",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 92,
"avg_line_length": 35.91708542713568,
"alnum_prop": 0.460930395243092,
"repo_name": "jup3/hots",
"id": "32a1984ed4a9cd3757bcd07786a270938de5c43e",
"size": "14317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comp_builder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22193"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """No-op migration: carries no operations, only keeps the migration
    graph consistent after 0059."""

    dependencies = [
        ('report_ia', '0059_remove_aantalstarts_totaal_vliegdagen'),
    ]

    operations = [
    ]
|
{
"content_hash": "22397f5cc34485d6c88dd09242901b1a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 18.307692307692307,
"alnum_prop": 0.6722689075630253,
"repo_name": "jefke-glider/gliding",
"id": "2174ebce1efee3f856e83929c62a579dae4ce018",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ato/report_ia/migrations/0060_auto_20170406_1543.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "32404"
},
{
"name": "PostScript",
"bytes": "63220"
},
{
"name": "Python",
"bytes": "140918"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the ``tags`` field from the Album model."""

    dependencies = [
        ('imager_images', '0011_auto_20170201_1906'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='album',
            name='tags',
        ),
    ]
|
{
"content_hash": "eaa88cd982fd24e3dbe972a4dc948fe4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 53,
"avg_line_length": 19,
"alnum_prop": 0.5851393188854489,
"repo_name": "clair3st/django-imager",
"id": "b5b1c9ca90642d2b94959382d80fbafa8fc64df6",
"size": "396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_images/migrations/0012_remove_album_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4601"
},
{
"name": "HTML",
"bytes": "20459"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "93575"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
class TestBase(IPUOpTest):
    """Checks paddle.fluid.layers.expand with static expand_times on IPU."""

    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_data_feed(self):
        # Same payload in both precisions so fp16/fp32 runs are comparable.
        data = np.random.uniform(size=[2, 3, 1])
        self.feed_fp32 = {'in_0': data.astype(np.float32)}
        self.feed_fp16 = {'in_0': data.astype(np.float16)}

    def set_feed_attr(self):
        feeds = self.feed_fp32
        self.feed_shape = [v.shape for v in feeds.values()]
        self.feed_list = list(feeds.keys())
        self.feed_dtype = [v.dtype for v in feeds.values()]

    def set_op_attrs(self):
        self.attrs = {"expand_times": [1, 2, 2]}

    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32"
        )
        out = paddle.fluid.layers.expand(x, **self.attrs)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        self.run_op_test(exec_mode)

    def test(self):
        # Run every supported execution mode, then compare their outputs.
        for mode in IPUOpTest.ExecutionMode:
            if not self.skip_mode(mode):
                self.build_model()
                self.run_model(mode)
        self.check()
class TestCase1(TestBase):
    """Variant: expand_times supplied at runtime via a fill_constant tensor."""

    def set_data_feed(self):
        x = np.random.uniform(size=[2, 2])
        self.feed_fp32 = {"x": x.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16)}

    def set_feed_attr(self):
        feeds = self.feed_fp32
        self.feed_shape = [v.shape for v in feeds.values()]
        self.feed_list = list(feeds.keys())
        self.feed_dtype = [v.dtype for v in feeds.values()]

    def set_op_attrs(self):
        # No static expand_times: build_model creates it as a graph constant.
        self.attrs = {}

    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32"
        )
        # Expand every dimension by a factor of 2, given as an int32 tensor.
        factor = paddle.fluid.layers.fill_constant(
            shape=[len(self.feed_shape[0])], dtype="int32", value=2
        )
        out = paddle.fluid.layers.expand(
            x, expand_times=factor, **self.attrs
        )
        self.fetch_list = [out.name]
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "2c59d09449881ef9b0ca5ffa971a7f39",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 30.03846153846154,
"alnum_prop": 0.5821596244131455,
"repo_name": "luotao1/Paddle",
"id": "784a6a41a41f563d650ba559252739f3c3af54cb",
"size": "2954",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
}
|
from urllib import parse
import pkg_resources
from spring.cbgen_helpers import backoff, quiet, time_all, timeit
# Detect the installed couchbase SDK major version at import time.
cb_version = pkg_resources.get_distribution("couchbase").version
# The couchbase SDK API changed incompatibly after 2.x; this module's
# classes target the 2.x API, so the imports are guarded on the version.
# NOTE(review): on a 3.x SDK these names stay undefined and the classes
# below would fail at construction — confirm 3.x is handled elsewhere.
if cb_version[0] == '2':
    from couchbase import experimental, subdocument
    from couchbase.bucket import Bucket
    from couchbase.n1ql import N1QLQuery
    from couchbase.views.params import ViewQuery
    from txcouchbase.connection import Connection as TxConnection
    experimental.enable()
class CBAsyncGen:
    """Asynchronous (Twisted/txcouchbase) wrapper around a couchbase 2.x
    bucket.

    Each KV method returns whatever the underlying client call returns
    (a Deferred for the tx connection).
    """

    TIMEOUT = 120  # seconds

    def __init__(self, **kwargs):
        # kwargs are forwarded verbatim to the txcouchbase Connection
        # (bucket, host, password, ...).
        self.client = TxConnection(quiet=True, **kwargs)
        self.client.timeout = self.TIMEOUT

    def create(self, key: str, doc: dict, persist_to: int = 0,
               replicate_to: int = 0, ttl: int = 0):
        # Upsert with observe-based durability (persist/replicate counts).
        return self.client.upsert(key, doc,
                                  persist_to=persist_to,
                                  replicate_to=replicate_to,
                                  ttl=ttl)

    def create_durable(self, key: str, doc: dict, durability: int = None, ttl: int = 0):
        # Upsert with server-side synchronous durability instead of observe.
        return self.client.upsert(key, doc,
                                  durability_level=durability,
                                  ttl=ttl)

    def read(self, key: str):
        return self.client.get(key)

    def update(self, key: str, doc: dict, persist_to: int = 0,
               replicate_to: int = 0, ttl: int = 0):
        # Identical to create(): upsert covers both insert and update.
        return self.client.upsert(key, doc,
                                  persist_to=persist_to,
                                  replicate_to=replicate_to,
                                  ttl=ttl)

    def update_durable(self, key: str, doc: dict, durability: int = None, ttl: int = 0):
        return self.client.upsert(key, doc,
                                  durability_level=durability,
                                  ttl=ttl)

    def delete(self, key: str):
        return self.client.remove(key)
class CBGen(CBAsyncGen):
    """Synchronous couchbase 2.x client with timing/backoff instrumentation.

    KV methods delegate to CBAsyncGen but wrap the calls with the
    spring.cbgen_helpers decorators (quiet/backoff/time_all), discarding
    return values — the workload only records latency and errors.
    """

    TIMEOUT = 120  # seconds
    N1QL_TIMEOUT = 600

    def __init__(self, ssl_mode: str = 'none', n1ql_timeout: int = None, **kwargs):
        params = parse.urlencode(kwargs["connstr_params"])
        # TLS modes switch the scheme and pin the cluster root certificate.
        if ssl_mode == 'data' or ssl_mode == 'n2n':
            scheme = 'couchbases'
            cert = '&certpath=root.pem'
        else:
            scheme = 'couchbase'
            cert = ''
        connection_string = '{}://{}/{}?password={}&{}{}'.format(
            scheme, kwargs['host'], kwargs['bucket'], kwargs['password'],
            params, cert)
        self.client = Bucket(connection_string=connection_string)
        self.client.timeout = self.TIMEOUT
        self.client.n1ql_timeout = n1ql_timeout if n1ql_timeout else self.N1QL_TIMEOUT

    @quiet
    @backoff
    def create(self, *args, **kwargs):
        super().create(*args, **kwargs)

    @quiet
    @backoff
    def create_durable(self, *args, **kwargs):
        super().create_durable(*args, **kwargs)

    @time_all
    def read(self, *args, **kwargs):
        super().read(*args, **kwargs)

    @time_all
    def update(self, *args, **kwargs):
        super().update(*args, **kwargs)

    @time_all
    def update_durable(self, *args, **kwargs):
        super().update_durable(*args, **kwargs)

    @quiet
    def delete(self, *args, **kwargs):
        super().delete(*args, **kwargs)

    @timeit
    def view_query(self, ddoc: str, view: str, query: ViewQuery):
        # Drain the iterator so the full view response is actually fetched.
        tuple(self.client.query(ddoc, view, query=query))

    @quiet
    @timeit
    def n1ql_query(self, n1ql_query: N1QLQuery):
        tuple(self.client.n1ql_query(n1ql_query))
class SubDocGen(CBGen):
    """CBGen variant exercising sub-document and XATTR operations."""

    @quiet
    @timeit
    def read(self, key: str, field: str):
        # Fetch a single field of the document instead of the whole body.
        self.client.lookup_in(key, subdocument.get(path=field))

    @quiet
    @timeit
    def update(self, key: str, field: str, doc: dict):
        # Write back only the one field taken from the generated document.
        new_field_value = doc[field]
        self.client.mutate_in(key, subdocument.upsert(path=field,
                                                      value=new_field_value))

    @quiet
    @timeit
    def read_xattr(self, key: str, field: str):
        # Same as read(), but against the extended-attribute namespace.
        self.client.lookup_in(key, subdocument.get(path=field,
                                                   xattr=True))

    @quiet
    @timeit
    def update_xattr(self, key: str, field: str, doc: dict):
        # Store the whole doc under an XATTR path, creating parents as needed.
        self.client.mutate_in(key, subdocument.upsert(path=field,
                                                      value=doc,
                                                      xattr=True,
                                                      create_parents=True))
|
{
"content_hash": "f7bc574ed6560971055e586f379fda48",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 88,
"avg_line_length": 34.13380281690141,
"alnum_prop": 0.5364142768722922,
"repo_name": "couchbase/perfrunner",
"id": "872832a2d87f4b8aa7537b7187a73a560cf9abe5",
"size": "4847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spring/cbgen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1853"
},
{
"name": "Dockerfile",
"bytes": "2761"
},
{
"name": "Go",
"bytes": "37531"
},
{
"name": "Groovy",
"bytes": "46365"
},
{
"name": "HCL",
"bytes": "40219"
},
{
"name": "Inno Setup",
"bytes": "25281"
},
{
"name": "JavaScript",
"bytes": "14317"
},
{
"name": "Makefile",
"bytes": "2405"
},
{
"name": "Python",
"bytes": "2416900"
},
{
"name": "Ruby",
"bytes": "154"
},
{
"name": "Shell",
"bytes": "5016"
}
],
"symlink_target": ""
}
|
"""
docker_registry.drivers.azure
~~~~~~~~~~~~~~~~~~~~~~~~~~
Microsoft Azure Blob Storage driver.
"""
import io
import logging
import os
import shutil
from docker_registry.core import driver
from docker_registry.core import exceptions
from docker_registry.core import lru
import azure
from azure.storage import BlobService
logger = logging.getLogger(__name__)
class Storage(driver.Base):
    """Docker registry storage driver backed by Azure Blob Storage.

    All registry paths are stored as blobs in a single container created
    with 'blob' (publicly readable) access.
    """

    supports_bytes_range = True

    def __init__(self, path=None, config=None):
        self._config = config
        self._container = self._config.azure_storage_container

        protocol = 'https' if self._config.azure_use_https else 'http'
        acct_name = self._config.azure_storage_account_name
        acct_key = self._config.azure_storage_account_key
        self._blob = BlobService(
            account_name=acct_name, account_key=acct_key, protocol=protocol)

        self._init_container()
        logger.debug("Initialized azureblob storage driver")

    def _init_container(self):
        '''Initializes image container on Azure blob storage if the container
        does not exist.
        '''
        created = self._blob.create_container(
            self._container, x_ms_blob_public_access='blob',
            fail_on_exist=False)
        if created:
            logger.info('Created blob container for image registry.')
        else:
            logger.debug('Registry container already exists.')
        return created

    @lru.get
    def get_content(self, path):
        """Return the full content of the blob at *path*.

        Raises FileNotFoundError when the blob does not exist.
        """
        try:
            return self._blob.get_blob(self._container, path)
        except azure.WindowsAzureMissingResourceError:
            raise exceptions.FileNotFoundError('%s is not there' % path)

    @lru.set
    def put_content(self, path, content):
        """Store *content* as a block blob at *path*; returns the path."""
        self._blob.put_blob(self._container, path, content, 'BlockBlob')
        return path

    def stream_read(self, path, bytes_range=None):
        """Yield the blob at *path* in buffer_size chunks.

        *bytes_range* is an inclusive (start, end) byte pair; when given,
        only that slice of the blob is yielded.
        """
        try:
            f = io.BytesIO()
            self._blob.get_blob_to_file(self._container, path, f)
            if bytes_range:
                f.seek(bytes_range[0])
                total_size = bytes_range[1] - bytes_range[0] + 1
                # BUGFIX: nb_bytes was never initialized, so every ranged
                # read crashed with UnboundLocalError on the first chunk.
                nb_bytes = 0
            else:
                f.seek(0)
            while True:
                buf = None
                if bytes_range:
                    # Bytes Range is enabled
                    buf_size = self.buffer_size
                    if nb_bytes + buf_size > total_size:
                        # We make sure we don't read out of the range
                        buf_size = total_size - nb_bytes
                    if buf_size > 0:
                        buf = f.read(buf_size)
                        nb_bytes += len(buf)
                    else:
                        # We're at the end of the range
                        buf = ''
                else:
                    buf = f.read(self.buffer_size)
                if not buf:
                    break
                yield buf
        except IOError:
            raise exceptions.FileNotFoundError('%s is not there' % path)

    def stream_write(self, path, fp):
        """Upload the whole stream *fp* as a block blob at *path*."""
        self._blob.put_block_blob_from_file(self._container, path, fp)

    def list_directory(self, path=None):
        """Return the names of all blobs whose names start with *path*/.

        Raises FileNotFoundError when nothing matches.
        NOTE(review): the path=None default would crash on .endswith();
        callers appear to always pass a path — confirm.
        """
        if not path.endswith('/'):
            path += '/'  # path=a would list a/b.txt as well as 'abc.txt'
        blobs = list(self._blob.list_blobs(self._container, path))
        if not blobs:
            raise exceptions.FileNotFoundError('%s is not there' % path)
        return [b.name for b in blobs]

    def exists(self, path):
        """Return True if a blob exists at exactly *path*."""
        try:
            self._blob.get_blob_properties(self._container, path)
            return True
        except azure.WindowsAzureMissingResourceError:
            return False

    @lru.remove
    def remove(self, path):
        """Delete the blob at *path*, or every blob under the *path* prefix.

        Raises FileNotFoundError when neither a blob nor a prefix matches.
        """
        if self.exists(path):
            self._blob.delete_blob(self._container, path)
            return
        # Not a single blob: treat the path as a directory prefix.
        blobs = list(self._blob.list_blobs(self._container, path))
        if not blobs:
            raise exceptions.FileNotFoundError('%s is not there' % path)
        for b in blobs:
            self._blob.delete_blob(self._container, b.name)

    def get_size(self, path):
        """Return the size in bytes of the blob at *path*."""
        try:
            properties = self._blob.get_blob_properties(self._container, path)
            return int(properties['content-length'])  # auto-converted to long
        except azure.WindowsAzureMissingResourceError:
            raise exceptions.FileNotFoundError('%s is not there' % path)
|
{
"content_hash": "b281b27f3a2669206e4d9d39fb83d527",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 78,
"avg_line_length": 31.843971631205672,
"alnum_prop": 0.5665924276169265,
"repo_name": "ahmetalpbalkan/docker-registry-driver-azure",
"id": "37d246d54a16d24cb5d3a581f0fef9bf17e1e38c",
"size": "5114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker_registry/drivers/azureblob.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7785"
}
],
"symlink_target": ""
}
|
import sys
def readfile(filepath):
    """Read a text file and return its lines, stripped of surrounding
    whitespace (including the trailing newline).

    Empty lines are kept as empty strings.
    """
    # Use a context manager so the handle is closed even if reading fails;
    # iterating the file replaces the manual readline/break loop.
    with open(filepath, 'r') as fp:
        return [line.strip() for line in fp]
def maketable(content_arr):
    """Render *content_arr* as an HTML table fragment.

    The first entry is the table title, the second is the tab-separated
    header row, and the remaining entries are tab-separated data rows.
    """
    title = content_arr[0]
    rows = content_arr[1:]

    parts = ["<b>" + title + "</b><br/>\n",
             "<table cellpadding=0 cellspacing=0 border=1>\n"]
    for idx, row in enumerate(rows):
        cells = row.strip().split('\t')
        if idx == 0:
            # Header row: shaded background, bold cells.
            parts.append("<tr bgcolor=#eeeeee>\n")
            for cell in cells:
                parts.append("<td width=100><b>" + cell + "</b></td>\n")
            parts.append("</tr>")
        else:
            parts.append("<tr>\n")
            for cell in cells:
                parts.append("<td align=left>" + cell + "</td>\n")
            parts.append("</tr>\n")
    parts.append("</table><p/>\n")
    return "".join(parts)
def makeplain(content_arr):
    """Wrap the given lines in a single HTML paragraph.

    Lines are concatenated without separators, exactly as stored.
    """
    return "<p>\n" + "".join(content_arr) + "</p>\n" + "<p/>\n"
def convert(filetype, arr):
    """Render *arr* as HTML according to *filetype* ('table' or 'plain').

    Unknown types produce an empty string.
    """
    if filetype == "table":
        return maketable(arr)
    if filetype == "plain":
        return makeplain(arr)
    return ''
def formathtml(filepath):
    """Format a file to HTML.

    The file alternates marker lines ('table' or 'plain') with content
    lines; each content run is rendered according to the marker that
    introduced it.
    """
    content_arr = readfile(filepath)
    if not content_arr:
        return ""

    pending = []
    filetype = ''
    html = ''
    for s in content_arr:
        if s in ['table', 'plain']:
            if pending:
                # Bug fix: the original assigned `filetype = s` *before*
                # flushing, so accumulated lines were rendered with the
                # NEXT section's type instead of their own. Lines seen
                # before any marker keep the old behavior (rendered with
                # the first marker encountered).
                html += convert(filetype if filetype else s, pending)
                pending = []
            filetype = s
        else:
            pending.append(s)
    # Flush the trailing section.
    if filetype and pending:
        html += convert(filetype, pending)
    return html
if __name__ == "__main__":
    # Python 2 CLI: [encoding] file1 [file2 ...]; prints one HTML document
    # wrapping the converted content of every input file to stdout.
    if len(sys.argv) < 2:
        print >> sys.stderr, "need source file path"
        sys.exit(1)
    # The first argument may optionally name the output charset; gbk is the
    # default.
    encoding = 'gbk'
    if sys.argv[1].lower() in ['utf-8', 'utf8', 'gbk', 'gb2312', 'gb18030']:
        encoding = sys.argv[1]
        files = sys.argv[2:]
    else:
        files = sys.argv[1:]
    # Document head: charset meta tag plus minimal styling for body/tables.
    body = "<html>\n"
    body += "<head>\n"
    body += "<meta name=\"Content-Type\" content=\"text/html; charset=%s\" />\n" % (encoding)
    body += "<style type=\"text/css\">\n"
    body += "body {font-size: 14px; MARGIN: auto; FONT-FAMILY: arial}\n"
    body += "table {border-collapse:collapse; margin:0px, 2px; padding:2px; font-size:12px; }\n</style>\n</head>\n<body>\n"
    # Each input file becomes one section followed by a horizontal rule.
    for f in files:
        body += formathtml(f)
        body += "<hr size=1><br>\n";
    body += "</body>\n</html>\n";
    print body
|
{
"content_hash": "8cd0ba90e80543b543d561bd1c20562b",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 123,
"avg_line_length": 25.905982905982906,
"alnum_prop": 0.5123721544044869,
"repo_name": "guozengxin/common_tools",
"id": "cc7cf581e6b63e1d84f874db86fccc0650f5dc6f",
"size": "3071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "send_mail/convert_html.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "2818"
},
{
"name": "Perl",
"bytes": "1279"
},
{
"name": "Python",
"bytes": "35031"
},
{
"name": "Shell",
"bytes": "4888"
}
],
"symlink_target": ""
}
|
import logging
from random import randint
from django import template
from django.contrib import auth
from django.template.response import TemplateResponse
from connection import views
from connection.models import *
__author__ = 'Sami'
register = template.Library()
@register.assignment_tag()
def random_number():
    """Template tag: return a random integer between 1 and 12 inclusive."""
    return randint(1,12)
logger = logging.getLogger(__name__)
@register.assignment_tag()
def addProjet(request, num):
    """Record project *num* as the one the logged-in user is helping with,
    then render the full project list.

    Falls back to an unsaved, empty Project when no project_id matches
    *num* (preserving the original behavior).
    """
    userD = SuperUser.objects.get(user=auth.get_user(request))
    # Find the project whose project_id equals num; the last match wins,
    # as in the original linear scan.
    temp = Project()
    for candidate in Project.objects.all():
        if candidate.project_id == num:
            temp = candidate
    # Leftover logger.error("test") / logger.error(temp) debug calls were
    # removed; a single debug-level trace is kept instead.
    logger.debug("addProjet: selected project %s for user %s", temp, userD)
    userD.projet_aide = temp
    userD.save()
    context = {
        'liste': Project.objects.all()
    }
    return TemplateResponse(request, "forms/index.html", context)
|
{
"content_hash": "5717ff70544b4d3bc959c2de35253493",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 66,
"avg_line_length": 24.904761904761905,
"alnum_prop": 0.6768642447418738,
"repo_name": "le2s/LibreCoach",
"id": "58f3b16467e63cd76caaf8d77e2d8303bee5f6e9",
"size": "1046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "connection/templatetags/extra_tag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "129337"
},
{
"name": "HTML",
"bytes": "45376"
},
{
"name": "JavaScript",
"bytes": "135606"
},
{
"name": "Python",
"bytes": "29275"
}
],
"symlink_target": ""
}
|
import os
import scipy.io as sio
from matplotlib.pyplot import savefig, imshow, annotate, imread, figure
import numpy as np
from utils import split_filename
def getname(filename):
    """Map an uploaded filename to its upload path and the matching
    .jpg download path, as an (upload, download) pair."""
    _, stem, ext = split_filename(filename)
    upload_path = os.path.join('uploads', stem + ext)
    download_path = os.path.join('downloads', stem + '.jpg')
    return upload_path, download_path
def get_centroids(labels):
    """Return the centroid (mean coordinate along each axis) of every
    distinct value in the *labels* array, in sorted-value order."""
    return [np.mean(np.asarray(np.nonzero(labels == value)), axis=1)
            for value in np.unique(labels)]
def get_vals(labels, data):
    """For each distinct label value, record its ROI index and the mean of
    *data* over the elements carrying that label.

    Returns a dict with parallel lists under "roi" and "mean".
    """
    vals = {"roi": [], "mean": []}
    for idx, value in enumerate(np.unique(labels)):
        vals["roi"].append(idx)
        vals["mean"].append(np.mean(data[labels == value]))
    return vals
def convert(files):
    """Persist an uploaded .mat file, plot its single matrix with each
    region's centroid annotated, and save the figure as a .jpg.

    Returns {"download": <jpg path>} on success; implicitly None when the
    .mat file does not contain exactly one user variable.
    """
    upload_path, download_path = getname(files["filename"])
    with open(upload_path, 'w') as handle:
        handle.write(files["body"])

    contents = sio.loadmat(upload_path)
    # Ignore MATLAB bookkeeping entries (__header__, __version__, ...).
    user_keys = [k for k in contents.keys() if not k.startswith("__")]
    if len(user_keys) == 1:
        data = contents[user_keys[0]]
        figure()
        imshow(data)
        for idx, centroid in enumerate(get_centroids(data)):
            # Centroids are (row, col); annotate wants (x, y), hence [::-1].
            annotate('%d' % idx, centroid[::-1], color="black")
        savefig(download_path)
        return {"download": download_path}
def extract(files):
    """Persist an uploaded (.mat, .jpg) pair, render the label matrix with
    annotated centroids, and write per-region mean intensities of the
    image to downloads/vals.tsv.

    Returns {"download": <annotated figure>, "vals": <tsv path>} when the
    .mat file contains exactly one user variable; implicitly None otherwise.
    """
    # Pick the first .mat and first .jpg upload from the batch.
    matfile = [f for f in files if f["filename"].endswith(".mat")][0]
    imgfile = [f for f in files if f["filename"].endswith(".jpg")][0]
    # NOTE(review): both files are written in text mode ('w'); for binary
    # .mat/.jpg payloads 'wb' would be safer — confirm what type
    # files[...]["body"] actually is.
    oldimgname, newimgname = getname(imgfile["filename"])
    foo = open(oldimgname,'w')
    foo.write(imgfile["body"])
    foo.close()
    oldmatname, newmatname = getname(matfile["filename"])
    foo = open(oldmatname,'w')
    foo.write(matfile["body"])
    foo.close()
    bar = sio.loadmat(oldmatname)
    # Skip MATLAB bookkeeping entries (__header__, __version__, ...).
    keys = [b for b in bar.keys() if not b.startswith("__")]
    if len(keys) == 1:
        data = bar[keys[0]]
        figure(1)
        imshow(data)
        centroids = get_centroids(data)
        for i,c in enumerate(centroids):
            # Centroids are (row, col); annotate wants (x, y), hence [::-1].
            annotate('%d'%i,c[::-1],color="black");
        savefig(newmatname)
        # Mean image intensity per labelled region.
        vals = get_vals(data,imread(oldimgname))
        valsfile = open("downloads/vals.tsv","w")
        valsfile.write("roi\tmean\n")
        # Starts at 1 — presumably roi 0 is background; verify against the
        # labelling convention of the uploaded masks.
        for i in range(1,len(vals["roi"])):
            valsfile.write("%d\t%f\n"%(vals["roi"][i],vals["mean"][i]))
        valsfile.close()
        return {"download": newmatname,"vals": "downloads/vals.tsv"}
|
{
"content_hash": "3b9dbb4581b9676c0b8076b1211c8aa3",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 82,
"avg_line_length": 31.098765432098766,
"alnum_prop": 0.591901548233426,
"repo_name": "akeshavan/img_utils",
"id": "6e2ce277e18ea6d9d56ffd1bac263819be0d5d74",
"size": "2519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/mat_viewer/lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1719"
},
{
"name": "JavaScript",
"bytes": "7318"
},
{
"name": "Python",
"bytes": "22894"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# URL configuration: petitions app at the site root, plus the admin site.
urlpatterns = [
    url(r'^', include('petycja_norweskie.petitions.urls', namespace='petitions')),

    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
]

# Serve user-uploaded media through Django (development-style serving).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        urlpatterns += [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ]
|
{
"content_hash": "720f178859303c388c87aa2b40f33e24",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 110,
"avg_line_length": 39.515151515151516,
"alnum_prop": 0.6886503067484663,
"repo_name": "watchdogpolska/petycja-norweskie",
"id": "349393e23efda40c314f29c95b43a43f10b36c91",
"size": "1328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22434"
},
{
"name": "HTML",
"bytes": "32390"
},
{
"name": "JavaScript",
"bytes": "40842"
},
{
"name": "Python",
"bytes": "102071"
}
],
"symlink_target": ""
}
|
"""Datasets."""
# --- import --------------------------------------------------------------------------------------
import pathlib
from .. import kit as wt_kit
# --- define --------------------------------------------------------------------------------------
here = pathlib.Path(__file__).parent.resolve()
# --- container class -----------------------------------------------------------------------------
class DatasetContainer(object):
    """Namespace object whose attributes point at bundled example data."""

    def _from_files(self, dirname, prefix=""):
        """Add one attribute per file found directly under *dirname*.

        Parameters
        ----------
        dirname : string
            Directory name.
        prefix : string
            Prefix.
        """
        for child in (here / dirname).iterdir():
            attr_name = prefix + wt_kit.string2identifier(child.name.split(".")[0])
            setattr(self, attr_name, child)

    def _from_directory(self, dirname, prefix=""):
        """Add a single attribute holding the list of files in *dirname*.

        Parameters
        ----------
        dirname : string
            Directory name.
        prefix : string
            Prefix.
        """
        children = list((here / dirname).iterdir())
        attr_name = prefix + wt_kit.string2identifier(dirname.name)
        setattr(self, attr_name, children)
# --- fill ----------------------------------------------------------------------------------------
# One container per instrument / file format, populated from the example
# data files shipped next to this module.
BrunoldrRaman = DatasetContainer()
BrunoldrRaman._from_files(here / "BrunoldrRaman")

Cary = DatasetContainer()
Cary._from_files("Cary")

COLORS = DatasetContainer()
COLORS._from_files(here / "COLORS" / "v0.2", prefix="v0p2_")
COLORS._from_files(here / "COLORS" / "v2.2", prefix="v2p2_")

JASCO = DatasetContainer()
JASCO._from_files("JASCO")

KENT = DatasetContainer()
KENT._from_directory(here / "KENT" / "LDS821 TRSF")
KENT._from_directory(here / "KENT" / "LDS821 DOVE")
KENT._from_directory(here / "KENT" / "PbSe 2D delay B")

LabRAM = DatasetContainer()
LabRAM._from_files(here / "LabRAM")

ocean_optics = DatasetContainer()
ocean_optics._from_files("ocean_optics")

PyCMDS = DatasetContainer()
PyCMDS._from_files("PyCMDS")

Shimadzu = DatasetContainer()
Shimadzu._from_files("Shimadzu")

Solis = DatasetContainer()
Solis._from_files("Solis")

spcm = DatasetContainer()
spcm._from_files("spcm")

Tensor27 = DatasetContainer()
Tensor27._from_files("Tensor27")

wt5 = DatasetContainer()
wt5._from_files(here / "wt5" / "v1.0.0", prefix="v1p0p0_")
wt5._from_files(here / "wt5" / "v1.0.1", prefix="v1p0p1_")


# --- pretty namespace ----------------------------------------------------------------------------


# NOTE(review): LabRAM, Shimadzu and spcm are populated above but absent
# from __all__ — confirm whether their omission is intentional.
__all__ = [
    "BrunoldrRaman",
    "Cary",
    "COLORS",
    "JASCO",
    "KENT",
    "ocean_optics",
    "PyCMDS",
    "Solis",
    "Tensor27",
    "wt5",
]
|
{
"content_hash": "f9c21c24921b26df75973d00d2f32f6e",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 99,
"avg_line_length": 24.178571428571427,
"alnum_prop": 0.5107090103397341,
"repo_name": "wright-group/WrightTools",
"id": "e9762b445899b5590ae8bcfaca7ff9da0bdeca59",
"size": "2708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WrightTools/datasets/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AGS Script",
"bytes": "88851"
},
{
"name": "Python",
"bytes": "604837"
},
{
"name": "Shell",
"bytes": "54"
},
{
"name": "TeX",
"bytes": "11769"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
import urllib
import urllib2
import logging
import re
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils.soup import get_soup
from flexget.utils.tools import urlopener
from flexget.utils.search import torrent_availability, normalize_unicode
timeout = 10
import socket
socket.setdefaulttimeout(timeout)
log = logging.getLogger('newtorrents')
class NewTorrents:
    """NewTorrents urlrewriter and search plugin."""

    def __init__(self):
        # URLs already rewritten to direct download links; url_rewritable()
        # returns False for these so they are not resolved a second time.
        self.resolved = []

    # UrlRewriter plugin API
    def url_rewritable(self, task, entry):
        # Return true only for urls that can and should be resolved
        if entry['url'].startswith('http://www.newtorrents.info/down.php?'):
            return False
        return entry['url'].startswith('http://www.newtorrents.info') and not entry['url'] in self.resolved

    # UrlRewriter plugin API
    def url_rewrite(self, task, entry):
        """Rewrite a newtorrents search/detail URL in *entry* to a torrent
        download URL, or raise UrlRewritingError when none is found."""
        url = entry['url']
        if (url.startswith('http://www.newtorrents.info/?q=') or
           url.startswith('http://www.newtorrents.info/search')):
            # Search URL: use the first (best-seeded) search result.
            results = self.entries_from_search(entry['title'], url=url)
            if not results:
                raise UrlRewritingError("No matches for %s" % entry['title'])
            url = results[0]['url']
        else:
            # Detail-page URL: scrape the download link from the page.
            url = self.url_from_page(url)

        if url:
            entry['url'] = url
            self.resolved.append(url)
        else:
            raise UrlRewritingError('Bug in newtorrents urlrewriter')

    # Search plugin API
    def search(self, task, entry, config=None):
        """Return the union of results for every search string of *entry*."""
        entries = set()
        for search_string in entry.get('search_string', [entry['title']]):
            entries.update(self.entries_from_search(search_string))
        return entries

    @plugin.internet(log)
    def url_from_page(self, url):
        """Parses torrent url from newtorrents download page"""
        try:
            page = urlopener(url, log)
            data = page.read()
        except urllib2.URLError:
            raise UrlRewritingError('URLerror when retrieving page')
        # The page exposes the torrent URL in a JS copy('...') call.
        p = re.compile("copy\(\'(.*)\'\)", re.IGNORECASE)
        f = p.search(data)
        if not f:
            # the link in which plugin relies is missing!
            raise UrlRewritingError('Failed to get url from download page. Plugin may need a update.')
        else:
            return f.group(1)

    @plugin.internet(log)
    def entries_from_search(self, name, url=None):
        """Parses torrent download url from search results"""
        name = normalize_unicode(name)
        if not url:
            url = 'http://www.newtorrents.info/search/%s' % urllib.quote(name.encode('utf-8'), safe=b':/~?=&%')

        log.debug('search url: %s' % url)

        html = urlopener(url, log).read()
        # fix </SCR'+'IPT> so that BS does not crash
        # TODO: should use beautifulsoup massage
        html = re.sub(r'(</SCR.*?)...(.*?IPT>)', r'\1\2', html)

        soup = get_soup(html)
        # saving torrents in dict
        torrents = []
        for link in soup.find_all('a', attrs={'href': re.compile('down.php')}):
            torrent_url = 'http://www.newtorrents.info%s' % link.get('href')
            release_name = link.parent.next.get('title')
            # quick dirty hack
            seed = link.find_next('td', attrs={'class': re.compile('s')}).renderContents()
            if seed == 'n/a':
                seed = 0
            else:
                try:
                    seed = int(seed)
                except ValueError:
                    log.warning('Error converting seed value (%s) from newtorrents to integer.' % seed)
                    seed = 0

            #TODO: also parse content_size and peers from results
            torrents.append(Entry(title=release_name, url=torrent_url, torrent_seeds=seed,
                                  search_sort=torrent_availability(seed, 0)))
        # sort with seed number Reverse order
        torrents.sort(reverse=True, key=lambda x: x.get('search_sort', 0))
        # choose the torrent
        if not torrents:
            # No hits: retry with the name truncated at its last dash
            # (release names often end with a "-GROUP" suffix).
            dashindex = name.rfind('-')
            if dashindex != -1:
                return self.entries_from_search(name[:dashindex])
            else:
                return torrents
        else:
            if len(torrents) == 1:
                log.debug('found only one matching search result.')
            else:
                log.debug('search result contains multiple matches, sorted %s by most seeders' % torrents)
            return torrents
@event('plugin.register')
def register_plugin():
    """Register NewTorrents with FlexGet as both a urlrewriter and a
    search plugin (plugin API version 2)."""
    plugin.register(NewTorrents, 'newtorrents', groups=['urlrewriter', 'search'], api_ver=2)
|
{
"content_hash": "620faa25af32d6d394a86e8cf860e416",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 111,
"avg_line_length": 37.674418604651166,
"alnum_prop": 0.5936213991769548,
"repo_name": "vfrc2/Flexget",
"id": "3daef35b4c9be15fefefc134a6c95e9fa8b75019",
"size": "4860",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "flexget/plugins/urlrewrite_newtorrents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "2053210"
}
],
"symlink_target": ""
}
|
# Pull everything from the auth module and run its test() entry point.
# NOTE(review): presumably test() (re)creates the auth datastore — confirm
# against the auth module.
from auth import *
test()
|
{
"content_hash": "71c17fd916df359bdd9a7528500c8884",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 18,
"avg_line_length": 9,
"alnum_prop": 0.6666666666666666,
"repo_name": "val314159/old.authsvr",
"id": "36a10304755a95c63f038467467ec195fe1ca8e8",
"size": "27",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recreate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1134"
},
{
"name": "Python",
"bytes": "4021"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
}
|
"""The signac framework aids in the management of large and
heterogeneous data spaces.
It provides a simple and robust data model to create a
well-defined indexable storage layout for data and metadata.
This makes it easier to operate on large data spaces,
streamlines post-processing and analysis and makes data
collectively accessible."""
from __future__ import absolute_import
from . import contrib
from . import db
from . import cite
from . import errors
from . import sync
from .contrib import Project
from .contrib import get_project
from .contrib import init_project
from .contrib import fetch
from .contrib import export_one
from .contrib import export
from .contrib import export_to_mirror
from .contrib import export_pymongo
from .contrib import fetch_one # deprecated
from .contrib import filesystems as fs
from .contrib import Collection
from .contrib import index_files
from .contrib import index
from .contrib import RegexFileCrawler
from .contrib import MasterCrawler
from .contrib import SignacProjectCrawler
from .db import get_database
from .core.jsondict import buffer_reads_writes as buffered
from .core.jsondict import in_buffered_mode as is_buffered
from .core.jsondict import flush_all as flush
from .core.jsondict import get_buffer_size
from .core.jsondict import get_buffer_load
# Package version string.
__version__ = '0.9.3'

# Names re-exported at package level via `from signac import *`.
__all__ = ['__version__', 'contrib', 'db', 'errors', 'sync',
           'cite',
           'Project', 'get_project', 'init_project',
           'get_database', 'fetch', 'fetch_one',
           'export_one', 'export', 'export_to_mirror',
           'Collection',
           'export_pymongo', 'fs',
           'index_files', 'index',
           'RegexFileCrawler',
           'MasterCrawler',
           'SignacProjectCrawler',
           'buffered', 'is_buffered', 'flush', 'get_buffer_size', 'get_buffer_load',
           ]
|
{
"content_hash": "5ff17718f3c9ca596cd8a5856aa5bff6",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 84,
"avg_line_length": 34.370370370370374,
"alnum_prop": 0.7133620689655172,
"repo_name": "csadorf/signac",
"id": "6eea4e4b8ae049c03132a134e8718377fe54da0b",
"size": "2002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "signac/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "673188"
},
{
"name": "Shell",
"bytes": "6879"
},
{
"name": "TeX",
"bytes": "938"
}
],
"symlink_target": ""
}
|
from flask import Flask, request, make_response, render_template, redirect, session, url_for
import json
import os
import datetime
import config
import loader
import dbutils
# Flask application instance serving ./static and ./templates.
# NOTE(review): debug=True should not be enabled in production, and a
# secret key drawn from os.urandom at import time invalidates all sessions
# on every restart and breaks multi-process deployments — confirm intent.
mypseudo = Flask(__name__, static_folder="static", template_folder="templates")
mypseudo.debug = True
mypseudo.secret_key = os.urandom(24)
@mypseudo.route('/')
def index():
    """Landing page; unauthenticated visitors are sent to the login form."""
    if 'user' not in session:
        return redirect(url_for('login'))
    return render_template('index.html')
@mypseudo.route('/callbacks', methods=['GET'])
def callbacks():
    """Return the list of all callbacks; login required."""
    if 'user' not in session:
        return redirect(url_for('login'))
    return dbutils.listCallbacks()
@mypseudo.route('/callback/<id>', methods=['GET','POST', 'DELETE'])
def callback(id=False):
    """Read, save or delete a single callback depending on the HTTP verb;
    login required."""
    if 'user' not in session:
        return redirect(url_for('login'))
    if request.method == 'GET':
        return dbutils.getCallback(id)
    if request.method == 'POST':
        return dbutils.saveCallback(request.data)
    if request.method == 'DELETE':
        return dbutils.deleteCallback(request.data)
@mypseudo.route('/script/<name>')
def script(name=""):
    """Return usage information for script *name* as pretty-printed JSON;
    login required."""
    if 'user' not in session:
        # Consistency fix: every other view redirects unauthenticated
        # users to 'login'; this one went to 'index', which merely bounced
        # them to login with an extra redirect.
        return redirect(url_for('login'))
    return json.dumps(loader.getUsage(name), sort_keys=True, indent=4,
                      separators=(',', ': '), cls=dbutils.DateTimeEncoder,
                      ensure_ascii=True)
@mypseudo.route('/scripts')
def scripts():
    """Return the list of valid scripts as pretty-printed JSON; login
    required."""
    if 'user' not in session:
        return redirect(url_for('login'))
    return json.dumps(loader.loadValid(), sort_keys=True, indent=4,
                      separators=(',', ': '))
@mypseudo.route('/login', methods=['POST','GET'])
def login():
    """Show the login form; on POST, check the submitted credentials
    against the configured user/password pair."""
    if 'user' in session:
        return redirect(url_for('index'))
    if request.method == 'POST' and 'user' in request.form and 'passwd' in request.form:
        credentials_ok = (request.form['user'] == config.config['user']
                          and request.form['passwd'] == config.config['passwd'])
        if credentials_ok:
            session['user'] = request.form['user']
            return redirect(url_for('index'))
    return render_template('login.html')
@mypseudo.route('/logout')
def logout():
    """Forget the logged-in user (if any) and return to the landing page."""
    session.pop('user', None)
    return redirect(url_for('index'))
if __name__ == '__main__':
    # Development server, listening on all interfaces.
    mypseudo.run(host='0.0.0.0')
|
{
"content_hash": "b5c66a1ae738a05555732fab5545580d",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 139,
"avg_line_length": 28.77777777777778,
"alnum_prop": 0.6906370656370656,
"repo_name": "wufufufu/mypseudo",
"id": "707b2e453382c9d94a65bcac7951ab05dae19a0a",
"size": "2094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "116259"
},
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "PHP",
"bytes": "90"
},
{
"name": "Perl",
"bytes": "937"
},
{
"name": "Python",
"bytes": "14066"
},
{
"name": "Shell",
"bytes": "532"
}
],
"symlink_target": ""
}
|
import os
import platform
import shutil
import behave
from test.behave_utils.utils import drop_database_if_exists, start_database_if_not_started,\
create_database, \
run_command, check_user_permissions, run_gpcommand
from steps.mirrors_mgmt_utils import MirrorMgmtContext
from steps.gpconfig_mgmt_utils import GpConfigContext
from steps.gpssh_exkeys_mgmt_utils import GpsshExkeysMgmtContext
from gppylib.db import dbconn
def before_all(context):
    """Fail fast when the installed behave version is older than 1.2.6."""
    # map() returns a lazy iterator on Python 3, which cannot be ordered
    # against a list; materializing it first makes the comparison work on
    # both Python 2 (where map already returned a list) and Python 3.
    if list(map(int, behave.__version__.split('.'))) < [1, 2, 6]:
        raise Exception("Requires at least behave version 1.2.6 (found %s)" % behave.__version__)
def before_feature(context, feature):
    """Per-feature setup: drop leftover test databases and build the
    fixtures that the analyzedb, minirepro and gppkg features rely on."""
    # we should be able to run gpexpand without having a cluster initialized
    tags_to_skip = ['gpexpand', 'gpaddmirrors', 'gpstate', 'gpmovemirrors',
                    'gpconfig', 'gpssh-exkeys', 'gpstop', 'gpinitsystem', 'cross_subnet']
    if set(context.feature.tags).intersection(tags_to_skip):
        return

    # Remove databases that earlier runs may have left behind.
    drop_database_if_exists(context, 'testdb')
    drop_database_if_exists(context, 'bkdb')
    drop_database_if_exists(context, 'fullbkdb')
    drop_database_if_exists(context, 'schematestdb')

    if 'analyzedb' in feature.tags:
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'incr_analyze')
        create_database(context, 'incr_analyze')
        drop_database_if_exists(context, 'incr_analyze_2')
        create_database(context, 'incr_analyze_2')
        context.conn = dbconn.connect(dbconn.DbURL(dbname='incr_analyze'), unsetSearchPath=False)
        context.dbname = 'incr_analyze'

        # setting up the tables that will be used
        context.execute_steps(u"""
        Given there is a regular "ao" table "t1_ao" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
        And there is a regular "heap" table "t2_heap" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
        And there is a regular "ao" table "t3_ao" with column name list "a,b,c" and column type list "int,text,real" in schema "public"
        And there is a hard coded ao partition table "sales" with 4 child partitions in schema "public"
        """)

    if 'minirepro' in feature.tags:
        start_database_if_not_started(context)
        minirepro_db = 'minireprodb'
        drop_database_if_exists(context, minirepro_db)
        create_database(context, minirepro_db)
        context.conn = dbconn.connect(dbconn.DbURL(dbname=minirepro_db), unsetSearchPath=False)
        context.dbname = minirepro_db
        # Small three-table schema with stacked views used by minirepro.
        dbconn.execSQL(context.conn, 'create table t1(a integer, b integer)')
        dbconn.execSQL(context.conn, 'create table t2(c integer, d integer)')
        dbconn.execSQL(context.conn, 'create table t3(e integer, f integer)')
        dbconn.execSQL(context.conn, 'create view v1 as select a, b from t1, t3 where t1.a=t3.e')
        dbconn.execSQL(context.conn, 'create view v2 as select c, d from t2, t3 where t2.c=t3.f')
        dbconn.execSQL(context.conn, 'create view v3 as select a, d from v1, v2 where v1.a=v2.c')
        dbconn.execSQL(context.conn, 'insert into t1 values(1, 2)')
        dbconn.execSQL(context.conn, 'insert into t2 values(1, 3)')
        dbconn.execSQL(context.conn, 'insert into t3 values(1, 4)')
        context.conn.commit()

    if 'gppkg' in feature.tags:
        run_command(context, 'bash demo/gppkg/generate_sample_gppkg.sh buildGppkg')
        run_command(context, 'cp -f /tmp/sample-gppkg/sample.gppkg test/behave/mgmt_utils/steps/data/')
def after_feature(context, feature):
    """Per-feature teardown: close the connections opened by
    before_feature and restart the cluster after gpconfig features."""
    if 'analyzedb' in feature.tags:
        context.conn.close()
    if 'minirepro' in feature.tags:
        context.conn.close()
    if 'gpconfig' in feature.tags:
        # gpconfig changes server settings; restart so they are reset for
        # subsequent features.
        context.execute_steps(u'''
            Then the user runs "gpstop -ar"
            And gpstop should return a return code of 0
            ''')
def before_scenario(context, scenario):
    """Per-scenario setup: honor skip tags, attach per-utility helper
    contexts, and ensure a running database for features that need one."""
    if "skip_fixme_ubuntu18.04" in scenario.effective_tags:
        distro = platform.linux_distribution()[0]
        release = platform.linux_distribution()[1]
        if distro.lower() == "ubuntu" and release == "18.04":
            scenario.skip("skipping scenario tagged with @skip_fixme_ubuntu18.04")
            return

    if "skip" in scenario.effective_tags:
        scenario.skip("skipping scenario tagged with @skip")
        return

    feature_tags = context.feature.tags

    # Attach the management contexts some utilities' steps expect.
    if 'gpmovemirrors' in feature_tags:
        context.mirror_context = MirrorMgmtContext()
    if 'gpconfig' in feature_tags:
        context.gpconfig_context = GpConfigContext()
    if 'gpssh-exkeys' in feature_tags:
        context.gpssh_exkeys_context = GpsshExkeysMgmtContext(context)

    # These features manage cluster state themselves — no DB setup here.
    tags_to_skip = ['gpexpand', 'gpaddmirrors', 'gpstate', 'gpmovemirrors',
                    'gpconfig', 'gpssh-exkeys', 'gpstop', 'gpinitsystem', 'cross_subnet']
    if set(feature_tags).intersection(tags_to_skip):
        return

    if 'analyzedb' not in feature_tags:
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'testdb')
def after_scenario(context, scenario):
    """Per-scenario teardown: clean up tablespaces, restart clusters where
    required, and restore filesystem state changed by the scenario."""
    #TODO: you'd think that the scenario.skip() in before_scenario() would
    # cause this to not be needed
    if "skip" in scenario.effective_tags:
        return

    if 'tablespaces' in context:
        for tablespace in context.tablespaces.values():
            tablespace.cleanup()

    if 'gpstop' in scenario.effective_tags:
        context.execute_steps(u'''
            # restart the cluster so that subsequent tests re-use the existing demo cluster
            Then the user runs "gpstart -a"
            And gpstart should return a return code of 0
            ''')

    # NOTE: gpconfig after_scenario cleanup is in the step `the gpconfig context is setup`
    tags_to_skip = ['gpexpand', 'gpaddmirrors', 'gpstate', 'gpinitstandby',
                    'gpconfig', 'gpstop', 'gpinitsystem', 'cross_subnet']
    if set(context.feature.tags).intersection(tags_to_skip):
        return

    # Remove scratch directories created by these features.
    tags_to_cleanup = ['gpmovemirrors', 'gpssh-exkeys']
    if set(context.feature.tags).intersection(tags_to_cleanup):
        if 'temp_base_dir' in context:
            shutil.rmtree(context.temp_base_dir)

    tags_to_not_restart_db = ['analyzedb', 'gpssh-exkeys']
    if not set(context.feature.tags).intersection(tags_to_not_restart_db):
        start_database_if_not_started(context)

    home_dir = os.path.expanduser('~')

    # Restore write permission on $HOME if a scenario removed it, and put
    # back the gpAdminLogs directory that was moved aside.
    if not check_user_permissions(home_dir, 'write') and hasattr(context, 'orig_write_permission')\
            and context.orig_write_permission:
        run_command(context, 'sudo chmod u+w %s' % home_dir)

    if os.path.isdir('%s/gpAdminLogs.bk' % home_dir):
        shutil.move('%s/gpAdminLogs.bk' % home_dir, '%s/gpAdminLogs' % home_dir)

    if 'gpssh' in context.feature.tags:
        # Tear down the network-emulation qdisc added by gpssh scenarios.
        run_command(context, 'sudo tc qdisc del dev lo root netem')

    # for cleaning up after @given('"{path}" has its permissions set to "{perm}"')
    if (hasattr(context, 'path_for_which_to_restore_the_permissions') and
            hasattr(context, 'permissions_to_restore_path_to')):
        os.chmod(context.path_for_which_to_restore_the_permissions, context.permissions_to_restore_path_to)
    elif hasattr(context, 'path_for_which_to_restore_the_permissions'):
        raise Exception('Missing permissions_to_restore_path_to for %s' %
                        context.path_for_which_to_restore_the_permissions)
    elif hasattr(context, 'permissions_to_restore_path_to'):
        raise Exception('Missing path_for_which_to_restore_the_permissions despite the specified permission %o' %
                        context.permissions_to_restore_path_to)
|
{
"content_hash": "2a74550ece233e9221eab3152eb5ae69",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 139,
"avg_line_length": 47.969325153374236,
"alnum_prop": 0.6568614912392889,
"repo_name": "ashwinstar/gpdb",
"id": "08ed9425f99adfc924566646b0a33b3d2b204c48",
"size": "7819",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gpMgmt/test/behave/mgmt_utils/environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3724"
},
{
"name": "Awk",
"bytes": "836"
},
{
"name": "Batchfile",
"bytes": "12768"
},
{
"name": "C",
"bytes": "42705726"
},
{
"name": "C++",
"bytes": "2839973"
},
{
"name": "CMake",
"bytes": "3425"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "223"
},
{
"name": "DTrace",
"bytes": "3873"
},
{
"name": "Dockerfile",
"bytes": "11990"
},
{
"name": "Emacs Lisp",
"bytes": "3488"
},
{
"name": "Fortran",
"bytes": "14863"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "342783"
},
{
"name": "HTML",
"bytes": "653351"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "229553"
},
{
"name": "M4",
"bytes": "114378"
},
{
"name": "Makefile",
"bytes": "455445"
},
{
"name": "Objective-C",
"bytes": "38376"
},
{
"name": "PLSQL",
"bytes": "160856"
},
{
"name": "PLpgSQL",
"bytes": "5722287"
},
{
"name": "Perl",
"bytes": "798287"
},
{
"name": "PowerShell",
"bytes": "422"
},
{
"name": "Python",
"bytes": "3267988"
},
{
"name": "Raku",
"bytes": "698"
},
{
"name": "Roff",
"bytes": "32437"
},
{
"name": "Ruby",
"bytes": "81695"
},
{
"name": "SQLPL",
"bytes": "313387"
},
{
"name": "Shell",
"bytes": "453847"
},
{
"name": "TSQL",
"bytes": "3294076"
},
{
"name": "XS",
"bytes": "6983"
},
{
"name": "Yacc",
"bytes": "672568"
},
{
"name": "sed",
"bytes": "1231"
}
],
"symlink_target": ""
}
|
from pysnmp.carrier.asynsock.dispatch import AsynsockDispatcher
from pysnmp.carrier.asynsock.dgram import udp, udp6
from pyasn1.codec.ber import decoder
from pysnmp.proto import api
from pysnmp.proto.rfc1902 import ObjectName
from urlparse import urlparse
from websocket import create_connection
import httplib
import threading, time, random
import socket
import json
import config
import logging
import os
# Module-level logger for the trap receiver.
logger = logging.getLogger("trap.SNMP_TrapRcv")
#===============================================================================
# Configure
# Usage:
#     add a rule in Rules
#     add a dispatch in Dispatchers
#===============================================================================
# NOTE(review): this module-level `Rules` list is shadowed by the
# `class Rules` defined below, so it becomes unreachable after import —
# confirm whether the class or the list should be renamed.
Rules=[
        {'host':'10.137.130.129','trap_oid':ObjectName('1.3.6.1.4.1.2011.5.25.191.3.1')},
      ]
Dispatchers=[
        {'target':'http://10.135.34.246:9000/channels/2/data'},
      ]
# NOTE: reliability of SNMP trap delivery and of the HTTP POST forwarding is
# not yet handled; to be considered.  (Restored from a mis-encoded comment.)
#===============================================================================
# Match Rules:which data to which ...
#===============================================================================
class Rules(object):
    """Abstract base for trap-matching rules.

    Subclasses override equal()/key()/value(); the base versions are
    intentional no-ops that match nothing.
    """

    def __init__(self):
        pass

    def equal(self, rule):
        # The base rule never matches; subclasses implement the comparison.
        pass

    def key(self):
        # No lookup key at the abstract level.
        return None

    def value(self):
        # No payload value at the abstract level.
        return None
# Subscription criterion: matches received traps by source host and trap OID.
class TrapRules(Rules):
    """Subscription rule that matches SNMP traps by source host and trap OID."""

    def __init__(self, host, trap_oid):
        self.type = 'TRAP'
        self.host = host
        self.trap_oid = trap_oid
        super(TrapRules, self).__init__()

    def equal(self, rule):
        # Two rules are equal when both the host and the trap OID agree.
        return self.host == rule.host and self.trap_oid == rule.trap_oid

    def key(self):
        # Lookup key: type + host + OID, must line up with SnmpTrap.key().
        return str(self.type) + str(self.host) + str(self.trap_oid)

    def value(self):
        return str(self.host)
#===============================================================================
# which data define
#===============================================================================
# XML templates used by cbFun to render a received trap.  One <vb> entry per
# varbind is substituted into the <vbs> slot.  A community field was
# apparently planned but is commented out.
#community="%s"
# SNMPv1 layout: timestamp, agent address, enterprise OID, generic/specific
# trap ids, then the variable bindings.
v1trapformat = '''
<snmpTrapPdu xmlns="http://huawei.com/common/trap">
<snmpv1trap>
<timestamp>%s</timestamp>
<agentaddr>%s</agentaddr>
<eid>%s</eid>
<gid>%s</gid>
<sid>%s</sid>
<vbs>
%s
</vbs>
</snmpv1trap>
</snmpTrapPdu>
'''
# SNMPv2c layout: timestamp, agent address, trap OID, then the varbinds.
v2trapformat = '''
<snmpTrapPdu xmlns="http://huawei.com/common/trap">
<snmpv2trap>
<timestamp>%s</timestamp>
<agentaddr>%s</agentaddr>
<trapoid>%s</trapoid>
<vbs>
%s
</vbs>
</snmpv2trap>
</snmpTrapPdu>
'''
class Notify(object):
    """Abstract notification; concrete subclasses supply key() and body()."""

    def __init__(self):
        pass

    def key(self):
        # No dispatch key at the base level.
        pass

    def body(self):
        # No payload at the base level.
        pass
class SnmpTrap(Notify):
    """A received SNMP trap, normalised for dispatching.

    Wraps the pysnmp transport address and decoded varbind list, and exposes
    a dispatch key ("TRAP" + host + trap OID) plus a JSON-ready body.
    """
    # Class-level placeholders; real values are assigned per instance below.
    msgVer = None
    transportDomain=None
    transportAddress=None
    varBinds=None
    def __init__(self,msgVer,transportDomain,transportAddress,varBinds):
        # msgVer is pysnmp's protocol constant; the branches below treat
        # 1 as SNMPv2c and 0 as SNMPv1.
        self.msgVer = msgVer
        self.transportDomain = transportDomain
        self.host = transportAddress[0] #ipaddress only
        self.port = transportAddress[1]
        self.varBinds = varBinds
        self.trap_oid = 1
        if msgVer == 1:
            # SNMPv2c: the trap OID is the value of the second varbind.
            # NOTE(review): the varBinds[1][1][0][0][2] path depends on the
            # pysnmp VarBindList layout — confirm against the pysnmp version.
            self.trap_oid = varBinds[1][1][0][0][2] #ObjectName
        #print 'Notification %s received from %s' % (self.trap_oid, transportAddress)
        logger.warning('Notification %s received from %s' % (self.trap_oid, transportAddress))
        if msgVer == 0:
            # SNMPv1 carries no single trap OID; fall back to the default 1.
            #print 'Notification received from %s' %(str(transportAddress))
            logger.warning('Notification received from %s'%(str(transportAddress)))
            self.trap_oid = 1
        super(SnmpTrap,self).__init__()
    def key(self):
        # Dispatch key; mirrors TrapRules.key() so rules can be matched.
        return 'TRAP'+str(self.host)+str(self.trap_oid)
    def body(self):
        # JSON-serialisable payload describing the trap.
        return {'body':{'host':self.host,'trap_oid':str(self.trap_oid),'msgVer':self.msgVer,'varBinds':str(self.varBinds)}}
        #return {'host':self.host,'trap_oid':str(self.trap_oid),'msgVer':self.msgVer,'varBinds':str(self.varBinds)}
        #return {'data':{"host":"1.1.1.1",
        #                'trap_oid':'1.3.6.1'}
        #}
#===============================================================================
#
#===============================================================================
class Dispatcher():
    """Forwards received SNMP notifications to the channels object.

    dispatch() hands every notification (plus its rendered XML record) to
    the channels object supplied at construction time; the rule-matching
    logic sketched inside dispatch() is currently disabled.
    """
    # Class-level (shared) containers; currently unused by dispatch().
    filter=[]
    match_dict={} #{'key':[a,b,c]}
    def __init__(self, mychannels):
        # mychannels must expose dispatch(notify, traprecord).
        self.channels = mychannels
    def dispatch(self,notify,traprecord):
        # Fan the notification out to the channels; matching is disabled.
        self.channels.dispatch(notify,traprecord)
        """ key = ''
        if notify.key() in self.match_dict:
            key = notify.key()
        elif 'TRAP' in self.match_dict:
            key = 'TRAP'
        if key != '': """
        # Note: notification delivery is currently unreliable (best effort).
    def http_post(self,httpurl,body):
        """POST an XML trap record (converted to JSON) to httpurl; best effort —
        failures are printed and swallowed."""
        #_,netloc,path,_,_,_= urlparse(httpurl)
        from httpc_httplib import httpclient
        try:
            hc = httpclient()
        except:
            print 'create httpclient fail'
            return
        #print 'send to ',httpurl
        #print 'send body:\n',body
        from xmlTojson import isdk_convert_xml2json
        try:
            # XML -> dict -> normalised JSON string -> dict before posting.
            body = isdk_convert_xml2json(body)
            body = json.dumps(body, sort_keys=True, indent=4, separators=(', ',': '))
            body=json.loads(body)
            status,_ = hc._post(httpurl,body)
        except:
            print 'post to %s fail'%httpurl
            return
class AsynsockDispatcherEX(AsynsockDispatcher):
    """AsynsockDispatcher with a cooperative stop flag.

    The stock runDispatcher() loops until all jobs finish (never, since the
    receiver registers a permanent job); this subclass also checks
    `thread_stop` so the receiver thread can be shut down from outside.
    """

    def __init__(self):
        self.thread_stop = False
        AsynsockDispatcher.__init__(self)

    def runDispatcher(self, timeout=0.0):
        """Poll sockets until no work remains or stop() has been called.

        timeout: poll interval in seconds; falsy values fall back to
        self.timeout.
        """
        # Hoisted out of the loop: the original re-executed these imports on
        # every iteration of the polling loop.
        # NOTE: asyncore is deprecated (removed in Python 3.12); pysnmp's
        # asynsock layer still relies on it here.
        from asyncore import poll
        from time import time
        while (self.jobsArePending() or self.transportsAreWorking()) \
                and not self.thread_stop:
            poll(timeout and timeout or self.timeout, self.getSocketMap())
            self.handleTimerTick(time())
        logger.debug('quit from runDispatcher')

    def stop(self):
        # Signal runDispatcher() to exit after the current poll cycle.
        self.thread_stop = True
import threading
import time
class SNMP_NotifyRcv_Thread(threading.Thread):
    """Background thread that listens for SNMP notifications over UDP/IPv4
    and forwards each decoded trap to the registered dispatchers."""

    def __init__(self, ip, port):
        threading.Thread.__init__(self)
        # Per-instance dispatcher list.  (This used to be a mutable class
        # attribute, so every receiver thread shared one list.)
        self._dispatchers = []
        self.notifyDispatcher = AsynsockDispatcherEX()
        self.notifyDispatcher.registerRecvCbFun(self.cbFun)
        # UDP/IPv4 listener; job #1 keeps the dispatcher loop alive forever.
        try:
            self.notifyDispatcher.registerTransport(
                udp.domainName, udp.UdpSocketTransport().openServerMode((ip, int(port))))
            self.notifyDispatcher.jobStarted(1)
        except Exception as e:
            logger.error('failed to create the socket : %s' % e)

    def run(self):
        """Thread main loop: pump the pysnmp dispatcher until stop()."""
        try:
            # Dispatcher will never finish as job#1 never reaches zero.
            self.notifyDispatcher.runDispatcher(1.0)
        except:
            # Close transports before propagating the failure.
            self.notifyDispatcher.closeDispatcher()
            raise
        logger.debug('quit from run')

    def stop(self):
        """Ask the dispatcher loop to exit, then close its transports."""
        self.notifyDispatcher.stop()
        time.sleep(2)  # give runDispatcher one poll cycle to see the flag
        self.notifyDispatcher.closeDispatcher()

    def register_dispatcher(self, dispatcher):
        # dispatcher must expose dispatch(notify, traprecord).
        self._dispatchers.append(dispatcher)

    def cbFun(self, transportDispatcher, transportDomain, transportAddress, wholeMsg):
        """pysnmp receive callback: decode every PDU in wholeMsg, render each
        trap as an XML record and hand it to all registered dispatchers."""
        while wholeMsg:
            msgVer = int(api.decodeMessageVersion(wholeMsg))
            if msgVer in api.protoModules:
                pMod = api.protoModules[msgVer]
            else:
                logger.warning('Unsupported SNMP version %s' % msgVer)
                return
            reqMsg, wholeMsg = decoder.decode(
                wholeMsg, asn1Spec=pMod.Message(),
                )
            reqPDU = pMod.apiMessage.getPDU(reqMsg)
            # Extracted for completeness; the community string is not
            # included in the rendered record.
            community = pMod.apiMessage.getCommunity(reqMsg)
            timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            if reqPDU.isSameTypeWith(pMod.TrapPDU()):
                if msgVer == api.protoVersion1:
                    Enterprise = (str)(pMod.apiTrapPDU.getEnterprise(reqPDU).prettyPrint())
                    agentaddr = pMod.apiTrapPDU.getAgentAddr(reqPDU).prettyPrint()
                    GenericTrapid = pMod.apiTrapPDU.getGenericTrap(reqPDU).prettyPrint()
                    SpecificTrapid = pMod.apiTrapPDU.getSpecificTrap(reqPDU).prettyPrint()
                    varBinds = pMod.apiTrapPDU.getVarBindList(reqPDU)
                else:
                    varBinds = pMod.apiPDU.getVarBindList(reqPDU)
                    # SNMPv2c: the trap OID is the value of the second varbind.
                    trapoid = (str)(varBinds[1][1][0][0][2])
                receivedTrap = SnmpTrap(msgVer, transportDomain, transportAddress, varBinds)
                # Render each varbind as a <vb> XML fragment.
                trapvbs = ''
                for oid, val in varBinds:
                    trapvbs = trapvbs + '<vb>'
                    trapvbs = trapvbs + '\n    <oid>' + oid.prettyPrint() + '</oid>'
                    value = (val.getComponent(1)).prettyPrint()
                    trapvbs = trapvbs + '\n    <value>' + value + '</value>\n'
                    trapvbs = trapvbs + '  </vb>\n'
                if msgVer == api.protoVersion1:
                    traprecord = v1trapformat % (timestamp, agentaddr, Enterprise, GenericTrapid, SpecificTrapid, trapvbs)
                else:
                    traprecord = v2trapformat % (timestamp, transportAddress[0], trapoid, trapvbs)
                logger.debug(traprecord)
                for i in self._dispatchers:
                    i.dispatch(receivedTrap, traprecord)
def startReceiveTrap(serverip, trapport=162, channels=None):
    """Start a trap-receiver thread bound to serverip:trapport.

    Returns the running SNMP_NotifyRcv_Thread; hand it to stopReceiveTrap()
    to shut it down.
    """
    dispatcher = Dispatcher(channels)
    host = serverip
    # An empty bind address is displayed as 'localhost' in the log.
    display_ip = host if host != '' else 'localhost'
    logger.info('%s port %s is listen trap' % (display_ip, trapport))
    receiver = SNMP_NotifyRcv_Thread(host, trapport)
    receiver.register_dispatcher(dispatcher)
    receiver.setDaemon(False)
    receiver.start()
    logger.info('Start to receive trap')
    return receiver
def stopReceiveTrap(nt):
    """Shut down a receiver thread created by startReceiveTrap()."""
    nt.stop()
    logger.info('quit from TrapThread')
# Manual smoke test entry point: builds a Dispatcher with no channels.
if __name__ == '__main__':
    dispatcher = Dispatcher(None)
|
{
"content_hash": "eec391addb84153aa8d73bd4734bccdc",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 123,
"avg_line_length": 36.890728476821195,
"alnum_prop": 0.5285880980163361,
"repo_name": "HuaweiSNC/OPS2",
"id": "4066fa5d891ef5d30ab084adee2e9bc782e22945",
"size": "11231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/manager/trap/snmp_traprcv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1893"
},
{
"name": "HTML",
"bytes": "1604"
},
{
"name": "Python",
"bytes": "698028"
},
{
"name": "Shell",
"bytes": "2940"
}
],
"symlink_target": ""
}
|
import usa.emissions
#import usa.food_sector
|
{
"content_hash": "8fddf48c914179f960ec46bf6736f7a4",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 23,
"avg_line_length": 22.5,
"alnum_prop": 0.8222222222222222,
"repo_name": "sonya/eea",
"id": "5f0eb9ec576fa92ae7c28d8fd57425fa74196e27",
"size": "111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/run_usa.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "441743"
},
{
"name": "Shell",
"bytes": "31869"
}
],
"symlink_target": ""
}
|
import sys
import traceback
import copy
import listservers
import buckets
import info
import util_cli as util
import cb_bin_client
import stats_buffer
class StatsCollector:
    """Collects node, bucket and memcached statistics from a Couchbase
    cluster and stores the results in the shared ``stats_buffer`` module.

    Fixes over the previous version: Python 3-compatible ``except`` syntax
    (works on Python 2.6+ as well), ``in`` instead of the removed
    ``dict.has_key``, ``items()`` instead of ``iteritems``/``iterkeys``,
    and the local name ``json`` no longer shadows the json module name.
    """

    def __init__(self, log):
        # log: a logging.Logger-like object used for progress and errors.
        self.log = log

    def seg(self, k, v):
        """Parse ('some_stat_x,y', 'v') into (('some_stat', x, y), int(v)).

        Decodes histogram-style stat names whose final underscore component
        is a "start,end" bucket range.
        """
        ka = k.split('_')
        k = '_'.join(ka[0:-1])
        kstart, kend = [int(x) for x in ka[-1].split(',')]
        return ((k, kstart, kend), int(v))

    def retrieve_node_stats(self, nodeInfo, nodeStats):
        """Copy the interesting fields of a REST node-info dict into nodeStats."""
        nodeStats['portDirect'] = nodeInfo['ports']['direct']
        nodeStats['portProxy'] = nodeInfo['ports']['proxy']
        nodeStats['clusterMembership'] = nodeInfo['clusterMembership']
        nodeStats['os'] = nodeInfo['os']
        nodeStats['uptime'] = nodeInfo['uptime']
        nodeStats['version'] = nodeInfo['version']

        # Memory totals reported by the node.
        nodeStats['memory'] = {}
        nodeStats['memory']['allocated'] = nodeInfo['mcdMemoryAllocated']
        nodeStats['memory']['reserved'] = nodeInfo['mcdMemoryReserved']
        nodeStats['memory']['free'] = nodeInfo['memoryFree']
        nodeStats['memory']['quota'] = nodeInfo['memoryQuota']
        nodeStats['memory']['total'] = nodeInfo['memoryTotal']

        # Disk/RAM storage totals; any of these sections may be missing.
        nodeStats['StorageInfo'] = {}
        if nodeInfo['storageTotals'] is not None:
            hdd = nodeInfo['storageTotals']['hdd']
            if hdd is not None:
                nodeStats['StorageInfo']['hdd'] = {
                    'free': hdd['free'],
                    'quotaTotal': hdd['quotaTotal'],
                    'total': hdd['total'],
                    'used': hdd['used'],
                    'usedByData': hdd['usedByData'],
                }
            ram = nodeInfo['storageTotals']['ram']
            if ram is not None:
                nodeStats['StorageInfo']['ram'] = {
                    'quotaTotal': ram['quotaTotal'],
                    'total': ram['total'],
                    'used': ram['used'],
                    'usedByData': ram['usedByData'],
                    # 'quotaUsed' is absent on some server versions.
                    'quotaUsed': ram.get('quotaUsed', 0),
                }

        # System-level stats plus item counts (absent keys default to 0).
        nodeStats['systemStats'] = {}
        nodeStats['systemStats']['cpu_utilization_rate'] = nodeInfo['systemStats']['cpu_utilization_rate']
        nodeStats['systemStats']['swap_total'] = nodeInfo['systemStats']['swap_total']
        nodeStats['systemStats']['swap_used'] = nodeInfo['systemStats']['swap_used']

        interesting = nodeInfo['interestingStats']
        if interesting is None:
            interesting = {}
        nodeStats['systemStats']['currentItems'] = interesting.get('curr_items', 0)
        nodeStats['systemStats']['currentItemsTotal'] = interesting.get('curr_items_tot', 0)
        nodeStats['systemStats']['replicaCurrentItems'] = interesting.get('vb_replica_curr_items', 0)

    def get_hostlist(self, server, port, user, password, opts):
        """Retrieve the cluster's node list and per-node stats into
        stats_buffer.nodes.  Exits the process on any error (original
        behavior preserved)."""
        try:
            opts.append(("-o", "return"))
            nodes = listservers.ListServers().runCmd('host-list', server, port, user, password, False, opts)
            for node in nodes:
                (node_server, node_port) = util.hostport(node['hostname'])
                node_stats = {"host": node_server,
                              "port": node_port,
                              "status": node['status'],
                              "master": server}
                stats_buffer.nodes[node['hostname']] = node_stats
                if node['status'] == 'healthy':
                    node_info = info.Info().runCmd('get-server-info', node_server, node_port, user, password, False, opts)
                    self.retrieve_node_stats(node_info, node_stats)
                else:
                    self.log.error("Unhealthy node: %s:%s" % (node_server, node['status']))
            return nodes
        except Exception:
            traceback.print_exc()
            sys.exit(1)

    def get_bucketlist(self, server, port, user, password, opts):
        """Retrieve the bucket list plus per-bucket summary stats into
        stats_buffer.  Exits the process on any error."""
        try:
            bucketlist = buckets.Buckets().runCmd('bucket-get', server, port, user, password, False, opts)
            for bucket in bucketlist:
                bucket_name = bucket['name']
                self.log.info("bucket: %s" % bucket_name)
                bucketinfo = {}
                bucketinfo['name'] = bucket_name
                bucketinfo['bucketType'] = bucket['bucketType']
                bucketinfo['authType'] = bucket['authType']
                bucketinfo['saslPassword'] = bucket['saslPassword']
                bucketinfo['numReplica'] = bucket['replicaNumber']
                bucketinfo['ramQuota'] = bucket['quota']['ram']
                bucketinfo['master'] = server

                bucketStats = bucket['basicStats']
                bucketinfo['bucketStats'] = {}
                bucketinfo['bucketStats']['diskUsed'] = bucketStats['diskUsed']
                bucketinfo['bucketStats']['memUsed'] = bucketStats['memUsed']
                bucketinfo['bucketStats']['diskFetches'] = bucketStats['diskFetches']
                bucketinfo['bucketStats']['quotaPercentUsed'] = bucketStats['quotaPercentUsed']
                bucketinfo['bucketStats']['opsPerSec'] = bucketStats['opsPerSec']
                bucketinfo['bucketStats']['itemCount'] = bucketStats['itemCount']
                stats_buffer.bucket_info[bucket_name] = bucketinfo

                # Bucket-level summary stats from ns_server.
                c = buckets.BucketStats(bucket_name)
                stat_json = c.runCmd('bucket-stats', server, port, user, password, False, opts)
                stats_buffer.buckets_summary[bucket_name] = stat_json
            return bucketlist
        except Exception:
            traceback.print_exc()
            sys.exit(1)

    def get_mc_stats_per_node(self, mc, stats):
        """Pull the "timings", "tap", "checkpoint", "memory" and default stat
        groups from one memcached connection into the stats dict.

        Histogram ("timings") buckets are collapsed into a weighted average
        of bucket midpoints per stat name.
        """
        cmd_list = ["timings", "tap", "checkpoint", "memory", ""]
        try:
            for cmd in cmd_list:
                node_stats = mc.stats(cmd)
                if not node_stats:
                    continue
                if cmd == "timings":
                    # ('stat_x,y', 'count') -> (('stat', x, y), count), sorted.
                    vals = sorted([self.seg(*kv) for kv in node_stats.items()])
                    dd = {}
                    totals = {}
                    for s in vals:
                        avg = (s[0][1] + s[0][2]) / 2
                        k = s[0][0]
                        l = dd.get(k, [])
                        l.append((avg, s[1]))
                        dd[k] = l
                        totals[k] = totals.get(k, 0) + s[1]
                    for k in sorted(dd):
                        ccount = 0
                        for lbl, v in dd[k]:
                            ccount += v * lbl
                        stats[k] = ccount / totals[k]
                else:
                    for key, val in node_stats.items():
                        stats[key] = val
        except Exception:
            traceback.print_exc()

    def get_mc_stats(self, server, bucketlist, nodes):
        """Collect ep-engine stats for every (bucket, node) pair into
        stats_buffer.node_stats."""
        for bucket in bucketlist:
            bucket_name = bucket['name']
            stats_buffer.node_stats[bucket_name] = {}
            for node in nodes:
                (node_server, node_port) = util.hostport(node['hostname'])
                self.log.info("  node: %s %s" % (node_server, node['ports']['direct']))
                stats = {}
                mc = cb_bin_client.MemcachedClient(node_server, node['ports']['direct'])
                # Non-default buckets require SASL authentication first.
                if bucket["name"] != "Default":
                    mc.sasl_auth_cram_md5(bucket_name.encode("utf8"), bucket["saslPassword"].encode("utf8"))
                self.get_mc_stats_per_node(mc, stats)
                stats_buffer.node_stats[bucket_name][node['hostname']] = stats

    def get_ns_stats(self, bucketlist, server, port, user, password, opts):
        """Collect per-bucket, per-stat time series from ns_server into
        stats_buffer.buckets."""
        for bucket in bucketlist:
            bucket_name = bucket['name']
            stats_buffer.buckets[bucket_name] = copy.deepcopy(stats_buffer.stats)
            cmd = 'bucket-node-stats'
            for scale, stat_set in stats_buffer.buckets[bucket_name].items():
                for stat in stat_set:
                    sys.stderr.write('.')  # progress indicator
                    self.log.debug("retrieve: %s" % stat)
                    c = buckets.BucketNodeStats(bucket_name, stat, scale)
                    stat_json = c.runCmd(cmd, server, port, user, password, False, opts)
                    stats_buffer.buckets[bucket_name][scale][stat] = stat_json
            sys.stderr.write('\n')

    def collect_data(self, cluster, user, password, opts):
        """Top-level driver: node list, bucket list, ep-engine stats and
        ns_server stats, in that order."""
        server, port = util.hostport(cluster)

        nodes = self.get_hostlist(server, port, user, password, opts)
        self.log.debug(util.pretty_print(stats_buffer.nodes))

        bucketlist = self.get_bucketlist(server, port, user, password, opts)
        self.log.debug(util.pretty_print(stats_buffer.bucket_info))

        self.get_mc_stats(server, bucketlist, nodes)
        self.log.debug(util.pretty_print(stats_buffer.node_stats))

        self.get_ns_stats(bucketlist, server, port, user, password, opts)
        self.log.debug(util.pretty_print(stats_buffer.buckets))
|
{
"content_hash": "e188e6ee86fae8c4c0ef547c1ee5248c",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 122,
"avg_line_length": 45.90350877192982,
"alnum_prop": 0.5320084081788649,
"repo_name": "TOTVS/mdmpublic",
"id": "874c3a215c6b317ccd317d5cbaae01fcbc7a039d",
"size": "10509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "couchbase-cli/lib/python/collector.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "2022"
},
{
"name": "CSS",
"bytes": "232815"
},
{
"name": "Emacs Lisp",
"bytes": "370404"
},
{
"name": "Erlang",
"bytes": "162029"
},
{
"name": "HTML",
"bytes": "344417"
},
{
"name": "JavaScript",
"bytes": "1548603"
},
{
"name": "Makefile",
"bytes": "30654"
},
{
"name": "Python",
"bytes": "1227549"
},
{
"name": "Ruby",
"bytes": "1576"
},
{
"name": "Shell",
"bytes": "467880"
},
{
"name": "Tcl",
"bytes": "9372"
},
{
"name": "XSLT",
"bytes": "197715"
}
],
"symlink_target": ""
}
|
import numpy as np
import cv2
# Play media/video_test.avi frame by frame in grayscale; press 'q' to quit.
cap = cv2.VideoCapture('media/video_test.avi')

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # End of stream or read error: cap.read() returns (False, None);
        # the previous version crashed here on frame.shape / cvtColor.
        break
    print(frame.shape)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
{
"content_hash": "53125d86db04b4c74459f26591c89907",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 50,
"avg_line_length": 21.866666666666667,
"alnum_prop": 0.6554878048780488,
"repo_name": "rajat1994/ComputerVisionSamples",
"id": "58ed9f502e24ad8555f44a211c55220868395c3c",
"size": "328",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "video_play_from_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6260"
}
],
"symlink_target": ""
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Minor(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.xaxis"
_path_str = "layout.xaxis.minor"
_valid_props = {
"dtick",
"gridcolor",
"griddash",
"gridwidth",
"nticks",
"showgrid",
"tick0",
"tickcolor",
"ticklen",
"tickmode",
"ticks",
"tickvals",
"tickvalssrc",
"tickwidth",
}
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# gridcolor
# ---------
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
# griddash
# --------
@property
def griddash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'griddash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["griddash"]
@griddash.setter
def griddash(self, val):
self["griddash"] = val
# gridwidth
# ---------
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# showgrid
# --------
@property
def showgrid(self):
"""
Determines whether or not grid lines are drawn. If True, the
grid lines are drawn at every tick mark.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # NOTE(review): plain runtime string (not a docstring) — appears to
        # feed generated help text; keep the wording in sync with the
        # per-property docstrings above.
        return """\
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        gridcolor
            Sets the color of the grid lines.
        griddash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        gridwidth
            Sets the width (in px) of the grid lines.
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        showgrid
            Determines whether or not grid lines are drawn. If
            True, the grid lines are drawn at every tick mark.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickcolor
            Sets the tick color.
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        """
    def __init__(
        self,
        arg=None,
        dtick=None,
        gridcolor=None,
        griddash=None,
        gridwidth=None,
        nticks=None,
        showgrid=None,
        tick0=None,
        tickcolor=None,
        ticklen=None,
        tickmode=None,
        ticks=None,
        tickvals=None,
        tickvalssrc=None,
        tickwidth=None,
        **kwargs,
    ):
        """
        Construct a new Minor object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.xaxis.Minor`
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        gridcolor
            Sets the color of the grid lines.
        griddash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        gridwidth
            Sets the width (in px) of the grid lines.
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        showgrid
            Determines whether or not grid lines are drawn. If
            True, the grid lines are drawn at every tick mark.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickcolor
            Sets the tick color.
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        Returns
        -------
        Minor
        """
        super(Minor, self).__init__("minor")
        # Internal construction path: when a parent object is handed in,
        # just attach to it and skip all argument validation below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Copy so that popping keys below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.xaxis.Minor
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.xaxis.Minor`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # For every property: an explicit keyword argument takes precedence
        # over the corresponding entry popped from `arg`.
        _v = arg.pop("dtick", None)
        _v = dtick if dtick is not None else _v
        if _v is not None:
            self["dtick"] = _v
        _v = arg.pop("gridcolor", None)
        _v = gridcolor if gridcolor is not None else _v
        if _v is not None:
            self["gridcolor"] = _v
        _v = arg.pop("griddash", None)
        _v = griddash if griddash is not None else _v
        if _v is not None:
            self["griddash"] = _v
        _v = arg.pop("gridwidth", None)
        _v = gridwidth if gridwidth is not None else _v
        if _v is not None:
            self["gridwidth"] = _v
        _v = arg.pop("nticks", None)
        _v = nticks if nticks is not None else _v
        if _v is not None:
            self["nticks"] = _v
        _v = arg.pop("showgrid", None)
        _v = showgrid if showgrid is not None else _v
        if _v is not None:
            self["showgrid"] = _v
        _v = arg.pop("tick0", None)
        _v = tick0 if tick0 is not None else _v
        if _v is not None:
            self["tick0"] = _v
        _v = arg.pop("tickcolor", None)
        _v = tickcolor if tickcolor is not None else _v
        if _v is not None:
            self["tickcolor"] = _v
        _v = arg.pop("ticklen", None)
        _v = ticklen if ticklen is not None else _v
        if _v is not None:
            self["ticklen"] = _v
        _v = arg.pop("tickmode", None)
        _v = tickmode if tickmode is not None else _v
        if _v is not None:
            self["tickmode"] = _v
        _v = arg.pop("ticks", None)
        _v = ticks if ticks is not None else _v
        if _v is not None:
            self["ticks"] = _v
        _v = arg.pop("tickvals", None)
        _v = tickvals if tickvals is not None else _v
        if _v is not None:
            self["tickvals"] = _v
        _v = arg.pop("tickvalssrc", None)
        _v = tickvalssrc if tickvalssrc is not None else _v
        if _v is not None:
            self["tickvalssrc"] = _v
        _v = arg.pop("tickwidth", None)
        _v = tickwidth if tickwidth is not None else _v
        if _v is not None:
            self["tickwidth"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
{
"content_hash": "94799236be5224d7b95844f6c155eeea",
"timestamp": "",
"source": "github",
"line_count": 723,
"max_line_length": 84,
"avg_line_length": 36.98755186721991,
"alnum_prop": 0.5510806970308877,
"repo_name": "plotly/plotly.py",
"id": "08ccaacfc71a66dd62c25c3e91b538e7ff46a9f5",
"size": "26742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/layout/xaxis/_minor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""
Created on Sun Mar 5 10:27:24 2017
@author: mje
"""
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import (permutation_test_score, StratifiedKFold)
from sklearn.externals import joblib
from my_settings import (data_path, source_folder, step_size, window_size)
# Time axis: 1 ms samples from -4000 ms to +1000 ms, converted to seconds.
times = np.arange(-4000, 1001, 1)
times = times / 1000.
selected_times = times[::step_size]
# Number of sliding windows that still fit before the end of the epoch.
n_time = sum((selected_times + window_size) < times[-1])

# Subjects included in the analysis.
subjects = [
    "0008", "0009", "0010", "0012", "0014", "0015", "0016", "0017", "0018",
    "0019", "0020", "0021", "0022"
]

# Load per-subject global-efficiency ("ge") graph measures for both
# conditions and stack the four frequency bands into one array per subject.
cls_all = []
pln_all = []
for subject in subjects:
    cls = np.load(source_folder + "graph_data/%s_cls_pow_sliding.npy" %
                  subject).item()
    pln = np.load(source_folder + "graph_data/%s_pln_pow_sliding.npy" %
                  subject).item()
    bands = ["ge_alpha", "ge_beta", "ge_gamma_low", "ge_gamma_high"]
    cls_all.append(np.asarray([cls[band] for band in bands]))
    pln_all.append(np.asarray([pln[band] for band in bands]))

data_cls = np.asarray(cls_all)
data_pln = np.asarray(pln_all)

# Load the fitted generalization-across-time (GAT) model.
gat = joblib.load(data_path + "decode_time_gen/gat_ge.jl")

# Stack conditions for classification: class 0 = "cls", class 1 = "pln".
X = np.vstack([data_cls, data_pln])
y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

cv = StratifiedKFold(n_splits=7, shuffle=True)

perm_score_results = []
for j, est in enumerate(gat.estimators_):
    # Average the per-fold classifiers of time point j into one mean model.
    # BUGFIX: the original wrapped this in "for tmp in est:" with an unused
    # loop variable, recomputing the identical mean classifier and rerunning
    # the (expensive, n_permutations=2000) permutation test len(est) times,
    # appending duplicate result dicts. The loop-invariant work is hoisted.
    lr_mean = LogisticRegression(C=0.0001)
    lr_mean.coef_ = np.asarray([lr.coef_ for lr in est]).mean(
        axis=0).squeeze()
    lr_mean.intercept_ = np.asarray([lr.intercept_ for lr in est]).mean()
    score, perm_score, pval = permutation_test_score(
        lr_mean, X[:, :, j], y, cv=cv, scoring="roc_auc", n_permutations=2000)
    perm_score_results.append({
        "score": score,
        "perm_score": perm_score,
        "pval": pval
    })

joblib.dump(perm_score_results,
            data_path + "decode_time_gen/perm_score_results_ge.npy")
|
{
"content_hash": "1b5030de3095e29d206a75a7cf7c175b",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 29.705128205128204,
"alnum_prop": 0.6340094950366854,
"repo_name": "MadsJensen/RP_scripts",
"id": "25e816582ef6abb09e1abfc178c6eeb64b606b4e",
"size": "2341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perm_test_score_gat_ge.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "MATLAB",
"bytes": "375"
},
{
"name": "Python",
"bytes": "189037"
},
{
"name": "Shell",
"bytes": "3199"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Move the `gehoert_zu` relation: drop it from GanzesMenue and add it
    to Hauptpunkt as a FK pointing at GanzesMenue."""

    dependencies = [
        ('Grundgeruest', '0004_ganzesmenue'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='ganzesmenue',
            name='gehoert_zu',
        ),
        migrations.AddField(
            model_name='hauptpunkt',
            name='gehoert_zu',
            # default=1 backfills existing rows during the migration;
            # preserve_default=False removes that default from the model
            # afterwards (standard Django pattern for non-null additions).
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='Grundgeruest.GanzesMenue'),
            preserve_default=False,
        ),
    ]
|
{
"content_hash": "d6b43d48cd233bce666e447e7ad99a6c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 123,
"avg_line_length": 26.5,
"alnum_prop": 0.6132075471698113,
"repo_name": "wmles/scholarium",
"id": "ad21dad8d0c0c2f50db7d814b00a93570bcbc8e5",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Grundgeruest/migrations/0005_auto_20170224_0114.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "107176"
},
{
"name": "HTML",
"bytes": "33687"
},
{
"name": "JavaScript",
"bytes": "702"
},
{
"name": "Python",
"bytes": "64048"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators import base
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import log_loss
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
class BaseTest(tf.test.TestCase):
  """Smoke tests for the high-level TensorFlowLinear* estimator API.

  The numeric thresholds are loose sanity bounds, not exact expectations.
  """

  def testOneDim(self):
    # Fit y = 2x + 3 on synthetic 1-D data; MSE must be small.
    random.seed(42)
    X = np.random.rand(1000)
    y = 2 * X + 3
    regressor = learn.TensorFlowLinearRegressor()
    regressor.fit(X, y)
    score = mean_squared_error(y, regressor.predict(X))
    self.assertLess(score, 1.0, "Failed with score = {0}".format(score))
  def testIris(self):
    # Labels are passed as floats to exercise target conversion.
    iris = datasets.load_iris()
    classifier = learn.TensorFlowLinearClassifier(n_classes=3)
    classifier.fit(iris.data, [float(x) for x in iris.target])
    score = accuracy_score(iris.target, classifier.predict(iris.data))
    self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))
  def testIrisClassWeight(self):
    # A heavily skewed class_weight should *hurt* accuracy (assertLess).
    iris = datasets.load_iris()
    classifier = learn.TensorFlowLinearClassifier(n_classes=3,
                                                  class_weight=[0.1, 0.8, 0.1])
    classifier.fit(iris.data, iris.target)
    score = accuracy_score(iris.target, classifier.predict(iris.data))
    self.assertLess(score, 0.7, "Failed with score = {0}".format(score))
  def testIrisAllVariables(self):
    # Pins the exact variable names created by the linear classifier graph.
    iris = datasets.load_iris()
    classifier = learn.TensorFlowLinearClassifier(n_classes=3)
    classifier.fit(iris.data, [float(x) for x in iris.target])
    self.assertEqual(
        classifier.get_variable_names(),
        ["global_step:0", "logistic_regression/weights:0",
         "logistic_regression/bias:0",
         "logistic_regression/softmax_classifier/softmax_cross_entropy_loss/"
         "value/avg:0",
         "learning_rate:0", "logistic_regression/weights/Adagrad:0",
         "logistic_regression/bias/Adagrad:0"])
  def testIrisSummaries(self):
    # Training with a logdir must not break fitting/prediction.
    iris = datasets.load_iris()
    classifier = learn.TensorFlowLinearClassifier(n_classes=3)
    classifier.fit(iris.data, iris.target, logdir="/tmp/learn_tests/")
    score = accuracy_score(iris.target, classifier.predict(iris.data))
    self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
  def testIrisContinueTraining(self):
    # A second fit() with continue_training=True must improve the score.
    iris = datasets.load_iris()
    classifier = learn.TensorFlowLinearClassifier(n_classes=3,
                                                  learning_rate=0.01,
                                                  continue_training=True,
                                                  steps=250)
    classifier.fit(iris.data, iris.target)
    score1 = accuracy_score(iris.target, classifier.predict(iris.data))
    classifier.fit(iris.data, iris.target)
    score2 = accuracy_score(iris.target, classifier.predict(iris.data))
    self.assertGreater(score2, score1, "Failed with score = {0}".format(score2))
  def testIrisStreaming(self):
    # Generators (infinite for training, finite for prediction) must be
    # accepted and give the same predictions as array input.
    iris = datasets.load_iris()
    def iris_data():
      while True:
        for x in iris.data:
          yield x
    def iris_predict_data():
      for x in iris.data:
        yield x
    def iris_target():
      while True:
        for y in iris.target:
          yield y
    classifier = learn.TensorFlowLinearClassifier(n_classes=3, steps=100)
    classifier.fit(iris_data(), iris_target())
    score1 = accuracy_score(iris.target, classifier.predict(iris.data))
    score2 = accuracy_score(iris.target,
                            classifier.predict(iris_predict_data()))
    self.assertGreater(score1, 0.5, "Failed with score = {0}".format(score1))
    self.assertEqual(score2, score1, "Scores from {0} iterator doesn't "
                     "match score {1} from full "
                     "data.".format(score2, score1))
  def testIris_proba(self):
    # If sklearn available.
    if log_loss:
      random.seed(42)
      iris = datasets.load_iris()
      classifier = learn.TensorFlowClassifier(n_classes=3, steps=250)
      classifier.fit(iris.data, iris.target)
      score = log_loss(iris.target, classifier.predict_proba(iris.data))
      self.assertLess(score, 0.8, "Failed with score = {0}".format(score))
  def testBoston(self):
    # Full-batch regression on Boston housing; loose MSE bound.
    random.seed(42)
    boston = datasets.load_boston()
    regressor = learn.TensorFlowLinearRegressor(batch_size=boston.data.shape[0],
                                                steps=500,
                                                learning_rate=0.001)
    regressor.fit(boston.data, boston.target)
    score = mean_squared_error(boston.target, regressor.predict(boston.data))
    self.assertLess(score, 150, "Failed with score = {0}".format(score))
  def testUnfitted(self):
    # predict()/save() before fit() must raise NotFittedError.
    estimator = learn.TensorFlowEstimator(model_fn=None, n_classes=1)
    with self.assertRaises(base.NotFittedError):
      estimator.predict([1, 2, 3])
    with self.assertRaises(base.NotFittedError):
      estimator.save("/tmp/path")
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  tf.test.main()
|
{
"content_hash": "6cb9e62f72240c71bd7a2a558c056915",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 88,
"avg_line_length": 40,
"alnum_prop": 0.656578947368421,
"repo_name": "peterbraden/tensorflow",
"id": "99c35f638db389a67decfd62b60395a1512afcaf",
"size": "5944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/tests/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "154152"
},
{
"name": "C++",
"bytes": "8654768"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "737101"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "11651"
},
{
"name": "Jupyter Notebook",
"bytes": "1771939"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "108842"
},
{
"name": "Python",
"bytes": "5710163"
},
{
"name": "Shell",
"bytes": "164294"
},
{
"name": "TypeScript",
"bytes": "394470"
}
],
"symlink_target": ""
}
|
from iris_sdk.models.maps.base_map import BaseMap
class FeatureDldaMap(BaseMap):
    """Field map for the DLDA feature resource.

    All fields default to None; presumably the BaseMap machinery populates
    them from API responses — verify against BaseMap's usage.
    """
    address = None
    list_address = None
    listing_name = None
    listing_type = None
    status = None
    subscriber_type = None
{
"content_hash": "13f49043788ccc176a2fd33be323d9a5",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 49,
"avg_line_length": 21.8,
"alnum_prop": 0.6926605504587156,
"repo_name": "scottbarstow/iris-python",
"id": "516a8d945d23a5b77da3c92c6e49eede63edb360",
"size": "241",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "iris_sdk/models/maps/feature_dlda.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "308732"
}
],
"symlink_target": ""
}
|
"""The Tornado web server and tools."""
from __future__ import absolute_import, division, print_function, with_statement
# version is a human-readable version number.
# version_info is a four-tuple for programmatic comparison. The first
# three numbers are the components of the version number. The fourth
# is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version
# number has been incremented)
# NOTE: keep `version` and `version_info` in sync when cutting a release.
version = "4.2.1"
version_info = (4, 2, 1, 0)
|
{
"content_hash": "f173c718f348447882c7476d3044def2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 40.53846153846154,
"alnum_prop": 0.7533206831119544,
"repo_name": "bdh1011/wau",
"id": "5588295e49e064b1c542aeca0ff94eb0178bd529",
"size": "1124",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/tornado/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1176"
},
{
"name": "C",
"bytes": "5022853"
},
{
"name": "C++",
"bytes": "43676"
},
{
"name": "CSS",
"bytes": "10359"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Groff",
"bytes": "7236"
},
{
"name": "HTML",
"bytes": "1709320"
},
{
"name": "JavaScript",
"bytes": "1200059"
},
{
"name": "Jupyter Notebook",
"bytes": "310219"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "Makefile",
"bytes": "112163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "49407229"
},
{
"name": "Ruby",
"bytes": "58403"
},
{
"name": "Shell",
"bytes": "47672"
},
{
"name": "Smarty",
"bytes": "22599"
},
{
"name": "Tcl",
"bytes": "426334"
},
{
"name": "XSLT",
"bytes": "153073"
}
],
"symlink_target": ""
}
|
from google.cloud import functions_v2
async def sample_generate_upload_url():
    """Generated sample: request an upload URL from the async Functions client."""
    # Create a client
    client = functions_v2.FunctionServiceAsyncClient()
    # Initialize request argument(s)
    request = functions_v2.GenerateUploadUrlRequest(parent="parent_value")
    # Make the request and handle the response
    print(await client.generate_upload_url(request=request))
# [END cloudfunctions_v2_generated_FunctionService_GenerateUploadUrl_async]
|
{
"content_hash": "4ebf17e6fcc10219a5882b2d67350816",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 26.210526315789473,
"alnum_prop": 0.7289156626506024,
"repo_name": "googleapis/python-functions",
"id": "e05b5f414a25597f36ffa3062daf9b6acd1373d9",
"size": "1903",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/cloudfunctions_v2_generated_function_service_generate_upload_url_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "872751"
},
{
"name": "Shell",
"bytes": "30669"
}
],
"symlink_target": ""
}
|
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.workflows.executions_v1beta.types import executions
try:
    # Report the installed distribution's version in the client user-agent;
    # fall back to an unversioned ClientInfo if the package metadata is not
    # available (e.g. not installed via pip).
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-workflow",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ExecutionsTransport(abc.ABC):
    """Abstract transport class for Executions."""
    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
    DEFAULT_HOST: str = "workflowexecutions.googleapis.com"
    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): The audience applied to GDCH
                credentials; defaults to ``host`` when unset.
        """
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        # Save the scopes.
        self._scopes = scopes
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )
        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)
        # Save the credentials.
        self._credentials = credentials
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        # Each abstract RPC is wrapped once with gapic retry/timeout plumbing.
        self._wrapped_methods = {
            self.list_executions: gapic_v1.method.wrap_method(
                self.list_executions,
                default_timeout=None,
                client_info=client_info,
            ),
            self.create_execution: gapic_v1.method.wrap_method(
                self.create_execution,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_execution: gapic_v1.method.wrap_method(
                self.get_execution,
                default_timeout=None,
                client_info=client_info,
            ),
            self.cancel_execution: gapic_v1.method.wrap_method(
                self.cancel_execution,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    def close(self):
        """Closes resources associated with the transport.
        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()
    # The four abstract RPC entry points below return either a sync response
    # or an awaitable, depending on the concrete (grpc vs. asyncio) transport.
    @property
    def list_executions(
        self,
    ) -> Callable[
        [executions.ListExecutionsRequest],
        Union[
            executions.ListExecutionsResponse,
            Awaitable[executions.ListExecutionsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def create_execution(
        self,
    ) -> Callable[
        [executions.CreateExecutionRequest],
        Union[executions.Execution, Awaitable[executions.Execution]],
    ]:
        raise NotImplementedError()
    @property
    def get_execution(
        self,
    ) -> Callable[
        [executions.GetExecutionRequest],
        Union[executions.Execution, Awaitable[executions.Execution]],
    ]:
        raise NotImplementedError()
    @property
    def cancel_execution(
        self,
    ) -> Callable[
        [executions.CancelExecutionRequest],
        Union[executions.Execution, Awaitable[executions.Execution]],
    ]:
        raise NotImplementedError()
    @property
    def kind(self) -> str:
        raise NotImplementedError()
# Public API of this module.
__all__ = ("ExecutionsTransport",)
|
{
"content_hash": "0d189fc1b69a9b96f5e74ae6d540dd0a",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 101,
"avg_line_length": 36.1578947368421,
"alnum_prop": 0.6119359534206695,
"repo_name": "googleapis/python-workflows",
"id": "fb5d62e01513c3553773a9d3dd30f99f462d0ebd",
"size": "7470",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/workflows/executions_v1beta/services/executions/transports/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "956538"
},
{
"name": "Shell",
"bytes": "30669"
}
],
"symlink_target": ""
}
|
import os
import re
import uuid
from flask import render_template, Blueprint, flash, \
redirect, url_for, current_app, abort, request, Response, make_response
import psycopg2
from flask.ext.babel import gettext as _
from flask.ext.login import login_required, current_user
from sqlalchemy.sql.expression import desc
from geoalchemy2.functions import ST_Transform
from geoalchemy2.shape import to_shape
from gbi_server.lib.couchdb import extend_schema_for_couchdb, CouchDBBox, CouchDBError
from gbi_server.lib.postgis import TempPGDB
from gbi_server.model import WMTS, WFSSession, WFS
from gbi_server.forms.wfs import WFSEditForm, WFSAddLayerForm, WFSSearchForm
from gbi_server import signals
from gbi_server.extensions import db
from gbi_server.lib import tinyows
from gbi_server.util import ensure_dir
from gbi_server.config import SystemConfig
# Blueprint collecting the map/WFS views; templates live one level up.
maps = Blueprint("maps", __name__, template_folder="../templates")
@maps.route('/js/gbi_translations.js')
def javascript_translation():
    """Serve the rendered translation template as a JavaScript resource."""
    rendered = render_template('js/translation.js')
    resp = make_response(rendered)
    # Rendered templates default to text/html; override the content type.
    resp.headers['Content-type'] = 'application/javascript'
    return resp
@maps.route('/maps/wmts', methods=['GET'])
@login_required
def wmts():
    """Render the WMTS map view with the user's area box as a read-only
    vector layer."""
    db_name = '%s_%s' % (SystemConfig.AREA_BOX_NAME, current_user.id)
    area_box = CouchDBBox(current_app.config.get('COUCH_DB_URL'), db_name)
    # Keep only features whose geometry is an actual mapping.
    area_features = []
    for feature in area_box.iter_features():
        if isinstance(feature['geometry'], dict):
            area_features.append(feature)
    vector_layers = [{
        'name': SystemConfig.AREA_BOX_TITLE,
        'features': area_features,
        'readonly': True,
    }]
    return render_template(
        'maps/map.html',
        wmts_layers=WMTS.query.all(),
        vector_layers=vector_layers,
        user=current_user
    )
@maps.route('/maps/wfs', methods=['GET', 'POST'])
@login_required
def wfs_edit():
    """Layer selection page: create a new WFS layer and/or pick one to edit.

    POST handles two forms: `add_layer_form` creates a new layer schema in
    the user's CouchDB box; `form` redirects to the internal editor or to
    an external WFS session for the chosen layer.
    """
    user = current_user
    form = WFSEditForm()
    add_layer_form = WFSAddLayerForm()
    couch = CouchDBBox(current_app.config.get('COUCH_DB_URL'), '%s_%s' % (SystemConfig.AREA_BOX_NAME, user.id))
    if add_layer_form.validate_on_submit():
        title = add_layer_form.data.get('new_layer')
        # Layer id: lowercased title with every non-[a-z0-9] character stripped.
        layer = re.sub(r'[^a-z0-9]*', '', title.lower())
        # NOTE(review): constructs the same CouchDBBox as above — looks redundant.
        couch = CouchDBBox(current_app.config.get('COUCH_DB_URL'), '%s_%s' % (SystemConfig.AREA_BOX_NAME, user.id))
        schema = tinyows.base_schema()
        if couch.layer_schema(layer):
            flash(_('Layer %(title)s already exists', title=title), 'error')
        else:
            couch.store_layer_schema(layer, schema, title=title)
            flash(_('Layer %(title)s created', title=title))
    # The read-only layer must never be offered for editing.
    form.layer.choices = [(layer, title) for layer, title in couch.get_layer_names() if layer != current_app.config.get('USER_READONLY_LAYER')]
    if form.validate_on_submit():
        layer = form.data.get('layer', current_app.config.get('USER_WORKON_LAYER'))
        # external_editor is int-convertible form data (presumably "0"/"1").
        if not int(form.data['external_editor']):
            return redirect(url_for('.wfs_edit_layer', layer=layer))
        else:
            return redirect(url_for('.wfs_session', layer=layer))
    return render_template('maps/wfs_edit.html', form=form, add_layer_form=add_layer_form, not_removable_layer=current_app.config.get('USER_WORKON_LAYER'))
@maps.route('/maps/wfs/<layer>', methods=['GET'])
@login_required
def wfs_edit_layer(layer=None):
    """Render the internal WFS editor for *layer*.

    Refuses to open the editor while an external editing session for the
    same layer is active; falls back over several sources to determine an
    initial map extent.
    """
    form = WFSSearchForm()
    user = current_user
    # An active external (tinyows) session locks the layer.
    wfs_session = WFSSession.by_active_user_layer(layer, user)
    if wfs_session:
        flash(_('external edit in progress'))
        return redirect(url_for('.wfs_session', layer=layer))
    couch = CouchDBBox(current_app.config.get('COUCH_DB_URL'), '%s_%s' % (SystemConfig.AREA_BOX_NAME, user.id))
    try:
        wfs_layers, wfs_layer_token = create_wfs(user, editable_layers=[layer])
    except MissingSchemaError:
        flash(_('layer unknown or without schema'))
        abort(404)
    # Read-only overlay features; only GeoJSON-style dict geometries.
    features = [feature for feature in couch.iter_layer_features(current_app.config.get('USER_READONLY_LAYER')) if isinstance(feature['geometry'], dict)]
    # Extent fallback chain: edited layer -> read-only layer -> first WMTS
    # view coverage (transformed to web mercator, background layers first).
    data_extent = couch.layer_extent(layer)
    if not data_extent:
        data_extent = couch.layer_extent(current_app.config.get('USER_READONLY_LAYER'))
    if not data_extent:
        result = db.session.query(WMTS, ST_Transform(WMTS.view_coverage, 3857)).order_by(desc(WMTS.is_background_layer)).first()
        if result:
            data_extent = to_shape(result[1])
    titles = dict(couch.get_layer_names())
    return render_template(
        'maps/wfs.html',
        form=form,
        wfs=wfs_layers,
        layers=WMTS.query.all(),
        read_only_features=features,
        # NOTE(review): the schema is looked up for the *edited* layer even
        # though the features above come from the read-only layer -- confirm
        # this is intended.
        read_only_schema=couch.layer_schema(layer)['properties'],
        read_only_layer_name=current_app.config.get('AREA_BOX_TITLE'),
        editable_layer=layer,
        editable_layer_title=titles[layer],
        data_extent=data_extent.bounds if data_extent else None,
        user=current_user
    )
@maps.route('/maps/wfs/remove/<layer>', methods=['GET'])
@login_required
def wfs_remove_layer(layer=None):
    """Delete all data of *layer* from the user's CouchDB bucket.

    The built-in read-only and work-on layers cannot be removed, nor can
    a layer locked by an active external editing session.
    """
    user = current_user
    if layer in [current_app.config.get('USER_READONLY_LAYER'), current_app.config.get('USER_WORKON_LAYER')]:
        flash(_('not allowed to remove this layer'))
        return redirect(url_for('.wfs_edit'))
    wfs_session = WFSSession.by_active_user_layer(layer, user)
    if wfs_session:
        flash(_('external edit in progress'))
        return redirect(url_for('.wfs_session', layer=layer))
    couch = CouchDBBox(current_app.config.get('COUCH_DB_URL'), '%s_%s' % (SystemConfig.AREA_BOX_NAME, user.id))
    try:
        couch.clear_layer(layer)
        flash(_('Layer %(layer)s removed', layer=layer))
    except CouchDBError:
        flash(_('Could not remove layer %(layer)s', layer=layer), 'error')
    return redirect(url_for('.wfs_edit'))
@maps.route('/maps/wfs/external/<layer>', methods=['GET'])
@login_required
def wfs_session(layer=None):
    """Start (or resume) an external WFS editing session for *layer*.

    On first call, creates the temporary tinyows WFS endpoint and records
    the session so other views can detect the layer lock.
    """
    user = current_user
    wfs_session = WFSSession.by_active_user_layer(layer, user)
    if not wfs_session:
        try:
            wfs_layers, wfs_layer_token = create_wfs(user, [layer])
        except MissingSchemaError:
            flash(_('layer unknown or without schema'))
            abort(404)
        # Persist the session with the external tinyows URL for this token.
        wfs_session = WFSSession(user=user, layer=layer, url=url_for('.tinyows_wfs', token=wfs_layer_token, _external=True))
        db.session.add(wfs_session)
        db.session.commit()
    return render_template('maps/wfs_session.html', wfs_session=wfs_session)
@maps.route('/maps/wfs/cancel_changes/<layer>', methods=['GET'])
@login_required
def cancel_changes(layer=None):
    """Discard an external WFS editing session for *layer* without saving.

    Bug fix: guard against a missing session -- previously this raised
    AttributeError when no active session existed for the layer
    (``save_changes`` already guards the same way).
    """
    user = current_user
    wfs_session = WFSSession.by_active_user_layer(layer, user)
    if wfs_session:
        wfs_session.active = False
        wfs_session.update()
        db.session.commit()
    flash(_('wfs changes discarded'))
    return redirect(url_for('.wfs_edit'))
@maps.route('/maps/wfs/write_back/<layer>')
@login_required
def write_back(layer=None, ajax=True):
    """Copy features edited in the temporary PostGIS table back to CouchDB.

    Loads the per-user temporary table for *layer*, stores its features in
    the user's CouchDB bucket (deleting the ones missing from PostGIS) and
    re-creates the WFS setup so the table matches CouchDB again.

    NOTE(review): when called with ``ajax`` falsy (see ``save_changes``)
    the function returns None -- only the AJAX path produces a response.
    """
    user = current_user
    connection = psycopg2.connect(
        database=current_app.config.get('TEMP_PG_DB'),
        host=current_app.config.get('TEMP_PG_HOST'),
        user=current_app.config.get('TEMP_PG_USER'),
        password=current_app.config.get('TEMP_PG_PASSWORD'),
        sslmode='allow',
    )
    couch = CouchDBBox(current_app.config.get('COUCH_DB_URL'), '%s_%s' % (SystemConfig.AREA_BOX_NAME, user.id))
    schema = couch.layer_schema(layer)
    extend_schema_for_couchdb(schema)
    tablename = 'tmp%s%s' % (user.id, layer)
    tmp_db = TempPGDB(connection=connection, tablename=tablename, schema=schema)
    couch.store_features(layer, tmp_db.load_features(), delete_missing=tmp_db.imported_feature_ids())
    connection.close()
    # write changes back to PostGIS to prevent multiple inserts
    create_wfs(user, [layer])
    signals.features_updated.send(user)
    if ajax:
        return Response(response='success', status=200, headers=None, mimetype='application/json', content_type=None)
@maps.route('/maps/wfs/save_changes/<layer>')
@login_required
def save_changes(layer=None):
    """Persist an external WFS session: write back to CouchDB and close it."""
    user = current_user
    # Write the PostGIS edits to CouchDB first (ajax=False: no JSON response).
    write_back(layer, False)
    wfs_session = WFSSession.query.filter_by(user=user, active=True, layer=layer).first()
    if wfs_session:
        wfs_session.active = False
        wfs_session.update()
        db.session.commit()
    flash(_('wfs changes saved'))
    return redirect(url_for('.wfs_edit'))
class MissingSchemaError(Exception):
    """Raised when a layer has no schema (or none with 'properties') in CouchDB."""
    pass
def create_wfs(user=None, editable_layers=None):
    """Create the temporary WFS setup for *user* and *editable_layers*.

    For each editable layer: copies the CouchDB features into a per-user
    temporary PostGIS table and writes a tinyows configuration under a
    freshly generated token.  Search-only WFS layers from the database are
    appended afterwards.

    Returns a ``(wfs_layers, wfs_layer_token)`` tuple.

    Raises:
        MissingSchemaError: if a layer has no schema with 'properties'.
    """
    connection = psycopg2.connect(
        database=current_app.config.get('TEMP_PG_DB'),
        host=current_app.config.get('TEMP_PG_HOST'),
        user=current_app.config.get('TEMP_PG_USER'),
        password=current_app.config.get('TEMP_PG_PASSWORD'),
        sslmode='allow',
    )
    couch = CouchDBBox(current_app.config.get('COUCH_DB_URL'), '%s_%s' % (SystemConfig.AREA_BOX_NAME, user.id))
    # The token names both the tinyows config file and the service URL.
    wfs_layer_token = uuid.uuid4().hex
    wfs = []
    # create layer to edit
    tinyows_layers = []
    titles = dict(couch.get_layer_names())
    for layer in editable_layers:
        wfs_layer = {
            # NOTE(review): this is the *builtin* ``id`` function, not an
            # actual identifier -- looks unintended; confirm what value
            # should go here.
            'id': id,
            'name': titles[layer],
            'layer': layer,
            'url': url_for('.tinyows_wfs', token=wfs_layer_token, _external=True) + '?',
            'srs': 'EPSG:3857',
            'geometry_field': 'geometry',
            'wfs_version': '1.1.0',
            'feature_ns': current_app.config.get('TINYOWS_NS_URI'),
            'typename': current_app.config.get('TINYOWS_NS_PREFIX'),
            'writable': False,
            'display_in_layerswitcher': True,
        }
        schema = couch.layer_schema(layer)
        if not schema or 'properties' not in schema:
            raise MissingSchemaError('no schema found for layer %s' % layer)
        extend_schema_for_couchdb(schema)
        # tinyows layername must not contain underscores
        tablename = 'tmp%s%s' % (user.id, layer)
        tmp_db = TempPGDB(connection=connection, tablename=tablename, schema=schema)
        tmp_db.create_table()
        tmp_db.insert_features(couch.iter_layer_features(layer))
        # TODO remember created table in new model, store wfs_layer_token
        # and remove old tinyows configs on update
        wfs_layer['layer'] = tablename
        # Only the read-only layer stays non-writable.
        wfs_layer['writable'] = current_app.config.get('USER_READONLY_LAYER') != layer
        tinyows_layers.append({
            'name': tablename,
            'title': wfs_layer['name'],
            'writable': '1' if wfs_layer['writable'] else '0',
        })
        wfs.append(wfs_layer)
    connection.commit()
    connection.close()
    ensure_dir(current_app.config.get('TINYOWS_TMP_CONFIG_DIR'))
    tinyows_config = os.path.join(
        current_app.config.get('TINYOWS_TMP_CONFIG_DIR'),
        wfs_layer_token + '.xml')
    tinyows.build_config(current_app, tinyows_layers, wfs_layer_token, tinyows_config)
    # wfs_layers for search
    wfs_search = db.session.query(WFS).all()
    for layer in wfs_search:
        wfs.append({
            'id': layer.id,
            'name': layer.name,
            'layer': layer.layer,
            'url': layer.url,
            'srs': layer.srs,
            'geometry_field': layer.geometry,
            'wfs_version': '1.1.0',
            'feature_ns': layer.ns_uri,
            'typename': layer.ns_prefix,
            'writable': False,
            'search_property': layer.search_property,
            'display_in_layerswitcher': False,
        })
    return wfs, wfs_layer_token
@maps.route('/maps/wfs/<token>/service', methods=['GET', 'POST'])
def tinyows_wfs(token):
    """Proxy WFS requests to the tinyows CGI configured for *token*."""
    tinyows_config = os.path.join(
        current_app.config.get('TINYOWS_TMP_CONFIG_DIR'),
        token + '.xml')
    tows = tinyows.TinyOWSCGI(
        script=current_app.config.get('TINYOWS_BIN'),
        tinyows_config=tinyows_config,
        content_type=request.content_type)
    try:
        result = tows.open(url='/?' + request.query_string, data=request.data)
        return Response(result.read(), status=result.status_code,
                        content_type=result.headers.get('Content-type', 'text/xml'))
    except tinyows.CGIError, ex:  # Python 2 except syntax (file-wide)
        current_app.logger.error(ex)
        # Upstream CGI failure surfaces as 502 Bad Gateway.
        abort(502)
|
{
"content_hash": "10eeaa309670bc509ee76a923e32ad83",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 155,
"avg_line_length": 36.08529411764706,
"alnum_prop": 0.6438177520580325,
"repo_name": "omniscale/gbi-server",
"id": "87bb41cf713c367247acaed8dd7b2937d89a50ec",
"size": "12923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/gbi_server/views/maps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19718"
},
{
"name": "HTML",
"bytes": "100537"
},
{
"name": "JavaScript",
"bytes": "46641"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "202205"
}
],
"symlink_target": ""
}
|
import fixtures
import six
from senlin.common import exception
from senlin.common.i18n import _
from senlin.tests.unit.common import base
class TestException(exception.SenlinException):
    # Format string with a required ``text`` key; used below to exercise
    # the fatal-format-error path when the key is not supplied.
    msg_fmt = _("Testing message %(text)s")
class TestSenlinException(base.SenlinTestCase):

    def test_fatal_exception_error(self):
        """With fatal format errors enabled, a missing key raises KeyError."""
        self.useFixture(fixtures.MonkeyPatch(
            'senlin.common.exception._FATAL_EXCEPTION_FORMAT_ERRORS',
            True))
        # TestException's msg_fmt requires a 'text' kwarg; omitting it is fatal.
        self.assertRaises(KeyError, TestException)

    def test_format_string_error_message(self):
        """A plain message containing %()s placeholders is kept verbatim."""
        message = "This format %(message)s should work"
        err = exception.Error(message)
        self.assertEqual(message, six.text_type(err))
|
{
"content_hash": "650518236d43a7d5dd3bf59368d40ecd",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 29.583333333333332,
"alnum_prop": 0.7112676056338029,
"repo_name": "Alzon/senlin",
"id": "a9ae58158b0a3618bd874f17f2741f6e17e1c96e",
"size": "1260",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "senlin/tests/unit/test_common_exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1931099"
},
{
"name": "Shell",
"bytes": "16531"
}
],
"symlink_target": ""
}
|
"""
This module defines a few common generators for slicing over arrays.
They are defined on ndarray, so they do not depend on Image.
* data_generator: return (item, data[item]) tuples from an iterable object
* slice_generator: return slices through an ndarray, possibly over many
indices
* f_generator: return a generator that applies a function to the
output of another generator
The above three generators return 2-tuples.
* write_data: write the output of a generator to an ndarray
* parcels: return binary array of the unique components of data
"""
import numpy as np
def parcels(data, labels=None, exclude=()):
    """Yield boolean masks ``data == label`` for each label in *labels*.

    Parameters
    ----------
    data : image or array-like
        Either an image (an object with a ``get_data`` method returning an
        ndarray) or anything ``numpy.asarray`` accepts.
    labels : iterable, optional
        Labels for which to yield masks.  Defaults to
        ``numpy.unique(data)``.  An element may itself be a tuple or list,
        in which case the yielded mask is the union of the matches for
        each of its values.
    exclude : iterable, optional
        Labels for which no mask should be yielded.

    Yields
    ------
    ndarray
        Boolean array, True where ``data`` equals the label (or any value
        of a tuple/list label).
    """
    # Accept images (with get_data) as well as plain array-likes.
    try:
        data = data.get_data()
    except AttributeError:
        data = np.asarray(data)
    if labels is None:
        labels = np.unique(data)
    for label in labels:
        if label in exclude:
            continue
        # isinstance instead of exact type() comparison (idiom fix; also
        # accepts tuple/list subclasses).
        if isinstance(label, (tuple, list)):
            # Union of the matches for each value of the compound label.
            mask = np.zeros(data.shape, dtype=bool)
            for value in label:
                mask |= np.equal(data, value)
            yield mask
        else:
            yield np.equal(data, label)
def data_generator(data, iterable=None):
    """Yield ``(index, data[index])`` pairs for each index in *iterable*.

    When *iterable* is None it defaults to ``range(data.shape[0])``, i.e.
    iteration over the first axis.
    """
    arr = np.asarray(data)
    indices = range(arr.shape[0]) if iterable is None else iterable
    for idx in indices:
        yield idx, arr[idx]
def write_data(output, iterable):
    """Assign ``output[index] = value`` for every pair in *iterable*.

    *iterable* must yield 2-tuples ``(index, value)`` such that the
    assignment ``output[index] = value`` is valid.
    """
    for idx, value in iterable:
        output[idx] = value
def slice_generator(data, axis=0):
    """Yield ``(index, data[index])`` for slices of *data* along *axis*.

    *axis* may be a single int, in which case one slice per position along
    that axis is yielded, or a tuple/list of axes, in which case the
    cartesian product of the index ranges of those axes is iterated.  In
    both cases the yielded index is a tuple usable as ``data[index]``.

    Fixes over the original implementation (behavior preserved for valid
    inputs):
    - ``//`` instead of Python-2-only integer ``/`` division;
    - plain loop termination instead of ``raise StopIteration`` inside a
      generator (a RuntimeError under PEP 479 / Python 3.7+);
    - ``np.prod`` instead of ``np.product`` (removed in NumPy 2.0);
    - the multi-axis branch yields a fresh tuple per iteration rather than
      the same mutated list (list indexing of arrays is also deprecated).
    """
    data = np.asarray(data)
    if type(axis) is int:
        for j in range(data.shape[axis]):
            index = (slice(None, None, None),) * axis + (j,)
            yield index, data[index]
    elif type(axis) in (tuple, list):
        # Total number of iterations: product of the selected axis lengths.
        nmax = int(np.prod(np.asarray(data.shape)[axis]))
        # div/mod pairs decode the flat counter n into per-axis positions.
        mods = np.cumprod(np.asarray(data.shape)[axis])
        divs = [1] + list(mods[:-1])
        # Full-extent slices for the untouched axes.
        slices = [slice(0, s) for s in data.shape]
        for n in range(nmax):
            for a, div, mod in zip(axis, divs, mods):
                slices[a] = int(n // div % mod)
            index = tuple(slices)
            yield index, data[index]
def f_generator(f, iterable):
    """Apply *f* to the value of every ``(index, value)`` pair.

    Yields ``(index, numpy.asarray(f(value)))`` for each pair produced by
    *iterable*.
    """
    for index, value in iterable:
        yield index, np.asarray(f(value))
def slice_parcels(data, labels=None, axis=0):
    """Yield ``(slice_index, mask)`` pairs for every parcel in every slice.

    For each slice of *data* taken along *axis* (via ``slice_generator``),
    yields one pair per parcel found in that slice (via ``parcels``),
    where ``mask`` is a boolean array indexing the parcel within the
    slice.
    """
    for slice_index, slab in slice_generator(data, axis=axis):
        for mask in parcels(slab, labels=labels):
            yield (slice_index, mask)
def matrix_generator(img):
    """Reshape the array of every ``(index, array)`` pair to 2-D.

    For each ``(i, r)`` produced by *img*, yields ``(i, r)`` with ``r``
    reshaped (in place, so the caller's reference changes too -- preserved
    from the original) to ``(r.shape[0], prod(r.shape[1:]))``.

    Fixes: ``np.prod`` instead of ``np.product`` (removed in NumPy 2.0),
    and the product is cast to int so 1-D input (empty tail shape, whose
    prod is the float 1.0) no longer produces a non-integer shape.
    """
    for i, r in img:
        r.shape = (r.shape[0], int(np.prod(r.shape[1:])))
        yield i, r
def shape_generator(img, shape):
    """Reshape the array of every ``(index, array)`` pair to *shape*.

    The reshape is done in place via the ``shape`` attribute, so the
    caller's reference to the array is affected as well.
    """
    for index, arr in img:
        arr.shape = shape
        yield index, arr
|
{
"content_hash": "35a0e87ac271d2490986d254a803bbff",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 84,
"avg_line_length": 30.072,
"alnum_prop": 0.5497472732109604,
"repo_name": "arokem/nipy",
"id": "d762a7249c41ec1527a9b40c6aa3f461280ce054",
"size": "7632",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipy/core/utils/generators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1601255"
},
{
"name": "C++",
"bytes": "999"
},
{
"name": "Makefile",
"bytes": "2786"
},
{
"name": "Matlab",
"bytes": "5508"
},
{
"name": "Python",
"bytes": "3047221"
}
],
"symlink_target": ""
}
|
from cattr._compat import is_bare, is_py37, is_py38
if is_py37 or is_py38:
    def change_type_param(cl, new_params):
        # On 3.7/3.8 parameterized generics provide copy_with(); bare
        # generics (no parameters yet) must be subscripted instead.
        if is_bare(cl):
            return cl[new_params]
        return cl.copy_with(new_params)
else:
    def change_type_param(cl, new_params):
        # Other versions: rewrite the generic's __args__ directly.
        # NOTE(review): this mutates ``cl`` in place (unlike the branch
        # above, which returns a new object).
        cl.__args__ = (new_params,)
        return cl
|
{
"content_hash": "94bd9edd8e2eb11f459faf12433c0539",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 51,
"avg_line_length": 21.6,
"alnum_prop": 0.5925925925925926,
"repo_name": "Tinche/cattrs",
"id": "92a00fcd294ecc1dee2f1d16f1cf7c40c6bf17e0",
"size": "324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/_compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2290"
},
{
"name": "Python",
"bytes": "80520"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class EventsResult(Model):
    """An event query result.

    :param aimessages: OData messages for this response.
    :type aimessages: list[~azure.applicationinsights.models.ErrorInfo]
    :param value: The events query result data.
    :type value: ~azure.applicationinsights.models.EventsResultData
    """

    # Maps attribute names to wire-format keys; the backslash escapes the
    # dot in '@ai.messages' so msrest does not treat it as a sub-path.
    _attribute_map = {
        'aimessages': {'key': '@ai\\.messages', 'type': '[ErrorInfo]'},
        'value': {'key': 'value', 'type': 'EventsResultData'},
    }

    def __init__(self, *, aimessages=None, value=None, **kwargs) -> None:
        super(EventsResult, self).__init__(**kwargs)
        self.aimessages = aimessages
        self.value = value
|
{
"content_hash": "03d7d4db05c62648d6e70172053f6532",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.6377025036818851,
"repo_name": "Azure/azure-sdk-for-python",
"id": "3c66e4efaf267749b131f085de0a7a794abf0d5b",
"size": "1153",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/applicationinsights/azure-applicationinsights/azure/applicationinsights/models/events_result_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from bambou import NURESTFetcher
class NUScheduledTestSuitesFetcher(NURESTFetcher):
    """ Represents a NUScheduledTestSuites fetcher

        Notes:
            This fetcher enables to fetch NUScheduledTestSuite objects.

        See:
            bambou.NURESTFetcher
    """

    @classmethod
    def managed_class(cls):
        """ Return NUScheduledTestSuite class that is managed.

            Returns:
                .NUScheduledTestSuite: the managed class
        """
        # Imported lazily -- presumably to avoid a circular import with
        # the parent package; confirm before moving to module level.
        from .. import NUScheduledTestSuite
        return NUScheduledTestSuite
|
{
"content_hash": "ebd51274115efc5e0f76d8c57fb25cf5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 71,
"avg_line_length": 22.64,
"alnum_prop": 0.6431095406360424,
"repo_name": "nuagenetworks/vspk-python",
"id": "cb45a279293aee9cf7edfb8deefd80f4628b0802",
"size": "2177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vspk/v6/fetchers/nuscheduledtestsuites_fetcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12909327"
}
],
"symlink_target": ""
}
|
import re
import urllib.parse, urllib.request
# Address of the login web page
captivePortailUrl = "https://portail-promologis-lan.insa-toulouse.fr"\
":8001"
def connect(username, password):
    """Log in to the captive portal and return the disconnection token.

    Posts the credentials to the portal login form, then scrapes the
    ``logout_id`` hidden field from the response page.

    Raises RuntimeError when the token cannot be found (wrong credentials,
    or the portal's login/response pages have changed).
    """
    parameters = {'accept': 'Connexion',
                  'auth_user': username,
                  'auth_pass': password,
                  'redirurl': 'https://www.kernel.org',
                  'checkbox_charte': 'on'}
    data = urllib.parse.urlencode(parameters).encode('ascii')
    # Create and send http request
    request = urllib.request.Request(captivePortailUrl, data)
    httpResponse = urllib.request.urlopen(request)
    htmlResponse = httpResponse.read().decode('ascii')
    # Search for the disconnection token and return it
    tokenExtractor = re.compile(r"NAME=\"logout_id\" TYPE=\"hidden\" "
                                "VALUE=\"([0-9|a-z]+)\"")
    matchResult = tokenExtractor.search(htmlResponse)
    # Idiom fix: 'is not None'; the pattern always has exactly one group,
    # so the former len(groups()) > 0 check was redundant.
    if matchResult is not None:
        return matchResult.group(1)
    raise RuntimeError("Cannot match HTML response. "\
                       "Maybe username/password are wrong,"\
                       " or login page/response have changed")
def disconnect(disconnectionToken):
    """Log out from the captive portal using *disconnectionToken*.

    Raises RuntimeError when the portal does not confirm the
    disconnection (changed page, or an invalid token).
    """
    parameters = {'logout_id': disconnectionToken,
                  'logout': 'Deconnection'}
    data = urllib.parse.urlencode(parameters).encode('ascii')
    # Create and send http request
    request = urllib.request.Request(captivePortailUrl, data)
    httpResponse = urllib.request.urlopen(request)
    htmlResponse = httpResponse.read().decode('ascii')
    # Idiom fix: search directly (re caches compiled patterns) and compare
    # with 'is None'.
    if re.search("You have been disconnected.", htmlResponse) is None:
        raise RuntimeError("Cannot match HTML response. "\
                           "Maybe disconnection web page has changed"\
                           " or the token id was not correct")
if __name__ == '__main__':
    # Example usage -- replace the placeholder credentials before running.
    token = connect('username','password')
    disconnect(token)
|
{
"content_hash": "caf26035305f97636393f8e5032c5463",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 71,
"avg_line_length": 38.80357142857143,
"alnum_prop": 0.6336861481822366,
"repo_name": "remy-phelipot/PromoConnect",
"id": "34f3ae9fc7dac5cdfb74c24f000ff26dc9a4934a",
"size": "2173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Network.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12459"
}
],
"symlink_target": ""
}
|
"""Path related utilities."""
import os
import sys
from os.path import commonprefix, abspath, join, splitext
def sub_path(path, parent):
    """Returns the portion of ``path`` that lies under ``parent``"""
    abs_path = abspath(path)
    abs_parent = abspath(parent)
    shared = commonprefix((abs_path, abs_parent))
    # NOTE(review): commonprefix is character-based, so sibling paths that
    # share a name prefix (e.g. /foo/bar vs /foo/barbaz) are not split on
    # a component boundary -- behavior preserved as-is.
    return abs_path[len(shared) + 1:]
def iter_files(ext_filter, path=".", exclude=None):
    """
    An iterator returning all the files under *path* whose extension matches
    *ext_filter*.

    The optional *exclude* argument should be a list of path prefixes to
    exclude from the search; ``None`` (the default) excludes nothing.
    """
    # Bug fix: the default exclude=None was iterated directly, raising
    # TypeError whenever the argument was omitted.
    if exclude is None:
        exclude = ()
    for dirpath, dirnames, filenames in os.walk(path):
        # Skip any directory whose path starts with an excluded prefix.
        if any(dirpath.startswith(prefix) for prefix in exclude):
            continue
        for fname in filenames:
            if splitext(fname)[1] == ext_filter:
                yield join(dirpath, fname)
def file_mtime(path):
    """
    Get the (correct) modification time of the file at *path*.
    """
    info = os.stat(path)
    result = info.st_mtime
    # NOTE(review): on Windows the creation time is subtracted, which
    # yields a delta rather than an absolute timestamp -- preserved as-is.
    if sys.platform == "win32":
        result -= info.st_ctime
    return result
|
{
"content_hash": "88c91e52f436a8518c314c0a4e532c51",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 27.288888888888888,
"alnum_prop": 0.5977198697068404,
"repo_name": "flupke/pyflu",
"id": "a4829e956e6a16efa0bfed3a60aa9661f4013f98",
"size": "1228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyflu/path.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "142316"
}
],
"symlink_target": ""
}
|
import sys
from inspect import getsourcefile
from os import path
CURRENT_DIR = path.dirname(path.abspath(getsourcefile(lambda: 0)))
sys.path.insert(0, CURRENT_DIR[: CURRENT_DIR.rfind(path.sep)])
import click # pylint: disable=wrong-import-position
from ghlint import config # pylint: disable=wrong-import-position
from ghlint import lint # pylint: disable=wrong-import-position
@click.command()
def main():
    # Load the configuration and run the linter over it.  (Comment rather
    # than docstring on purpose: click would surface a docstring as the
    # command's --help text.)
    settings = config.settings()
    lint.run(settings)
    return


if __name__ == '__main__':
    main()
|
{
"content_hash": "3f4cd718f6cbed921d2cd389c824dc5d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 23.59090909090909,
"alnum_prop": 0.7283236994219653,
"repo_name": "martinbuberl/ghlint",
"id": "f4725dab71f52ffad653ec06e31da53d72f282e2",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ghlint/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "170"
},
{
"name": "Python",
"bytes": "10196"
}
],
"symlink_target": ""
}
|
"""
WSGI config for nuremberg project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nuremberg.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "e789f92710f18b47f1da2e994ddad09a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.071428571428573,
"alnum_prop": 0.7760814249363868,
"repo_name": "harvard-lil/nuremberg",
"id": "585f1a3326c1e30d6497f8a6517ad7a0dfc464fa",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "web/nuremberg/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "272812"
},
{
"name": "JavaScript",
"bytes": "81283"
},
{
"name": "Less",
"bytes": "253777"
},
{
"name": "Python",
"bytes": "167981"
}
],
"symlink_target": ""
}
|
# Python 2 script: read n from stdin and print n! (arbitrary-precision ints).
n = int(raw_input())
res = 1
# Factorial in Python, easy because big integers :-P
for i in range(1,n+1):
    res *= i
print res
|
{
"content_hash": "05e9c5e8f8aad6f39674c58fc9a531cf",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 52,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.640625,
"repo_name": "zubie7a/CPP",
"id": "f24937bcd2d5688511d4a8d0c67f5318f335c0e2",
"size": "190",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "HackerRank/Algorithms/02_Implementation/16_Extra_Long_Factorials.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "290798"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.files.storage import get_storage_class
import path
def getRoot():
    """Convenience to return the media root with forward slashes

    Normalizes the storage backend's location to forward slashes and
    guarantees a trailing slash, wrapped in a ``path.Path``.
    """
    storage = get_storage_class()()
    location = storage.location.replace("\\", "/")
    if not location.endswith("/"):
        location = location + "/"
    return path.Path(location)
|
{
"content_hash": "bc0313f47be795c0bed587f6d8d4d1fc",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 67,
"avg_line_length": 24.928571428571427,
"alnum_prop": 0.66189111747851,
"repo_name": "theiviaxx/Frog",
"id": "e0299191f7109e29510a56d85e2e9f70ccbcdf66",
"size": "1634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frog/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "22225"
},
{
"name": "JavaScript",
"bytes": "57292"
},
{
"name": "Python",
"bytes": "215494"
}
],
"symlink_target": ""
}
|
from common import * # NOQA
from cattle import ApiError
SERVICE_KIND = 'kubernetesService'
def from_context(context):
    """Unpack the client, agent client and host from a test *context*."""
    client = context.client
    agent = context.agent_client
    host = context.host
    return client, agent, host
def test_bad_agent(super_client, new_context):
    """External storage-pool events must come from exactly one active agent."""
    # Register a simulated host and keep its agent client for posting events.
    _, account, agent_client = register_simulated_host(new_context,
                                                       return_agent=True)

    def post():
        external_id = random_str()
        agent_client.create_external_storage_pool_event(
            externalId=external_id,
            eventType="storagepool.create",
            hostUuids=[],
            storagePool={
                'name': 'name-%s' % external_id,
                'externalId': external_id,
            })

    # Test it works
    post()

    # Test it fails with two agents
    super_client.wait_success(super_client.create_agent(
        uri='test://' + random_str(),
        accountId=account.id))

    with pytest.raises(ApiError) as e:
        post()
    assert e.value.error.code == 'MissingRequired'

    # Test it fails with no agents
    for agent in super_client.list_agent(accountId=account.id):
        super_client.wait_success(agent.deactivate())

    with pytest.raises(ApiError) as e:
        post()
    assert e.value.error.code == 'CantVerifyAgent'
def test_external_host_event_miss(new_context):
    """An evacuate event whose host label matches nothing leaves hosts alone."""
    new_context.create_container()
    client = new_context.client
    host = new_context.host
    # No host carries the 'foo=bar' label, so the event is a no-op.
    event = client.create_external_host_event(hostLabel='foo=bar',
                                              eventType='host.evacuate',
                                              deleteHost=True)
    event = client.wait_success(event)
    host = client.reload(host)

    assert event.state == 'created'
    assert host.state == 'active'
def test_external_host_event_wrong_event(new_context):
    """An unknown event type does not evacuate even when the label matches."""
    c = new_context.create_container()
    client = new_context.client
    host = client.update(new_context.host, labels={
        'foo': 'bar'
    })
    host = client.wait_success(host)
    assert host.labels == {'foo': 'bar'}

    # 'host.notevacuate' is not a recognized evacuation event type.
    event = client.create_external_host_event(hostLabel='foo=bar',
                                              eventType='host.notevacuate',
                                              deleteHost=True)
    assert event.state == 'creating'
    event = client.wait_success(event)
    host = client.reload(host)
    c = client.wait_success(c)

    assert event.state == 'created'
    assert host.state == 'active'
    assert c.state == 'running'
def test_external_host_event_hit(new_context):
    """A matching evacuate event with deleteHost removes host and containers."""
    c = new_context.create_container()
    client = new_context.client
    host = client.wait_success(new_context.host)
    host = client.update(host, labels={
        'foo': 'bar'
    })
    host = client.wait_success(host)
    assert host.labels == {'foo': 'bar'}

    event = client.create_external_host_event(hostLabel='foo=bar',
                                              eventType='host.evacuate',
                                              deleteHost=True)
    assert event.state == 'creating'
    event = client.wait_success(event)
    host = client.reload(host)
    c = client.wait_success(c)

    assert event.state == 'created'
    assert host.removed is not None
    assert c.removed is not None
def test_external_host_event_no_delete(new_context):
    """Without deleteHost, a matching evacuate event only deactivates the host."""
    c = new_context.create_container()
    client = new_context.client
    host = client.update(new_context.host, labels={
        'foo': 'bar'
    })
    host = client.wait_success(host)
    assert host.labels == {'foo': 'bar'}

    event = client.create_external_host_event(hostLabel='foo=bar',
                                              eventType='host.evacuate')
    assert event.state == 'creating'
    event = client.wait_success(event)
    host = client.reload(host)
    c = client.wait_success(c)

    assert event.state == 'created'
    assert host.state == 'inactive'
def test_external_host_event_by_id(new_context):
    """Evacuation addressed by explicit hostId only affects that host."""
    c = new_context.create_container()
    # A second host that must stay untouched by the targeted event.
    new_host = register_simulated_host(new_context)
    client = new_context.client
    host = client.update(new_context.host, labels={
        'foo': 'bar'
    })
    host = client.wait_success(host)
    assert host.labels == {'foo': 'bar'}

    event = client.create_external_host_event(hostId=host.id,
                                              eventType='host.evacuate')
    assert event.state == 'creating'
    event = client.wait_success(event)
    new_host = client.reload(new_host)
    c = client.wait_success(c)
    host = client.reload(host)

    assert event.state == 'created'
    assert host.state == 'inactive'
    assert new_host.state == 'active'
def test_external_dns_event(super_client, new_context):
    """An external DNS event updates the matching service's FQDN."""
    client, agent_client, host = from_context(new_context)
    stack = client.create_environment(name=random_str())
    stack = client.wait_success(stack)
    image_uuid = new_context.image_uuid
    launch_config = {"imageUuid": image_uuid}

    svc1 = client.create_service(name=random_str(),
                                 environmentId=stack.id,
                                 launchConfig=launch_config)
    svc1 = client.wait_success(svc1)

    domain_name1 = "foo.com"
    create_dns_event(client, agent_client, super_client,
                     new_context, svc1.name,
                     stack.name, domain_name1)

    # wait for dns name to be updated
    svc1 = client.reload(svc1)
    assert svc1.fqdn == domain_name1
def create_dns_event(client, agent_client, super_client,
                     context, svc_name1,
                     stack_name, domain_name):
    """Post an externalDnsEvent through the agent client and wait for it
    to reach the 'created' state.

    Returns the processed event. Note the payload mixes quote styles but is
    a single dict: externalId/eventType plus the stack/service/fqdn the DNS
    provider reported.
    """
    external_id = random_str()
    event_type = "externalDnsEvent"
    dns_event = {
        'externalId': external_id,
        'eventType': event_type,
        "stackName": stack_name,
        "serviceName": svc_name1,
        "fqdn": domain_name
    }
    event = agent_client.create_external_dns_event(dns_event)
    assert event.externalId == external_id
    assert event.eventType == event_type
    # Poll until the server finishes processing the event.
    event = wait_for(lambda: event_wait(client, event))
    # Event is attributed to the project, but reported by the agent account.
    assert event.accountId == context.project.id
    assert event.reportedAccountId == context.agent.id
    return event
def test_external_service_event_create(client, context, super_client):
    """Full lifecycle of an externally-managed service driven by external
    service events: create, update, then remove.
    """
    agent_client = context.agent_client
    env_external_id = random_str()
    environment = {"name": "foo", "externalId": env_external_id}
    svc_external_id = random_str()
    svc_name = 'svc-name-%s' % svc_external_id
    selector = 'foo=bar1'
    template = {'foo': 'bar'}
    svc_data = {
        'selectorContainer': selector,
        'kind': SERVICE_KIND,
        'name': svc_name,
        'externalId': svc_external_id,
        'template': template,
    }
    # Create: the event should materialize both the stack (environment)
    # and the service with all supplied fields.
    event = agent_client.create_external_service_event(
        eventType='service.create',
        environment=environment,
        externalId=svc_external_id,
        service=svc_data,
    )
    event = wait_for(lambda: event_wait(client, event))
    assert event is not None
    svc = wait_for(lambda: service_wait(client, svc_external_id))
    assert svc.externalId == svc_external_id
    assert svc.name == svc_name
    assert svc.kind == SERVICE_KIND
    assert svc.selectorContainer == selector
    assert svc.environmentId is not None
    assert svc.template == template
    # The environment was created implicitly and the service belongs to it.
    envs = client.list_environment(externalId=env_external_id)
    assert len(envs) == 1
    assert envs[0].id == svc.environmentId
    wait_for_condition(client, svc,
                       lambda x: x.state == 'active',
                       lambda x: 'State is: ' + x.state)
    # Update
    new_selector = 'newselector=foo'
    svc_data = {
        'selectorContainer': new_selector,
        'kind': SERVICE_KIND,
        'template': {'foo': 'bar'},
    }
    agent_client.create_external_service_event(
        eventType='service.update',
        environment=environment,
        externalId=svc_external_id,
        service=svc_data,
    )
    wait_for_condition(client, svc,
                       lambda x: x.selectorContainer == new_selector,
                       lambda x: 'Selector is: ' + x.selectorContainer)
    # Delete
    agent_client.create_external_service_event(
        name=svc_name,
        eventType='service.remove',
        externalId=svc_external_id,
        service={'kind': SERVICE_KIND},
    )
    wait_for_condition(client, svc,
                       lambda x: x.state == 'removed',
                       lambda x: 'State is: ' + x.state)
def test_external_stack_event_create(client, context, super_client):
    """A 'stack.remove' external service event deletes the matching
    environment (stack) identified by externalId.
    """
    agent_client = context.agent_client
    env_external_id = random_str()
    environment = {"name": env_external_id, "externalId": env_external_id,
                   "kind": "environment"}
    # Pre-create the environment the event will remove.
    env = client.create_environment(environment)
    env = client.wait_success(env)
    service = {
        'kind': SERVICE_KIND,
    }
    event = agent_client.create_external_service_event(
        eventType='stack.remove',
        environment=environment,
        externalId=env_external_id,
        service=service,
    )
    event = wait_for(lambda: event_wait(client, event))
    assert event is not None
    # The environment should disappear from listings once removed.
    wait_for(lambda:
             len(client.list_environment(externalId=env_external_id)) == 0)
def service_wait(client, external_id):
    """Return the kubernetes service with *external_id* once it is active.

    Yields None (implicitly) while no matching service exists or the first
    match has not yet reached 'active', so this is suitable as a wait_for()
    predicate.
    """
    matches = client.list_kubernetes_service(externalId=external_id)
    if not matches:
        return None
    candidate = matches[0]
    if candidate.state == 'active':
        return candidate
def event_wait(client, event):
    """Return the externalEvent once it has reached the 'created' state.

    Re-reads the event by id; yields None (implicitly) until the record
    exists and is 'created', so this is suitable as a wait_for() predicate.
    """
    reloaded = client.by_id('externalEvent', event.id)
    if reloaded is None:
        return None
    return reloaded if reloaded.state == 'created' else None
|
{
"content_hash": "eedf9e28f34aa91bc2ee178473ee24ba",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 75,
"avg_line_length": 30.671974522292995,
"alnum_prop": 0.6037794621534628,
"repo_name": "vincent99/cattle",
"id": "eb0c75d857ad7283ce33ca0419890878a5d60a55",
"size": "9631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration-v1/cattletest/core/test_external_events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5271"
},
{
"name": "FreeMarker",
"bytes": "71"
},
{
"name": "Java",
"bytes": "6154830"
},
{
"name": "Makefile",
"bytes": "308"
},
{
"name": "Python",
"bytes": "1577441"
},
{
"name": "Shell",
"bytes": "38830"
}
],
"symlink_target": ""
}
|
import os
import urllib
import re
from flask import Blueprint, request, Response, abort
from flask_negotiate import consumes
from flask.ext.cors import cross_origin
import settings
from model.utils import Utils
from model.loader import PluginLoader
# Blueprint grouping all Slack endpoints under the /api/slack URL prefix.
slack = Blueprint('slack', __name__, url_prefix='/api/slack')
@slack.route("/webhook", methods=['POST'])
@cross_origin()
@consumes('application/x-www-form-urlencoded')
def webhook():
    """Slack outgoing-webhook endpoint (Python 2 code — note `print form`).

    Validates the shared webhook token, ignores bot-triggered messages,
    then runs the message text through each loaded plugin's regex; the
    first plugin whose regex matches produces the JSON reply. An empty
    JSON object means "no reply".
    """
    # For debug
    form = request.form
    print form
    # Check slack webhook token in request body
    # parse_dic aborts with 400 if 'token' is missing (third arg).
    request_token = Utils().parse_dic(form, 'token', 400)
    token = os.environ.get('SLACK_WEBHOOK_TOKEN')
    if not token or token != request_token:
        abort(401)
    # Parse request body
    username = Utils().parse_dic(form, 'user_name')
    trigger_word_uni = Utils().parse_dic(form, 'trigger_word')
    text_uni = Utils().parse_dic(form, 'text')
    # Check trigger user is not bot
    # Ignoring bots prevents the bot from answering (and looping on) itself.
    if not username or 'bot' in username:
        dic = {}
        return Response(Utils().dump_json(dic), mimetype='application/json')
    botname = settings.BOTNAME
    if not botname:
        abort(500)
    re_flags = settings.RE_FLAGS
    plugins = PluginLoader().get_plugins()
    content = None
    kwargs = {
        'text': text_uni,
        'trigger_word': trigger_word_uni,
        'botname': botname
    }
    # First plugin whose hear_regex matches the (URL-decoded) text wins.
    for plugin in plugins:
        regex_uni = Utils().convert_unicode(plugin().hear_regex(**kwargs))
        print("Using plugin : %r" % plugin)
        print("Using regex : %r" % regex_uni)
        print("Target text : %r" % urllib.unquote_plus(text_uni))
        if re.compile(regex_uni, re_flags).match(urllib.unquote_plus(text_uni)):
            print("Regex found :)")
            content = plugin().response(**kwargs)
            break
    if not content:
        dic = {}
    else:
        # Plugins may return either a ready-made Slack payload dict or a
        # bare string to be wrapped as {"text": ...}.
        if isinstance(content, dict) and (content.get('text') or content.get('attachments')):
            dic = content
        else:
            dic = {"text": content}
        # link_names makes Slack linkify @user / #channel mentions.
        dic["link_names"] = 1
    return Response(Utils().dump_json(dic), mimetype='application/json')
|
{
"content_hash": "7d9bd4c6504238eaa1a00e51c7124c45",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 93,
"avg_line_length": 30.285714285714285,
"alnum_prop": 0.6264150943396226,
"repo_name": "supistar/Botnyan",
"id": "8722de76b3202d5beb2a3385581b3d3465c55731",
"size": "2145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/slack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20345"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django import http
from django.test.utils import override_settings
from django.utils.translation import ugettext_lazy as _
from mox3.mox import IsA # noqa
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
class QuotaTests(test.APITestCase):
    """Tests for openstack_dashboard.usage.quotas using mox stubs.

    Each test records an exact sequence of expected API calls (mox), replays
    them, runs the quotas helper under test, and compares the resulting
    usage structure against the fixture-derived expectations from
    get_usages(). The stub call order mirrors the call order inside the
    quotas module, so statement order here is significant.
    """

    def get_usages(self, with_volume=True, with_compute=True,
                   nova_quotas_enabled=True):
        """Build the expected usages dict for the standard fixtures.

        Flags mirror the service-enablement combinations exercised by the
        tests; values match the nova/cinder quota and usage fixtures.
        """
        usages = {}
        if with_compute:
            # These are all nova fields; the neutron ones are named slightly
            # differently and aren't included in here yet
            if nova_quotas_enabled:
                usages.update({
                    'injected_file_content_bytes': {'quota': 1},
                    'metadata_items': {'quota': 1},
                    'injected_files': {'quota': 1},
                    'security_groups': {'quota': 10},
                    'security_group_rules': {'quota': 20},
                    'fixed_ips': {'quota': 10},
                    'ram': {'available': 8976, 'used': 1024, 'quota': 10000},
                    'floating_ips': {'available': 0, 'used': 2, 'quota': 1},
                    'instances': {'available': 8, 'used': 2, 'quota': 10},
                    'cores': {'available': 8, 'used': 2, 'quota': 10}
                })
            else:
                # With nova quotas disabled every limit reads as infinite.
                inf = float('inf')
                usages.update({
                    'security_groups': {'available': inf, 'quota': inf},
                    'ram': {'available': inf, 'used': 1024, 'quota': inf},
                    'floating_ips': {'available': inf, 'used': 2,
                                     'quota': inf},
                    'instances': {'available': inf, 'used': 2, 'quota': inf},
                    'cores': {'available': inf, 'used': 2, 'quota': inf}
                })
        if with_volume:
            usages.update({'volumes': {'available': 0, 'used': 4, 'quota': 1},
                           'snapshots': {'available': 0, 'used': 3,
                                         'quota': 1},
                           'gigabytes': {'available': 880, 'used': 120,
                                         'quota': 1000}})
        return usages

    def assertAvailableQuotasEqual(self, expected_usages, actual_usages):
        """Compare only the 'available' values of two usage dicts."""
        expected_available = {key: value['available'] for key, value in
                              expected_usages.items() if 'available' in value}
        actual_available = {key: value['available'] for key, value in
                            actual_usages.items() if 'available' in value}
        self.assertEqual(expected_available, actual_available)

    @test.create_stubs({api.nova: ('server_list',
                                   'flavor_list',
                                   'tenant_quota_get',),
                        api.network: ('tenant_floating_ip_list',
                                      'floating_ip_supported'),
                        api.base: ('is_service_enabled',),
                        cinder: ('volume_list', 'volume_snapshot_list',
                                 'tenant_quota_get',
                                 'is_volume_service_enabled')})
    def _test_tenant_quota_usages(self, nova_quotas_enabled=True,
                                  with_compute=True, with_volume=True):
        # Shared driver: stub the service-enablement probes, then the
        # compute and volume API calls that tenant_quota_usages performs.
        cinder.is_volume_service_enabled(IsA(http.HttpRequest)).AndReturn(
            with_volume)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'network').AndReturn(False)
        api.base.is_service_enabled(
            IsA(http.HttpRequest), 'compute'
        ).MultipleTimes().AndReturn(with_compute)
        if with_compute:
            servers = [s for s in self.servers.list()
                       if s.tenant_id == self.request.user.tenant_id]
            api.nova.flavor_list(IsA(http.HttpRequest)) \
                .AndReturn(self.flavors.list())
            api.network.floating_ip_supported(IsA(http.HttpRequest)) \
                .AndReturn(True)
            api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
                .AndReturn(self.floating_ips.list())
            search_opts = {'tenant_id': self.request.user.tenant_id}
            api.nova.server_list(IsA(http.HttpRequest),
                                 search_opts=search_opts,
                                 all_tenants=True) \
                .AndReturn([servers, False])
            if nova_quotas_enabled:
                api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
                    .AndReturn(self.quotas.first())
        if with_volume:
            opts = {'all_tenants': 1,
                    'project_id': self.request.user.tenant_id}
            cinder.volume_list(IsA(http.HttpRequest), opts) \
                .AndReturn(self.volumes.list())
            cinder.volume_snapshot_list(IsA(http.HttpRequest), opts) \
                .AndReturn(self.cinder_volume_snapshots.list())
            cinder.tenant_quota_get(IsA(http.HttpRequest), '1') \
                .AndReturn(self.cinder_quotas.first())
        self.mox.ReplayAll()
        quota_usages = quotas.tenant_quota_usages(self.request)
        expected_output = self.get_usages(
            nova_quotas_enabled=nova_quotas_enabled, with_volume=with_volume,
            with_compute=with_compute)
        # Compare internal structure of usages to expected.
        self.assertItemsEqual(expected_output, quota_usages.usages)
        # Compare available resources
        self.assertAvailableQuotasEqual(expected_output, quota_usages.usages)

    def test_tenant_quota_usages(self):
        # Default path: compute + volume services with nova quotas enabled.
        self._test_tenant_quota_usages()

    @override_settings(OPENSTACK_HYPERVISOR_FEATURES={'enable_quotas': False})
    def test_tenant_quota_usages_wo_nova_quotas(self):
        # Nova quotas disabled via settings: all compute limits become inf.
        self._test_tenant_quota_usages(nova_quotas_enabled=False,
                                       with_compute=True,
                                       with_volume=False)

    @override_settings(OPENSTACK_HYPERVISOR_FEATURES={'enable_quotas': False})
    @test.create_stubs({api.base: ('is_service_enabled',),
                        cinder: ('is_volume_service_enabled',)})
    def test_get_all_disabled_quotas(self):
        # With cinder/neutron off and nova quotas disabled, every quota
        # field family ends up in the disabled set.
        cinder.is_volume_service_enabled(IsA(http.HttpRequest)).AndReturn(
            False)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'network').AndReturn(False)
        # Nova enabled but quotas disabled
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'compute').AndReturn(True)
        self.mox.ReplayAll()
        result_quotas = quotas.get_disabled_quotas(self.request)
        expected_quotas = list(quotas.CINDER_QUOTA_FIELDS) + \
            list(quotas.NEUTRON_QUOTA_FIELDS) + \
            list(quotas.NOVA_QUOTA_FIELDS) + list(quotas.MISSING_QUOTA_FIELDS)
        self.assertItemsEqual(result_quotas, expected_quotas)

    @test.create_stubs({api.nova: ('server_list',
                                   'flavor_list',
                                   'tenant_quota_get',),
                        api.network: ('tenant_floating_ip_list',
                                      'floating_ip_supported'),
                        api.base: ('is_service_enabled',),
                        api.cinder: ('is_volume_service_enabled',)})
    def test_tenant_quota_usages_without_volume(self):
        servers = [s for s in self.servers.list()
                   if s.tenant_id == self.request.user.tenant_id]
        api.cinder.is_volume_service_enabled(
            IsA(http.HttpRequest)
        ).AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'network').AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'compute').MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
            .AndReturn(self.quotas.first())
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        search_opts = {'tenant_id': self.request.user.tenant_id}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts,
                             all_tenants=True) \
            .AndReturn([servers, False])
        self.mox.ReplayAll()
        quota_usages = quotas.tenant_quota_usages(self.request)
        expected_output = self.get_usages(with_volume=False)
        # Compare internal structure of usages to expected.
        self.assertItemsEqual(expected_output, quota_usages.usages)
        # Make sure that the `in` operator and the `.get()` method
        # behave as expected
        self.assertIn('ram', quota_usages)
        self.assertIsNotNone(quota_usages.get('ram'))

    @test.create_stubs({api.nova: ('server_list',
                                   'flavor_list',
                                   'tenant_quota_get',),
                        api.network: ('tenant_floating_ip_list',
                                      'floating_ip_supported'),
                        api.base: ('is_service_enabled',),
                        api.cinder: ('is_volume_service_enabled',)})
    def test_tenant_quota_usages_no_instances_running(self):
        # No servers and no floating IPs: 'used' drops to 0 everywhere.
        api.cinder.is_volume_service_enabled(
            IsA(http.HttpRequest)
        ).AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'network').AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'compute').MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
            .AndReturn(self.quotas.first())
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        search_opts = {'tenant_id': self.request.user.tenant_id}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts,
                             all_tenants=True) \
            .AndReturn([[], False])
        self.mox.ReplayAll()
        quota_usages = quotas.tenant_quota_usages(self.request)
        expected_output = self.get_usages(with_volume=False)
        expected_output.update({
            'ram': {'available': 10000, 'used': 0, 'quota': 10000},
            'floating_ips': {'available': 1, 'used': 0, 'quota': 1},
            'instances': {'available': 10, 'used': 0, 'quota': 10},
            'cores': {'available': 10, 'used': 0, 'quota': 10}})
        # Compare internal structure of usages to expected.
        self.assertItemsEqual(expected_output, quota_usages.usages)

    @test.create_stubs({api.nova: ('server_list',
                                   'flavor_list',
                                   'tenant_quota_get',),
                        api.network: ('tenant_floating_ip_list',
                                      'floating_ip_supported'),
                        api.base: ('is_service_enabled',),
                        cinder: ('volume_list', 'volume_snapshot_list',
                                 'tenant_quota_get',
                                 'is_volume_service_enabled')})
    def test_tenant_quota_usages_unlimited_quota(self):
        # A quota value of -1 means "unlimited" and must surface as inf.
        inf_quota = self.quotas.first()
        inf_quota['ram'] = -1
        servers = [s for s in self.servers.list()
                   if s.tenant_id == self.request.user.tenant_id]
        cinder.is_volume_service_enabled(
            IsA(http.HttpRequest)
        ).AndReturn(True)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'network').AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'compute').MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
            .AndReturn(inf_quota)
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        search_opts = {'tenant_id': self.request.user.tenant_id}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts,
                             all_tenants=True) \
            .AndReturn([servers, False])
        opts = {'all_tenants': 1, 'project_id': self.request.user.tenant_id}
        cinder.volume_list(IsA(http.HttpRequest), opts) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest), opts) \
            .AndReturn(self.cinder_volume_snapshots.list())
        cinder.tenant_quota_get(IsA(http.HttpRequest), '1') \
            .AndReturn(self.cinder_quotas.first())
        self.mox.ReplayAll()
        quota_usages = quotas.tenant_quota_usages(self.request)
        expected_output = self.get_usages()
        expected_output.update({'ram': {'available': float("inf"),
                                        'used': 1024,
                                        'quota': float("inf")}})
        # Compare internal structure of usages to expected.
        self.assertItemsEqual(expected_output, quota_usages.usages)

    @test.create_stubs({api.nova: ('server_list',
                                   'flavor_list',
                                   'tenant_quota_get',),
                        api.network: ('tenant_floating_ip_list',
                                      'floating_ip_supported'),
                        api.base: ('is_service_enabled',),
                        cinder: ('volume_list', 'volume_snapshot_list',
                                 'tenant_quota_get',
                                 'is_volume_service_enabled')})
    def test_tenant_quota_usages_neutron_fip_disabled(self):
        # Floating IPs unsupported: the FIP list call is never made and
        # usage reads as 0 used / 1 available.
        servers = [s for s in self.servers.list()
                   if s.tenant_id == self.request.user.tenant_id]
        cinder.is_volume_service_enabled(
            IsA(http.HttpRequest)
        ).AndReturn(True)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'network').AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'compute').MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
            .AndReturn(self.quotas.first())
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(False)
        search_opts = {'tenant_id': self.request.user.tenant_id}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts,
                             all_tenants=True) \
            .AndReturn([servers, False])
        opts = {'all_tenants': 1, 'project_id': self.request.user.tenant_id}
        cinder.volume_list(IsA(http.HttpRequest), opts) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest), opts) \
            .AndReturn(self.cinder_volume_snapshots.list())
        cinder.tenant_quota_get(IsA(http.HttpRequest), '1') \
            .AndReturn(self.cinder_quotas.first())
        self.mox.ReplayAll()
        quota_usages = quotas.tenant_quota_usages(self.request)
        expected_output = self.get_usages()
        expected_output['floating_ips']['used'] = 0
        expected_output['floating_ips']['available'] = 1
        # Compare internal structure of usages to expected.
        self.assertItemsEqual(expected_output, quota_usages.usages)

    @test.create_stubs({cinder: ('volume_list',),
                        exceptions: ('handle',)})
    def test_get_tenant_volume_usages_cinder_exception(self):
        # A cinder client failure must be routed to exceptions.handle
        # with the user-facing volume-limit message, not raised.
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndRaise(cinder.cinder_exception.ClientException('test'))
        exceptions.handle(IsA(http.HttpRequest),
                          _("Unable to retrieve volume limit information."))
        self.mox.ReplayAll()
        quotas._get_tenant_volume_usages(self.request, {}, [], None)

    @test.create_stubs({api.base: ('is_service_enabled',),
                        api.cinder: ('tenant_quota_get',
                                     'is_volume_service_enabled'),
                        exceptions: ('handle',)})
    def test_get_quota_data_cinder_exception(self):
        # Same error-handling contract for the quota-fetch path.
        api.cinder.is_volume_service_enabled(
            IsA(http.HttpRequest)
        ).AndReturn(True)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'network').AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'compute').AndReturn(False)
        api.cinder.tenant_quota_get(IsA(http.HttpRequest), '1') \
            .AndRaise(cinder.cinder_exception.ClientException('test'))
        exceptions.handle(IsA(http.HttpRequest),
                          _("Unable to retrieve volume limit information."))
        self.mox.ReplayAll()
        quotas._get_quota_data(self.request, 'tenant_quota_get')

    @test.create_stubs({api.base: ('is_service_enabled',),
                        api.cinder: ('tenant_absolute_limits',
                                     'is_volume_service_enabled'),
                        exceptions: ('handle',)})
    def test_tenant_limit_usages_cinder_exception(self):
        # And for the absolute-limits path.
        api.base.is_service_enabled(IsA(http.HttpRequest),
                                    'compute').AndReturn(False)
        api.cinder.is_volume_service_enabled(
            IsA(http.HttpRequest)
        ).AndReturn(True)
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndRaise(cinder.cinder_exception.ClientException('test'))
        exceptions.handle(IsA(http.HttpRequest),
                          _("Unable to retrieve volume limit information."))
        self.mox.ReplayAll()
        quotas.tenant_limit_usages(self.request)
|
{
"content_hash": "d2e72e96744bc976a6ebe7263a658469",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 78,
"avg_line_length": 48.81201044386423,
"alnum_prop": 0.5451190157796202,
"repo_name": "bac/horizon",
"id": "25ea0a9cd3434f52d2ea64a16e5718c98ac34dde",
"size": "19521",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/test/tests/quotas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "103495"
},
{
"name": "HTML",
"bytes": "542157"
},
{
"name": "JavaScript",
"bytes": "1720604"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "5280278"
},
{
"name": "Shell",
"bytes": "19049"
}
],
"symlink_target": ""
}
|
from bokeh.io import show
from bokeh.models import CustomJS, Select

# A dropdown with four choices; "foo" is preselected.
choices = ["foo", "bar", "baz", "quux"]
dropdown = Select(title="Option:", value="foo", options=choices)

# Log every change of the selected value to the browser console.
callback = CustomJS(code="""
    console.log('select: value=' + this.value, this.toString())
""")
dropdown.js_on_change("value", callback)

show(dropdown)
|
{
"content_hash": "a1c48e6c254ef712b510201430c19208",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 84,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6736842105263158,
"repo_name": "bokeh/bokeh",
"id": "49aae60730032dfed26c384687ef7ceca6c17767",
"size": "285",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "examples/interaction/widgets/select_widget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
}
|
import os
import sys
from setuptools import setup
# Shortcut: `python setup.py publish` uploads a source distribution to PyPI.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

# Package metadata, collected in one literal and expanded into setup().
settings = {
    'name': 'django-heroku-mongoify',
    'version': '0.2.0',
    'description': 'Friendly MongoDB for Django on Heroku',
    'long_description': open('README.rst').read(),
    'author': 'Andy Dirnberger',
    'author_email': 'dirn@dirnonline.com',
    'url': 'https://github.com/dirn/django-heroku-mongoify',
    'py_modules': ['mongoify'],
    'package_data': {'': ['LICENSE']},
    'include_package_data': True,
    'tests_require': ['coverage', 'nose'],
    'license': open('LICENSE').read(),
    'classifiers': [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
}

setup(**settings)
|
{
"content_hash": "9a83ffe3631ab42894bb7fb54a23d6b1",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 32.06976744186046,
"alnum_prop": 0.625090645395214,
"repo_name": "dirn/django-heroku-mongoify",
"id": "9fd32750b2b0ff074c09546d61b0ee7d900bbada",
"size": "1426",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13207"
},
{
"name": "Shell",
"bytes": "5128"
}
],
"symlink_target": ""
}
|
import html.entities
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils.encoding import force_text
from django.utils.functional import (
SimpleLazyObject, keep_lazy, keep_lazy_text, lazy,
)
from django.utils.safestring import SafeText, mark_safe
from django.utils.translation import gettext as _, gettext_lazy, pgettext
@keep_lazy_text
def capfirst(x):
    """Capitalize the first letter of a string.

    Falsy inputs ('' or None) are returned unchanged.
    """
    if not x:
        return x
    text = force_text(x)
    return text[0].upper() + text[1:]
# Set up regular expressions
# re_words: matches a whole tag, OR captures a run of word chars / entities.
re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.S)
# re_chars: matches a whole tag, OR captures a single character.
re_chars = re.compile(r'<.*?>|(.)', re.S)
# re_tag: decomposes a tag into (closing slash, name, self-closing slash).
re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)
re_newlines = re.compile(r'\r\n|\r')  # Used in normalize_newlines
# re_camel_case: finds boundaries where a space should split CamelCase.
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@keep_lazy_text
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks. Expects that
    existing line breaks are posix newlines.
    Preserve all white space except added line breaks consume the space on
    which they break the line.
    Don't wrap long words, thus the output text may have lines longer than
    ``width``.
    """
    def _generator():
        for line in text.splitlines(True):  # True keeps trailing linebreaks
            # A line ending in '\n' may be one char longer, since the '\n'
            # itself does not count toward the visible width.
            max_width = min((line.endswith('\n') and width + 1 or width), width)
            while len(line) > max_width:
                # Prefer the last space within the width limit...
                space = line[:max_width + 1].rfind(' ') + 1
                if space == 0:
                    # ...otherwise the first space anywhere (long word).
                    space = line.find(' ') + 1
                    if space == 0:
                        # No spaces at all: emit the long line unbroken.
                        yield line
                        line = ''
                        break
                # The space the line breaks on is consumed by the newline.
                yield '%s\n' % line[:space - 1]
                line = line[space:]
                max_width = min((line.endswith('\n') and width + 1 or width), width)
            if line:
                yield line
    return ''.join(_generator())
class Truncator(SimpleLazyObject):
    """
    An object used to truncate text, either by characters or words.

    Wraps the text lazily (via SimpleLazyObject) so force_text is only
    called when a truncation method is actually invoked.
    """
    def __init__(self, text):
        super().__init__(lambda: force_text(text))

    def add_truncation_text(self, text, truncate=None):
        # Append the truncation indicator (default: translatable '...')
        # to *text*, honoring a '%(truncated_text)s' placeholder if given.
        if truncate is None:
            truncate = pgettext(
                'String to return when truncating text',
                '%(truncated_text)s...')
        if '%(truncated_text)s' in truncate:
            return truncate % {'truncated_text': text}
        # The truncation text didn't contain the %(truncated_text)s string
        # replacement argument so just append it to the text.
        if text.endswith(truncate):
            # But don't append the truncation text if the current text already
            # ends in this.
            return text
        return '%s%s' % (text, truncate)

    def chars(self, num, truncate=None, html=False):
        """
        Return the text truncated to be no longer than the specified number
        of characters.
        `truncate` specifies what should be used to notify that the string has
        been truncated, defaulting to a translatable string of an ellipsis
        (...).
        """
        self._setup()
        length = int(num)
        # NFC-normalize so combining sequences count as single chars.
        text = unicodedata.normalize('NFC', self._wrapped)
        # Calculate the length to truncate to (max length - end_text length)
        truncate_len = length
        for char in self.add_truncation_text('', truncate):
            if not unicodedata.combining(char):
                truncate_len -= 1
                if truncate_len == 0:
                    break
        if html:
            return self._truncate_html(length, truncate, text, truncate_len, False)
        return self._text_chars(length, truncate, text, truncate_len)

    def _text_chars(self, length, truncate, text, truncate_len):
        """Truncate a string after a certain number of chars."""
        s_len = 0
        end_index = None
        for i, char in enumerate(text):
            if unicodedata.combining(char):
                # Don't consider combining characters
                # as adding to the string length
                continue
            s_len += 1
            if end_index is None and s_len > truncate_len:
                end_index = i
            if s_len > length:
                # Return the truncated string
                return self.add_truncation_text(text[:end_index or 0],
                                                truncate)
        # Return the original string since no truncation was necessary
        return text

    def words(self, num, truncate=None, html=False):
        """
        Truncate a string after a certain number of words. `truncate` specifies
        what should be used to notify that the string has been truncated,
        defaulting to ellipsis (...).
        """
        self._setup()
        length = int(num)
        if html:
            return self._truncate_html(length, truncate, self._wrapped, length, True)
        return self._text_words(length, truncate)

    def _text_words(self, length, truncate):
        """
        Truncate a string after a certain number of words.
        Strip newlines in the string.
        """
        words = self._wrapped.split()
        if len(words) > length:
            words = words[:length]
            return self.add_truncation_text(' '.join(words), truncate)
        return ' '.join(words)

    def _truncate_html(self, length, truncate, text, truncate_len, words):
        """
        Truncate HTML to a certain number of chars (not counting tags and
        comments), or, if words is True, then to a certain number of words.
        Close opened tags if they were correctly closed in the given HTML.
        Preserve newlines in the HTML.
        """
        if words and length <= 0:
            return ''
        # Void elements that never take a closing tag.
        html4_singlets = (
            'br', 'col', 'link', 'base', 'img',
            'param', 'area', 'hr', 'input'
        )
        # Count non-HTML chars/words and keep note of open tags
        pos = 0
        end_text_pos = 0
        current_len = 0
        open_tags = []
        regex = re_words if words else re_chars
        while current_len <= length:
            m = regex.search(text, pos)
            if not m:
                # Checked through whole string
                break
            pos = m.end(0)
            if m.group(1):
                # It's an actual non-HTML word or char
                current_len += 1
                if current_len == truncate_len:
                    end_text_pos = pos
                continue
            # Check for tag
            tag = re_tag.match(m.group(0))
            if not tag or current_len >= truncate_len:
                # Don't worry about non tags or tags after our truncate point
                continue
            closing_tag, tagname, self_closing = tag.groups()
            # Element names are always case-insensitive
            tagname = tagname.lower()
            if self_closing or tagname in html4_singlets:
                pass
            elif closing_tag:
                # Check for match in open tags list
                try:
                    i = open_tags.index(tagname)
                except ValueError:
                    pass
                else:
                    # SGML: An end tag closes, back to the matching start tag,
                    # all unclosed intervening start tags with omitted end tags
                    open_tags = open_tags[i + 1:]
            else:
                # Add it to the start of the open tags list
                open_tags.insert(0, tagname)
        if current_len <= length:
            # The whole text fit; nothing to do.
            return text
        out = text[:end_text_pos]
        truncate_text = self.add_truncation_text('', truncate)
        if truncate_text:
            out += truncate_text
        # Close any tags still open
        for tag in open_tags:
            out += '</%s>' % tag
        # Return string
        return out
@keep_lazy_text
def get_valid_filename(s):
    """
    Return the given string converted to a string that can be used for a clean
    filename. Remove leading and trailing spaces; convert other spaces to
    underscores; and remove anything that is not an alphanumeric, dash,
    underscore, or dot.
    >>> get_valid_filename("john's portrait in 2004.jpg")
    'johns_portrait_in_2004.jpg'
    """
    cleaned = force_text(s).strip()
    cleaned = cleaned.replace(' ', '_')
    # Drop every character that is not alphanumeric, '-', '_' or '.'.
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
@keep_lazy_text
def get_text_list(list_, last_word=gettext_lazy('or')):
    """
    Join a list into natural-language form, using *last_word* before the
    final item.
    >>> get_text_list(['a', 'b', 'c', 'd'])
    'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    'a and b'
    >>> get_text_list(['a'])
    'a'
    >>> get_text_list([])
    ''
    """
    count = len(list_)
    if count == 0:
        return ''
    if count == 1:
        return force_text(list_[0])
    # Translators: This string is used as a separator between list elements
    head = _(', ').join(force_text(item) for item in list_[:-1])
    return '%s %s %s' % (head, force_text(last_word), force_text(list_[-1]))
@keep_lazy_text
def normalize_newlines(text):
    """Normalize CRLF and CR newlines to just LF."""
    return re_newlines.sub('\n', force_text(text))
@keep_lazy_text
def phone2numeric(phone):
    """Convert a phone number with letters into its numeric equivalent."""
    # Standard telephone keypad: each group of letters maps to one digit.
    keypad = {
        '2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
        '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz',
    }
    letter_to_digit = {
        letter: digit
        for digit, letters in keypad.items()
        for letter in letters
    }
    # Non-letter characters (digits, punctuation) pass through unchanged.
    return ''.join(letter_to_digit.get(char, char) for char in phone.lower())
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
    """Gzip-compress the bytestring *s* and return the compressed bytes.

    mtime=0 keeps the gzip header deterministic for identical input.
    """
    buf = BytesIO()
    gz = GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0)
    try:
        gz.write(s)
    finally:
        gz.close()
    return buf.getvalue()
class StreamingBuffer:
    """Minimal file-like sink that accumulates written byte chunks.

    read() drains and returns everything collected so far (b'' when
    nothing is pending); flush()/close() are no-ops kept only to satisfy
    the file protocol expected by GzipFile.
    """

    def __init__(self):
        self.vals = []

    def write(self, val):
        self.vals.append(val)

    def read(self):
        # Swap the pending chunks out atomically, then join them.
        pending, self.vals = self.vals, []
        return b''.join(pending)

    def flush(self):
        pass

    def close(self):
        pass
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
    """Lazily gzip an iterable of bytestrings, yielding compressed chunks.

    The first yield emits the gzip header written when GzipFile is opened;
    subsequent yields emit whatever GzipFile flushed into the buffer after
    each input item, and the final yield (after the `with` closes the
    GzipFile) emits the trailing CRC/length block.
    """
    buf = StreamingBuffer()
    with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as zfile:
        # Output headers...
        yield buf.read()
        for item in sequence:
            zfile.write(item)
            # GzipFile buffers internally, so a write may produce no output.
            data = buf.read()
            if data:
                yield data
    yield buf.read()
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks and escaped quotes will remain escaped (the results can then
    be further processed with unescape_string_literal()).

    >>> list(smart_split(r'This is "a person\'s" test.'))
    ['This', 'is', '"a person\\\'s"', 'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    ['Another', "'person\\'s'", 'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    ['A', '"\\"funky\\" style"', 'test.']
    """
    for match in smart_split_re.finditer(force_text(text)):
        yield match[0]
def _replace_entity(match):
text = match.group(1)
if text[0] == '#':
text = text[1:]
try:
if text[0] in 'xX':
c = int(text[1:], 16)
else:
c = int(text)
return chr(c)
except ValueError:
return match.group(0)
else:
try:
return chr(html.entities.name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
# Matches an HTML character reference: named (&amp;), decimal (&#65;) or
# hexadecimal (&#x41;). Group 1 captures the text between '&' and ';'.
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
@keep_lazy_text
def unescape_entities(text):
    """Replace each HTML entity reference in 'text' with the character it
    denotes; references _replace_entity cannot resolve are left unchanged."""
    return _entity_re.sub(_replace_entity, force_text(text))
@keep_lazy_text
def unescape_string_literal(s):
    r"""
    Convert quoted string literals to unquoted strings with escaped quotes and
    backslashes unquoted::

        >>> unescape_string_literal('"abc"')
        'abc'
        >>> unescape_string_literal("'abc'")
        'abc'
        >>> unescape_string_literal('"a \"bc\""')
        'a "bc"'
        >>> unescape_string_literal("'\'ab\' c'")
        "'ab' c"

    Raise ValueError if 's' is not a quoted literal.
    """
    # A literal needs at least an opening and a closing quote, and they must
    # match. The length check also rejects '' (which previously raised a
    # confusing IndexError) and a lone quote character (which previously
    # slipped through and returned '').
    if len(s) < 2 or s[0] not in "\"'" or s[-1] != s[0]:
        raise ValueError("Not a string literal: %r" % s)
    quote = s[0]
    return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\')
@keep_lazy(str, SafeText)
def slugify(value, allow_unicode=False):
    """
    Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
    Remove characters that aren't alphanumerics, underscores, or hyphens.
    Convert to lowercase. Also strip leading and trailing whitespace.
    """
    value = force_text(value)
    if allow_unicode:
        value = unicodedata.normalize('NFKC', value)
    else:
        # Decompose accents, then drop anything outside ASCII.
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return mark_safe(re.sub(r'[-\s]+', '-', value))
def camel_case_to_spaces(value):
    """
    Split CamelCase and convert to lower case. Strip surrounding whitespace.
    """
    spaced = re_camel_case.sub(r' \1', value)
    return spaced.strip().lower()
def _format_lazy(format_string, *args, **kwargs):
"""
Apply str.format() on 'format_string' where format_string, args,
and/or kwargs might be lazy.
"""
return format_string.format(*args, **kwargs)
# Lazy counterpart of str.format(): the formatting in _format_lazy() is
# deferred until the result is actually coerced to str.
format_lazy = lazy(_format_lazy, str)
|
{
"content_hash": "dd488ce60f784b9b92b7e7d57ec02848",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 90,
"avg_line_length": 32.86270022883295,
"alnum_prop": 0.5504491330687278,
"repo_name": "mjtamlyn/django",
"id": "0336b3fe795d9eb857413eaf35109a28b8bc9e77",
"size": "14361",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/utils/text.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "181561"
},
{
"name": "JavaScript",
"bytes": "252653"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11847939"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python, and influenced by Apache's log4j system.
Should work under Python versions >= 1.5.2, except that source line
information is not available unless 'sys._getframe()' is.
Copyright (C) 2001-2007 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, types, time, string, cStringIO, traceback
try:
import codecs
except ImportError:
codecs = None
try:
import thread
import threading
except ImportError:
thread = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
__version__ = "0.5.0.2"
__date__ = "16 February 2007"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
    # Frozen executables report an unhelpful __file__; synthesise the path.
    _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif string.lower(__file__[-4:]) in ['.pyc', '.pyo']:
    # Map a compiled (.pyc/.pyo) filename back to its .py source name.
    _srcfile = __file__[:-4] + '.py'
else:
    _srcfile = __file__
# normcase so comparisons against frame filenames match the platform's
# filesystem case rules.
_srcfile = os.path.normcase(_srcfile)
# next bit filched from 1.5.2's inspect.py
def currentframe():
    """Return the frame object for the caller's stack frame."""
    try:
        raise Exception
    except:
        # The traceback of the just-raised exception holds the current
        # frame; f_back is the caller's frame.
        return sys.exc_traceback.tb_frame.f_back
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
#    _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1
#
# If you don't want threading information in the log, set this to zero
#
logThreads = 1
#
# If you don't want process information in the log, set this to zero
#
logProcesses = 1
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
# _levelNames maps in both directions: numeric level -> level name, and
# level name -> numeric level (maintained by addLevelName()).
_levelNames = {
    CRITICAL : 'CRITICAL',
    ERROR : 'ERROR',
    WARNING : 'WARNING',
    INFO : 'INFO',
    DEBUG : 'DEBUG',
    NOTSET : 'NOTSET',
    'CRITICAL' : CRITICAL,
    'ERROR' : ERROR,
    'WARN' : WARNING,
    'WARNING' : WARNING,
    'INFO' : INFO,
    'DEBUG' : DEBUG,
    'NOTSET' : NOTSET,
}
def getLevelName(level):
    """
    Return the textual representation of logging level 'level'.

    If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
    INFO, DEBUG) then you get the corresponding string. If you have
    associated levels with names using addLevelName then the name you have
    associated with 'level' is returned.

    If a numeric value corresponding to one of the defined levels is passed
    in, the corresponding string representation is returned.

    Otherwise, the string "Level %s" % level is returned.
    """
    try:
        return _levelNames[level]
    except KeyError:
        return "Level %s" % level
def addLevelName(level, levelName):
    """
    Associate 'levelName' with 'level'.

    This is used when converting levels to text during message formatting.
    """
    _acquireLock()
    try: #unlikely to cause an exception, but you never know...
        # Register the mapping in both directions, mirroring _levelNames.
        _levelNames[level] = levelName
        _levelNames[levelName] = level
    finally:
        _releaseLock()
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates Handlers and so
#might arbitrary user threads. Since Handler.__init__() updates the shared
#dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
_lock = None
def _acquireLock():
    """
    Acquire the module-level lock for serializing access to shared data.

    This should be released with _releaseLock().
    """
    global _lock
    if (not _lock) and thread:
        # Created lazily so programs without the thread module never pay
        # for a lock.
        _lock = threading.RLock()
    if _lock:
        _lock.acquire()
def _releaseLock():
    """
    Release the module-level lock acquired by calling _acquireLock().
    """
    if _lock:
        _lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord:
    """
    A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None):
        """
        Initialize a logging record with interesting information.
        """
        ct = time.time()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warn('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        if args and (len(args) == 1) and args[0] and (type(args[0]) == types.DictType):
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except:
            # basename/splitext can fail on odd pathname values; fall back
            # to the raw value rather than aborting the logging call.
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None # used to cache the traceback text
        self.lineno = lineno
        self.funcName = func
        self.created = ct
        # Fractional part of the creation time, in milliseconds.
        self.msecs = (ct - long(ct)) * 1000
        self.relativeCreated = (self.created - _startTime) * 1000
        if logThreads and thread:
            self.thread = thread.get_ident()
            self.threadName = threading.currentThread().getName()
        else:
            self.thread = None
            self.threadName = None
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None
    def __str__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
        self.pathname, self.lineno, self.msg)
    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        if not hasattr(types, "UnicodeType"): #if no unicode support...
            msg = str(self.msg)
        else:
            msg = self.msg
            if type(msg) not in (types.UnicodeType, types.StringType):
                try:
                    msg = str(self.msg)
                except UnicodeError:
                    msg = self.msg #Defer encoding till later
        if self.args:
            msg = msg % self.args
        return msg
def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary.

    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    # Build an empty record, then overwrite its attributes wholesale from
    # the supplied dictionary.
    rv = LogRecord(None, None, "", 0, "", (), None, None)
    rv.__dict__.update(dict)
    return rv
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class Formatter:
    """
    Formatter instances are used to convert a LogRecord to text.

    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    default value of "%s(message)\\n" is used.

    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:

    %(name)s            Name of the logger (logging channel)
    %(levelno)s         Numeric logging level for the message (DEBUG, INFO,
                        WARNING, ERROR, CRITICAL)
    %(levelname)s       Text logging level for the message ("DEBUG", "INFO",
                        "WARNING", "ERROR", "CRITICAL")
    %(pathname)s        Full pathname of the source file where the logging
                        call was issued (if available)
    %(filename)s        Filename portion of pathname
    %(module)s          Module (name portion of filename)
    %(lineno)d          Source line number where the logging call was issued
                        (if available)
    %(funcName)s        Function name
    %(created)f         Time when the LogRecord was created (time.time()
                        return value)
    %(asctime)s         Textual time when the LogRecord was created
    %(msecs)d           Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
                        relative to the time the logging module was loaded
                        (typically at application startup time)
    %(thread)d          Thread ID (if available)
    %(threadName)s      Thread name (if available)
    %(process)d         Process ID (if available)
    %(message)s         The result of record.getMessage(), computed just as
                        the record is emitted
    """
    converter = time.localtime
    def __init__(self, fmt=None, datefmt=None):
        """
        Initialize the formatter with specified format strings.

        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument (if omitted, you get the ISO8601 format).
        """
        if fmt:
            self._fmt = fmt
        else:
            self._fmt = "%(message)s"
        self.datefmt = datefmt
    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.

        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, the ISO8601 format is used. The resulting
        string is returned. This function uses a user-configurable function
        to convert the creation time to a tuple. By default, time.localtime()
        is used; to change this for a particular formatter instance, set the
        'converter' attribute to a function with the same signature as
        time.localtime() or time.gmtime(). To change it for all formatters,
        for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            # Default: "YYYY-MM-DD HH:MM:SS,mmm" (ISO8601-style with ms).
            t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
            s = "%s,%03d" % (t, record.msecs)
        return s
    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.

        This default implementation just uses
        traceback.print_exception()
        """
        sio = cStringIO.StringIO()
        traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
        s = sio.getvalue()
        sio.close()
        # Drop the trailing newline print_exception always emits.
        if s[-1:] == "\n":
            s = s[:-1]
        return s
    def format(self, record):
        """
        Format the specified record as text.

        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string contains
        "%(asctime)", formatTime() is called to format the event time.
        If there is exception information, it is formatted using
        formatException() and appended to the message.
        """
        record.message = record.getMessage()
        # Only pay for time formatting when the format actually uses it.
        if string.find(self._fmt,"%(asctime)") >= 0:
            record.asctime = self.formatTime(record, self.datefmt)
        s = self._fmt % record.__dict__
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
            if record.exc_text:
                if s[-1:] != "\n":
                    s = s + "\n"
                s = s + record.exc_text
        return s
#
# The default formatter to use when no other is specified
#
# Uses the bare "%(message)s" format (see Formatter.__init__).
_defaultFormatter = Formatter()
class BufferingFormatter:
    """
    A formatter suitable for formatting a number of records.
    """
    def __init__(self, linefmt=None):
        """
        Optionally specify a formatter which will be used to format each
        individual record; falls back to the module default formatter.
        """
        if linefmt:
            self.linefmt = linefmt
        else:
            self.linefmt = _defaultFormatter
    def formatHeader(self, records):
        """
        Return the header string for the specified records.
        """
        return ""
    def formatFooter(self, records):
        """
        Return the footer string for the specified records.
        """
        return ""
    def format(self, records):
        """
        Format the specified records and return the result as a string.
        """
        if len(records) == 0:
            return ""
        # header, one formatted line per record, then footer.
        parts = [self.formatHeader(records)]
        for record in records:
            parts.append(self.linefmt.format(record))
        parts.append(self.formatFooter(records))
        return "".join(parts)
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter:
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.

    Loggers and Handlers can optionally use Filter instances to filter
    records as desired. The base filter class only allows events which are
    below a certain point in the logger hierarchy. For example, a filter
    initialized with "A.B" will allow events logged by loggers "A.B",
    "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
    initialized with the empty string, all events are passed.
    """
    def __init__(self, name=''):
        """
        Initialize a filter.

        Initialize with the name of the logger which, together with its
        children, will have its events allowed through the filter. If no
        name is specified, allow every event.
        """
        self.name = name
        self.nlen = len(name)
    def filter(self, record):
        """
        Determine if the specified record is to be logged.

        Is the specified record to be logged? Returns 0 for no, nonzero for
        yes. If deemed appropriate, the record may be modified in-place.
        """
        if self.nlen == 0:
            # Empty filter name: pass everything.
            return 1
        elif self.name == record.name:
            return 1
        elif string.find(record.name, self.name, 0, self.nlen) != 0:
            # record.name does not start with self.name.
            return 0
        # Prefix matched; require a '.' boundary so "A.B" rejects "A.BB".
        return (record.name[self.nlen] == ".")
class Filterer:
    """
    A base class for loggers and handlers which allows them to share
    common code.
    """
    def __init__(self):
        """
        Initialize the list of filters to be an empty list.
        """
        self.filters = []
    def addFilter(self, filter):
        """
        Add the specified filter to this handler (at most once).
        """
        if filter not in self.filters:
            self.filters.append(filter)
    def removeFilter(self, filter):
        """
        Remove the specified filter from this handler, if present.
        """
        if filter in self.filters:
            self.filters.remove(filter)
    def filter(self, record):
        """
        Determine if a record is loggable by consulting all the filters.

        The default is to allow the record to be logged; any filter can veto
        this and the record is then dropped. Returns a zero value if a record
        is to be dropped, else non-zero.
        """
        for f in self.filters:
            if not f.filter(record):
                # First veto wins; no need to consult the rest.
                return 0
        return 1
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
# Both structures are guarded by _acquireLock()/_releaseLock(); see
# Handler.__init__() and Handler.close().
_handlers = {} #repository of handlers (for flushing when shutdown called)
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.

    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty.
        """
        Filterer.__init__(self)
        self.level = level
        self.formatter = None
        #get the module data lock, as we're updating a shared structure.
        _acquireLock()
        try: #unlikely to raise an exception, but you never know...
            _handlers[self] = 1
            # Newest first, so handlers can be closed in reverse creation
            # order (see the comment on _handlerList above).
            _handlerList.insert(0, self)
        finally:
            _releaseLock()
        self.createLock()
    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        if thread:
            self.lock = threading.RLock()
        else:
            self.lock = None
    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        if self.lock:
            self.lock.acquire()
    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()
    def setLevel(self, level):
        """
        Set the logging level of this handler.
        """
        self.level = level
    def format(self, record):
        """
        Format the specified record.

        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)
    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.

        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError, 'emit must be implemented '\
                                   'by Handler subclasses'
    def handle(self, record):
        """
        Conditionally emit the specified logging record.

        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock. Returns whether the filter passed the record for
        emission.
        """
        rv = self.filter(record)
        if rv:
            self.acquire()
            try:
                self.emit(record)
            finally:
                self.release()
        return rv
    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt
    def flush(self):
        """
        Ensure all logging output has been flushed.

        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass
    def close(self):
        """
        Tidy up any resources used by the handler.

        This version removes the handler from an internal list
        of handlers which is closed when shutdown() is called. Subclasses
        should ensure that this gets called from overridden close()
        methods.
        """
        #get the module data lock, as we're updating a shared structure.
        _acquireLock()
        try: #unlikely to raise an exception, but you never know...
            del _handlers[self]
            _handlerList.remove(self)
        finally:
            _releaseLock()
    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions:
            ei = sys.exc_info()
            traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)
            # Break the reference cycle through the traceback frames.
            del ei
class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """
    def __init__(self, strm=None):
        """
        Initialize the handler.

        If strm is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if strm is None:
            strm = sys.stderr
        self.stream = strm
        self.formatter = None
    def flush(self):
        """
        Flushes the stream.
        """
        self.stream.flush()
    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline
        [N.B. this may be removed depending on feedback]. If exception
        information is present, it is formatted using
        traceback.print_exception and appended to the stream.
        """
        try:
            msg = self.format(record)
            fs = "%s\n"
            if not hasattr(types, "UnicodeType"): #if no unicode support...
                self.stream.write(fs % msg)
            else:
                try:
                    self.stream.write(fs % msg)
                except UnicodeError:
                    # Stream refused the unicode string; retry as UTF-8 bytes.
                    self.stream.write(fs % msg.encode("UTF-8"))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Never let a logging failure propagate into the application.
            self.handleError(record)
class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None):
        """
        Open the specified file and use it as the stream for logging.
        """
        if codecs is None:
            # codecs failed to import at module load; fall back to an
            # unencoded file.
            encoding = None
        if encoding is None:
            stream = open(filename, mode)
        else:
            stream = codecs.open(filename, mode, encoding)
        StreamHandler.__init__(self, stream)
        #keep the absolute path, otherwise derived classes which use this
        #may come a cropper when the current directory changes
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
    def close(self):
        """
        Closes the stream.
        """
        self.flush()
        self.stream.close()
        StreamHandler.close(self)
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder:
    """
    PlaceHolder instances are used in the Manager logger hierarchy to take
    the place of nodes for which no loggers have been defined. This class is
    intended for internal use only and not as part of the public API.
    """
    def __init__(self, alogger):
        """
        Initialize with the specified logger being a child of this placeholder.
        """
        #self.loggers = [alogger]
        # loggerMap is used as a set of child loggers; the values are unused.
        self.loggerMap = { alogger : None }
    def append(self, alogger):
        """
        Add the specified logger as a child of this placeholder.
        """
        #if alogger not in self.loggers:
        if not self.loggerMap.has_key(alogger):
            #self.loggers.append(alogger)
            self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
    """
    Set the class to be used when instantiating a logger. The class should
    define __init__() such that only a name argument is required, and the
    __init__() should call Logger.__init__()
    """
    # Logger itself is always acceptable; anything else must subclass it.
    if klass != Logger:
        if not issubclass(klass, Logger):
            raise TypeError, "logger not derived from logging.Logger: " + \
                            klass.__name__
    global _loggerClass
    _loggerClass = klass
def getLoggerClass():
    """
    Return the class to be used when instantiating a logger.
    """
    return _loggerClass
class Manager:
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        self.disable = 0
        self.emittedNoHandlerWarning = 0
        # Maps dotted logger name -> Logger or PlaceHolder.
        self.loggerDict = {}
    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.

        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        _acquireLock()
        try:
            if self.loggerDict.has_key(name):
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    # Promote the placeholder to a real logger and re-point
                    # its children at the new logger.
                    ph = rv
                    rv = _loggerClass(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = _loggerClass(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        finally:
            _releaseLock()
        return rv
    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        i = string.rfind(name, ".")
        rv = None
        # Walk the dotted name right-to-left until a real Logger is found.
        while (i > 0) and not rv:
            substr = name[:i]
            if not self.loggerDict.has_key(substr):
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    rv = obj
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = string.rfind(name, ".", 0, i - 1)
        if not rv:
            rv = self.root
        alogger.parent = rv
    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(nm)
            #if string.find(c.parent.name, nm) <> 0:
            if c.parent.name[:namelen] != name:
                alogger.parent = c.parent
                c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = level
self.parent = None
self.propagate = 1
self.handlers = []
self.disabled = 0
def setLevel(self, level):
"""
Set the logging level of this logger.
"""
self.level = level
    def debug(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'DEBUG'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
        """
        # Cheap process-wide kill-switch check first, then the per-logger level.
        if self.manager.disable >= DEBUG:
            return
        if DEBUG >= self.getEffectiveLevel():
            # apply() instead of f(*args, **kwargs) keeps pre-Python-2.0 compatibility.
            apply(self._log, (DEBUG, msg, args), kwargs)
    def info(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'INFO'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
        """
        if self.manager.disable >= INFO:
            return
        if INFO >= self.getEffectiveLevel():
            apply(self._log, (INFO, msg, args), kwargs)
    def warning(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'WARNING'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
        """
        if self.manager.disable >= WARNING:
            return
        # NOTE(review): warning()/error() go through isEnabledFor() while
        # debug()/info()/critical() compare against getEffectiveLevel() inline;
        # the duplicated manager.disable check is redundant but harmless.
        if self.isEnabledFor(WARNING):
            apply(self._log, (WARNING, msg, args), kwargs)
    warn = warning
    def error(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'ERROR'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.error("Houston, we have a %s", "major problem", exc_info=1)
        """
        if self.manager.disable >= ERROR:
            return
        if self.isEnabledFor(ERROR):
            apply(self._log, (ERROR, msg, args), kwargs)
    def exception(self, msg, *args):
        """
        Convenience method for logging an ERROR with exception information.
        """
        # Only meaningful inside an 'except' block, where sys.exc_info() is set.
        apply(self.error, (msg,) + args, {'exc_info': 1})
    def critical(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'CRITICAL'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
        """
        if self.manager.disable >= CRITICAL:
            return
        if CRITICAL >= self.getEffectiveLevel():
            apply(self._log, (CRITICAL, msg, args), kwargs)
    fatal = critical
    def log(self, level, msg, *args, **kwargs):
        """
        Log 'msg % args' with the integer severity 'level'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
        """
        if type(level) != types.IntType:
            # raiseExceptions decides whether misuse raises or is silently ignored.
            if raiseExceptions:
                raise TypeError, "level must be an integer"
            else:
                return
        if self.manager.disable >= level:
            return
        if self.isEnabledFor(level):
            apply(self._log, (level, msg, args), kwargs)
    def findCaller(self):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        f = currentframe().f_back
        rv = "(unknown file)", 0, "(unknown function)"
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            if filename == _srcfile:
                # Skip frames that belong to this logging module itself so the
                # reported location is the user's call site.
                f = f.f_back
                continue
            rv = (filename, f.f_lineno, co.co_name)
            break
        return rv
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
        """
        A factory method which can be overridden in subclasses to create
        specialized LogRecords.
        """
        rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
        if extra:
            for key in extra:
                # Refuse to clobber attributes the formatting machinery computes
                # itself, or anything the record already defines.
                if (key in ["message", "asctime"]) or (key in rv.__dict__):
                    raise KeyError("Attempt to overwrite %r in LogRecord" % key)
                rv.__dict__[key] = extra[key]
        return rv
    def _log(self, level, msg, args, exc_info=None, extra=None):
        """
        Low-level logging routine which creates a LogRecord and then calls
        all the handlers of this logger to handle the record.
        """
        if _srcfile:
            # _srcfile may be unset (e.g. frozen interpreters); then caller
            # information is unavailable.
            fn, lno, func = self.findCaller()
        else:
            fn, lno, func = "(unknown file)", 0, "(unknown function)"
        if exc_info:
            if type(exc_info) != types.TupleType:
                # Any true non-tuple value means "use the current exception".
                exc_info = sys.exc_info()
        record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
        self.handle(record)
    def handle(self, record):
        """
        Call the handlers for the specified record.
        This method is used for unpickled records received from a socket, as
        well as those created locally. Logger-level filtering is applied.
        """
        # filter() is inherited from the Filterer base (initialized in __init__).
        if (not self.disabled) and self.filter(record):
            self.callHandlers(record)
    def addHandler(self, hdlr):
        """
        Add the specified handler to this logger.
        """
        # Adding the same handler twice is a no-op.
        if not (hdlr in self.handlers):
            self.handlers.append(hdlr)
    def removeHandler(self, hdlr):
        """
        Remove the specified handler from this logger.
        """
        if hdlr in self.handlers:
            #hdlr.close()
            # Hold the handler's own lock so it is not removed mid-emit.
            hdlr.acquire()
            try:
                self.handlers.remove(hdlr)
            finally:
                hdlr.release()
    def callHandlers(self, record):
        """
        Pass a record to all relevant handlers.
        Loop through all handlers for this logger and its parents in the
        logger hierarchy. If no handler was found, output a one-off error
        message to sys.stderr. Stop searching up the hierarchy whenever a
        logger with the "propagate" attribute set to zero is found - that
        will be the last logger whose handlers are called.
        """
        c = self
        found = 0
        while c:
            for hdlr in c.handlers:
                found = found + 1
                # Per-handler threshold: the record may pass the logger but
                # still be dropped by an individual handler.
                if record.levelno >= hdlr.level:
                    hdlr.handle(record)
            if not c.propagate:
                c = None #break out
            else:
                c = c.parent
        if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
            # Warn at most once per process about a handler-less hierarchy.
            sys.stderr.write("No handlers could be found for logger"
                             " \"%s\"\n" % self.name)
            self.manager.emittedNoHandlerWarning = 1
    def getEffectiveLevel(self):
        """
        Get the effective level for this logger.
        Loop through this logger and its parents in the logger hierarchy,
        looking for a non-zero logging level. Return the first one found.
        """
        logger = self
        while logger:
            if logger.level:
                # NOTSET (0) is falsy, so only an explicitly set level stops the walk.
                return logger.level
            logger = logger.parent
        return NOTSET
    def isEnabledFor(self, level):
        """
        Is this logger enabled for level 'level'?
        """
        # The module-wide disable threshold overrides per-logger levels.
        if self.manager.disable >= level:
            return 0
        return level >= self.getEffectiveLevel()
class RootLogger(Logger):
    """
    A root logger is not that different to any other logger, except that
    it must have a logging level and there is only one instance of it in
    the hierarchy.
    """
    def __init__(self, level):
        """
        Initialize the logger with the name "root".
        """
        # The root always carries an explicit level (never NOTSET).
        Logger.__init__(self, "root", level)
_loggerClass = Logger  # class instantiated for newly created loggers
root = RootLogger(WARNING)  # the single root of the logger hierarchy
Logger.root = root
Logger.manager = Manager(Logger.root)  # Manager (defined above) tracks the name -> logger mapping
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
    """
    Do basic configuration for the logging system.
    This function does nothing if the root logger already has handlers
    configured. It is a convenience method intended for use by simple scripts
    to do one-shot configuration of the logging package.
    The default behaviour is to create a StreamHandler which writes to
    sys.stderr, set a formatter using the BASIC_FORMAT format string, and
    add the handler to the root logger.
    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.
    filename  Specifies that a FileHandler be created, using the specified
              filename, rather than a StreamHandler.
    filemode  Specifies the mode to open the file, if filename is specified
              (if filemode is unspecified, it defaults to 'a').
    format    Use the specified format string for the handler.
    datefmt   Use the specified date/time format.
    level     Set the root logger level to the specified level.
    stream    Use the specified stream to initialize the StreamHandler. Note
              that this argument is incompatible with 'filename' - if both
              are present, 'stream' is ignored.
    Note that you could specify a stream created using open(filename, mode)
    rather than passing the filename and mode in. However, it should be
    remembered that StreamHandler does not close its stream (since it may be
    using sys.stdout or sys.stderr), whereas FileHandler closes its stream
    when the handler is closed.
    """
    # One-shot: do nothing if the root logger is already configured.
    if len(root.handlers) == 0:
        filename = kwargs.get("filename")
        if filename:
            mode = kwargs.get("filemode", 'a')
            hdlr = FileHandler(filename, mode)
        else:
            # 'stream' may be None, in which case StreamHandler uses sys.stderr.
            stream = kwargs.get("stream")
            hdlr = StreamHandler(stream)
        fs = kwargs.get("format", BASIC_FORMAT)
        dfs = kwargs.get("datefmt", None)
        fmt = Formatter(fs, dfs)
        hdlr.setFormatter(fmt)
        root.addHandler(hdlr)
        level = kwargs.get("level")
        if level:
            root.setLevel(level)
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
    """
    Return a logger with the specified name, creating it if necessary.
    If no name is specified, return the root logger.
    """
    # Guard clause: an empty/None name means the root logger.
    if not name:
        return root
    return Logger.manager.getLogger(name)
#def getRootLogger():
# """
# Return the root logger.
#
# Note that getLogger('') now does the same thing, so this function is
# deprecated and may disappear in the future.
# """
# return root
def critical(msg, *args, **kwargs):
    """
    Log a message with severity 'CRITICAL' on the root logger.
    """
    # Each module-level helper lazily installs a default handler first.
    if len(root.handlers) == 0:
        basicConfig()
    apply(root.critical, (msg,)+args, kwargs)
fatal = critical
def error(msg, *args, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger.
    """
    if len(root.handlers) == 0:
        basicConfig()
    apply(root.error, (msg,)+args, kwargs)
def exception(msg, *args):
    """
    Log a message with severity 'ERROR' on the root logger,
    with exception information.
    """
    # Delegates to error(), which performs the lazy basicConfig() itself.
    apply(error, (msg,)+args, {'exc_info': 1})
def warning(msg, *args, **kwargs):
    """
    Log a message with severity 'WARNING' on the root logger.
    """
    if len(root.handlers) == 0:
        basicConfig()
    apply(root.warning, (msg,)+args, kwargs)
warn = warning
def info(msg, *args, **kwargs):
    """
    Log a message with severity 'INFO' on the root logger.
    """
    if len(root.handlers) == 0:
        basicConfig()
    apply(root.info, (msg,)+args, kwargs)
def debug(msg, *args, **kwargs):
    """
    Log a message with severity 'DEBUG' on the root logger.
    """
    if len(root.handlers) == 0:
        basicConfig()
    apply(root.debug, (msg,)+args, kwargs)
def log(level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level' on the root logger.
    """
    if len(root.handlers) == 0:
        basicConfig()
    apply(root.log, (level, msg)+args, kwargs)
def disable(level):
    """
    Disable all logging calls less severe than 'level'.
    """
    # Stored on the manager; every logging call checks this threshold first.
    root.manager.disable = level
def shutdown(handlerList=_handlerList):
    """
    Perform any cleanup actions in the logging system (e.g. flushing
    buffers).
    Should be called at application exit.
    """
    # NOTE(review): iterates a copy, presumably because close() deregisters
    # the handler from _handlerList -- confirm against Handler.close().
    for h in handlerList[:]:
        #errors might occur, for example, if files are locked
        #we just ignore them if raiseExceptions is not set
        try:
            h.flush()
            h.close()
        except:
            if raiseExceptions:
                raise
            #else, swallow
#Let's try and shutdown automatically on application exit...
try:
    import atexit
    atexit.register(shutdown)
except ImportError: # for Python versions < 2.0
    # No atexit module: wrap sys.exit so shutdown() still runs on normal exit.
    def exithook(status, old_exit=sys.exit):
        try:
            shutdown()
        finally:
            old_exit(status)
    sys.exit = exithook
|
{
"content_hash": "22f1064fa597b2ce2c95b2c4bed21940",
"timestamp": "",
"source": "github",
"line_count": 1357,
"max_line_length": 93,
"avg_line_length": 35.01473839351511,
"alnum_prop": 0.5617804903714616,
"repo_name": "ericlink/adms-server",
"id": "a07f5fa41e5893d649984c488f9e28130df98760",
"size": "48516",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "playframework-dist/play-1.1/python/Lib/logging/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "408"
},
{
"name": "C",
"bytes": "152256"
},
{
"name": "CSS",
"bytes": "97486"
},
{
"name": "HTML",
"bytes": "553901"
},
{
"name": "Java",
"bytes": "3086962"
},
{
"name": "JavaScript",
"bytes": "736134"
},
{
"name": "Python",
"bytes": "15750302"
},
{
"name": "SQLPL",
"bytes": "10111"
},
{
"name": "Scala",
"bytes": "1432"
},
{
"name": "Shell",
"bytes": "1369"
}
],
"symlink_target": ""
}
|
"""
Multiple concurrent notifications
+++++++++++++++++++++++++++++++++
Send multiple SNMP notifications at once using the following options:
* SNMPv2c and SNMPv3
* with community name 'public'
* over IPv4/UDP
* send INFORM notification
* to multiple Managers
* with TRAP ID 'coldStart' specified as a MIB symbol
* include managed object information specified as var-bind objects pair
Here we tag each SNMP-COMMUNITY-MIB::snmpCommunityTable row
with the same tag as the SNMP-TARGET-MIB::snmpTargetAddrTable row,
which leads to redundant table information.
Functionally similar to:
| $ snmptrap -v2c -c public demo.snmplabs.com 12345 1.3.6.1.6.3.1.1.5.2
| $ snmpinform -v2c -c public demo.snmplabs.com 12345 1.3.6.1.6.3.1.1.5.2
| $ snmptrap -v2c -c public demo.snmplabs.com 12345 1.3.6.1.6.3.1.1.5.2
"""#
import asyncio
from pysnmp.hlapi.asyncio import *
@asyncio.coroutine
def sendone(snmpEngine, hostname, notifyType):
    """Send one SNMPv2c notification of *notifyType* ('trap' or 'inform')
    to *hostname* on port 162 and print the outcome.
    Uses the legacy generator-based coroutine protocol (pre async/await).
    """
    errorIndication, errorStatus, \
    errorIndex, varBinds = yield from sendNotification(
        snmpEngine,
        CommunityData('public', tag=hostname),
        UdpTransportTarget((hostname, 162), tagList=hostname),
        ContextData(),
        notifyType,
        NotificationType(
            ObjectIdentity('1.3.6.1.6.3.1.1.5.2')
        ).addVarBinds(
            ('1.3.6.1.6.3.1.1.4.3.0', '1.3.6.1.4.1.20408.4.1.1.2'),
            ('1.3.6.1.2.1.1.1.0', OctetString('my system'))
        )
    )
    if errorIndication:
        # Local engine/transport-level problem.
        print(errorIndication)
    elif errorStatus:
        # Protocol-level error reported by the peer; point at the offending var-bind.
        print('%s: at %s' % (errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex)-1][0] or '?'))
    else:
        for varBind in varBinds:
            print(' = '.join([x.prettyPrint() for x in varBind]))
# One engine and one event loop are shared by both concurrent sends.
snmpEngine = SnmpEngine()
loop = asyncio.get_event_loop()
loop.run_until_complete(
    asyncio.wait([sendone(snmpEngine, 'demo.snmplabs.com', 'trap'),
                  sendone(snmpEngine, 'demo.snmplabs.com', 'inform')])
)
|
{
"content_hash": "b15e3285d95496199bcc426328909b18",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 110,
"avg_line_length": 32.56666666666667,
"alnum_prop": 0.6581371545547595,
"repo_name": "filippog/pysnmp",
"id": "5dfdfdbc17469344f30005d6dd85ed0cfaf9f5a9",
"size": "1954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/hlapi/asyncio/agent/ntforg/multiple-notifications-at-once.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "991968"
},
{
"name": "Shell",
"bytes": "686"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
import logging
logger = logging.getLogger(__name__)  # module-level logger; unused here but conventional
def index(request):
    """Render the home page template with an empty context."""
    return render(request, 'home/index.html', {})
|
{
"content_hash": "16f8c5dd5ad702ab4cb67164b914bcd4",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 46,
"avg_line_length": 22.285714285714285,
"alnum_prop": 0.75,
"repo_name": "aldencolerain/boringmanclan",
"id": "60d45af86a161b9b5f2fc33df855bbe19d5ba58c",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/views/home.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3622"
},
{
"name": "Python",
"bytes": "20425"
},
{
"name": "Shell",
"bytes": "840"
}
],
"symlink_target": ""
}
|
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandPatchPerson(EventState):
    '''
    Patch (update) a person record in the Wonderland database.
    ># entity sara_msgs/Entity the entity whose fields are pushed
    <= done the update was applied
    <= dont_exist no person matches the given identifiers
    <= bad_request identifiers missing or the server rejected the request
    <= error transport failure or server-side error
    '''
    def __init__(self):
        # See example_state.py for basic explanations.
        super(WonderlandPatchPerson, self).__init__(
            input_keys=['entity'],
            outcomes=['done', 'dont_exist', 'bad_request', 'error'])
    def execute(self, userdata):
        entity = userdata.entity
        # At least one identifier is required to address the person.
        if entity.wonderlandId is None and entity.ID is None:
            Logger.logwarn('Need wonderland ID or face ID !')
            return 'bad_request'
        # Table-driven payload build: only attributes that are set are sent.
        candidates = (
            ('peopleId', entity.wonderlandId),
            ('peopleRecognitionId', entity.ID),
            ('peopleColor', entity.color),
            ('peoplePose', entity.pose),
            ('peoplePoseAccuracy', entity.poseProbability),
            ('peopleGender', entity.face.gender),
            ('peopleGenderAccuracy', entity.face.genderProbability),
            ('peopleEmotion', entity.face.emotion),
            ('peopleEmotionAccuracy', entity.face.emotionProbability),
            ('peopleIsOperator', entity.isOperator),
        )
        payload = {}
        for field, value in candidates:
            if value is not None:
                payload[field] = value
        if len(entity.aliases) > 0:
            payload['peopleName'] = entity.aliases[0]
        try:
            response = requests.patch("http://wonderland:8000/api/people/", data=payload)
            if response.status_code == 200:
                return 'done'
            if response.status_code == 404:
                return 'dont_exist'
            if 400 <= response.status_code < 500:
                Logger.logwarn(response.status_code)
                return 'bad_request'
            Logger.logerr(response.status_code)
            return 'error'
        except requests.exceptions.RequestException as exc:
            Logger.logerr(exc)
            return 'error'
|
{
"content_hash": "e59ada85668c1ebdfc455b8ad1d46a7b",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 108,
"avg_line_length": 31.880434782608695,
"alnum_prop": 0.5789294237981589,
"repo_name": "WalkingMachine/sara_behaviors",
"id": "9cc33722be21f4a63decc77b57a7f5aeabd29bf2",
"size": "2972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sara_flexbe_states/src/sara_flexbe_states/WonderlandPatchPerson.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "6456"
},
{
"name": "CMake",
"bytes": "2065"
},
{
"name": "Python",
"bytes": "905600"
},
{
"name": "Shell",
"bytes": "2661"
}
],
"symlink_target": ""
}
|
import os
from types import ModuleType
from .bdist import BDist
from .develop import Develop
from .installed import Installed
from .sdist import SDist
from .wheel import Wheel
def _first_distribution(factories, target, metadata_version):
    """Instantiate each factory in turn; return the first that succeeds.
    ValueError/IOError mean "not this kind of distribution" and are skipped.
    """
    for factory in factories:
        try:
            return factory(target, metadata_version)
        except (ValueError, IOError):
            continue
    return None
def get_metadata(path_or_module, metadata_version=None):
    """ Try to create a Distribution 'path_or_module'.
    o 'path_or_module' may be a module object.
    o If a string, 'path_or_module' may point to an sdist file, a bdist
      file, an installed package, or a working checkout (if it contains
      PKG-INFO).
    o Return None if 'path_or_module' can't be parsed.
    """
    if isinstance(path_or_module, ModuleType):
        found = _first_distribution((Installed,), path_or_module, metadata_version)
        if found is not None:
            return found
    # Strings may name an importable module; a failed import is not an error.
    try:
        __import__(path_or_module)
    except ImportError:
        pass
    else:
        found = _first_distribution((Installed,), path_or_module, metadata_version)
        if found is not None:
            return found
    if os.path.isfile(path_or_module):
        return _first_distribution((SDist, BDist, Wheel), path_or_module,
                                   metadata_version)
    if os.path.isdir(path_or_module):
        return _first_distribution((Wheel, Develop), path_or_module,
                                   metadata_version)
|
{
"content_hash": "b2e3c5dda67d1c0e2a515227e05f5a23",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 71,
"avg_line_length": 28.806451612903224,
"alnum_prop": 0.6142217245240762,
"repo_name": "sonntagsgesicht/regtest",
"id": "306630ab3c8e3c03b911d6e4ef7c720fb3bce19a",
"size": "1786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".aux/venv/lib/python3.9/site-packages/pkginfo/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13888"
}
],
"symlink_target": ""
}
|
import vim
from sys import version_info
import re
from os.path import abspath, basename, dirname, expanduser, splitext, isfile, relpath, exists, join
from os import remove, mkdir
from shutil import move
from glob import glob
from .timestamps import timestamp
from .vim_interface import *
from .modelines import format_modeline
from .utils import U
def update():
    """Rename the current pad file so its name tracks the buffer's id,
    keeping numeric suffixes unique among same-id files.
    Only acts when the buffer was modified and g:pad#rename_files is set.
    """
    if not bool(int(vim.eval('exists("b:pad_modified")'))):
        return
    modified = bool(int(vim.eval("b:pad_modified")))
    can_rename = get_setting("rename_files", bool)
    if modified and can_rename:
        _id = PadInfo(vim.current.buffer).id
        old_path = expanduser(vim.current.buffer.name)
        # if the file already has an extension
        ext = splitext(old_path)[1]
        if ext != '' and ext != get_setting("default_file_extension"):
            return
        fs = list(filter(isfile, \
                glob(expanduser(join(dirname(vim.current.buffer.name), _id)) + "*")))
        if old_path not in fs:
            if fs == []:
                new_path = expanduser(join(get_save_dir(), _id))
            else:
                # Same-id files exist: pick the next free numeric suffix.
                # NOTE(review): max() over string extensions compares
                # lexicographically ('9' > '10'), so suffixes >= 10 may collide.
                exts = map(lambda i: '0' if i == '' else i[1:], \
                        map(lambda i: splitext(i)[1], fs))
                new_path = ".".join([
                    expanduser(join(get_save_dir(), _id)),
                    str(int(max(exts)) + 1)])
            new_path = new_path + vim.eval("g:pad#default_file_extension")
            # V + cmd presumably executes an ex command (see vim_interface).
            V + "bwipeout"
            move(old_path, new_path)
def delete():
    """Delete the current pad's file after an interactive confirmation,
    then wipe its buffer and redraw."""
    path = vim.current.buffer.name
    if exists(path):
        confirm = vim.eval('input("really delete? (y/n): ")')
        if confirm.lower() == "y":
            remove(path)
            V + "bdelete!"
            V + "redraw!"
def add_modeline():
    """Prompt for a filetype and append a vim modeline to the current pad,
    then apply that filetype to the buffer."""
    mode = vim.eval('input("filetype: ", "", "filetype")')
    if mode:
        args = [format_modeline(mode)]
        if get_setting('modeline_position') == 'top':
            args.append(0)  # buffer.append(text, 0) inserts at the top instead
        vim.current.buffer.append(*args)
        V + ("set filetype=" + mode)
        V + "set nomodified"
def move_to_folder(path=None):
    """Move the current pad file into *path* (relative to the save dir),
    creating the folder if needed, then delete the buffer.
    Prompts for the destination when *path* is None.
    """
    if path is None:
        path = vim.eval("input('move to: ')")
    new_path = join(get_save_dir(), path, basename(vim.current.buffer.name))
    if not exists(join(get_save_dir(), path)):
        mkdir(join(get_save_dir(), path))
    try:
        move(vim.current.buffer.name, new_path)
    except IOError as e:
        # errno 20 == ENOTDIR: the chosen destination is not a directory.
        # NOTE(review): other IOErrors are silently swallowed and the buffer
        # is still deleted below.
        if e.errno == 20:
            V + "redraw"
            V + "echom 'vim-pad: cannot use that path'"
            return
    V + "bdelete"
def move_to_savedir():
    """Move the current pad back to the top-level save directory."""
    move_to_folder("")
def archive():
    """Move the current pad into the 'archive' subfolder."""
    move_to_folder("archive")
def unarchive():
    """Move the current pad out of the archive, back to the save dir."""
    move_to_savedir()
def isfileobject(o):
    """Return True when *o* is a file-like object on the running interpreter.
    Python 3 checks io.IOBase; Python 2 checks the ``file`` builtin.
    """
    major = version_info.major
    if major == 3:
        import io
        return isinstance(o, io.IOBase)
    if major == 2:
        return isinstance(o, file)
class PadInfo(object):
    """Extract id, summary (first line) and a condensed body from a pad,
    read from a vim buffer, a file object or a list of lines."""
    # id: filename-safe identifier; summary: first non-empty line;
    # body: remaining lines joined; isEmpty: no summary found;
    # folder: subfolder relative to the save dir (file sources only).
    __slots__ = "id", "summary", "body", "isEmpty", "folder"
    def __init__(self, source):
        """
        source can be:
        * a vim buffer
        * a file object
        * a list of strings, one per line
        """
        nchars = int(vim.eval("g:pad#read_nchars_from_files"))
        self.summary = ""
        self.body = ""
        self.isEmpty = True
        self.folder = ""
        self.id = timestamp()  # fallback id when no summary is available
        if source is vim.current.buffer:
            # Only the first 10 lines matter for id/summary extraction.
            source = source[:10]
        elif isfileobject(source):
            save_dir = get_save_dir()
            if abspath(source.name).startswith(save_dir):
                pos = len(get_save_dir()), len(basename(source.name))
                self.folder = abspath(source.name)[pos[0]:-pos[1]]
            else:
                self.folder = dirname(relpath(source.name, vim.eval('getcwd()')))
            if vim.eval("g:pad#title_first_line") == '1':
                source = source.readline().split("\n")
            elif nchars > 0:
                source = source.read(nchars).split('\n')
            else:
                source = [basename(source.name)]
        data = [line.strip() for line in source if line != ""]
        if data != []:
            # we discard modelines
            if re.match("^.* vim: set .*:.*$", data[0]):
                data = data[1:]
            self.summary = data[0].strip()
            org_tags_data = None
            if len(self.summary) > 0:
                # vim-orgmode adds tags after whitespace
                org_tags_data = re.search("\s+(?P<tags>:.*$)", self.summary)
                if org_tags_data:
                    self.summary = re.sub("\s+:.*$", "", self.summary)
                if self.summary[0] in ("%", "#"): # pandoc and markdown titles
                    self.summary = str(self.summary[1:]).strip()
            # NOTE(review): U() presumably coerces to unicode; u'\u21b2' is the
            # "carriage return" symbol used as a visual line separator -- confirm.
            self.body = U(u'\u21b2').join(data[1:]).strip()
            # if we have orgmode tag data, add it to the body
            if org_tags_data:
                self.body = ' '.join(\
                        [" ".join(\
                            map(lambda a: "@" + a, \
                                filter(lambda a: a != "", \
                                    org_tags_data.group("tags").split(":")))), \
                            self.body])
            # remove extra spaces in bodies
            self.body = re.sub("\s{2,}", "", str(self.body))
        if self.summary != "":
            self.isEmpty = False
            self.id = self.summary.lower().replace(" ", "_")
            # remove illegal characters from names (using rules for windows
            # systems to err on the side of precaution)
            self.id = re.sub("[*:<>/\|^]", "", self.id)
        if self.id.startswith("."):
            self.id = re.sub("^\.*", "", self.id)
|
{
"content_hash": "868e78ff83639f907104a65a7926a5b1",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 99,
"avg_line_length": 34.47904191616767,
"alnum_prop": 0.5078152136158388,
"repo_name": "fmoralesc/vim-pad",
"id": "6d68b3df662d2559afd1b67719db2682fb4348ac",
"size": "5758",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "pythonx/pad/pad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32568"
},
{
"name": "Vim script",
"bytes": "12256"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from flask_restless import APIManager
from keepmydevices import app, db
from models import Device, User
from flask_jwt import JWT, jwt_required
from werkzeug.security import check_password_hash
from flask import request, json, abort, jsonify
def authenticate(username, password):
    """Flask-JWT authentication callback: return the matching User when the
    credentials are valid, otherwise None."""
    candidate = User.query.filter(User.login == username).scalar()
    if not candidate:
        return None
    return candidate if check_password_hash(candidate.password, password) else None
def identity(payload):
    """Flask-JWT identity callback: resolve the token payload to a User by id."""
    return User.query.filter(User.id == payload['identity']).scalar()
    # return User.query.filter(User.login == "admin").first()
jwt = JWT(app, authenticate, identity)  # installs the /auth endpoint and token checking
@jwt_required()
def protected(**kw):
    """No-op preprocessor used to force JWT authentication on API endpoints."""
    pass
def init_api():
    """Register the flask-restless CRUD API for Device under /api/v1.
    GET endpoints require a valid JWT via the 'protected' preprocessor."""
    api_mgr = APIManager(app, flask_sqlalchemy_db=db)
    api_mgr.create_api(Device,
                       methods=['GET', 'POST', 'PUT'],
                       url_prefix='/api/v1',
                       preprocessors=dict(GET_SINGLE=[protected], GET_MANY=[protected]),
                       include_columns=['id', 'sn', 'brand', 'model', 'latitude', 'longitude'],
                       primary_key='sn')
# Customize API
@app.route("/api/v1/update", methods=['GET', 'POST'])
@jwt_required()
def update_device():
    """Create or update a Device record from a JSON POST body.
    Expects a JSON object with at least 'sn' (serial number); 'brand',
    'model', 'latitude' and 'longitude' are optional. Existing devices keep
    their brand/model when the request omits them. Returns the device as
    JSON, or 400 when the body or serial number is missing.
    """
    if request.method != 'POST':
        # GET is routed only to return a usage hint.
        return "Please use 'post' method!"
    info = request.json
    # Bug fix: the original used info['sn'] directly, which raised KeyError
    # (HTTP 500) when 'sn' was absent and TypeError when the body was not
    # JSON, instead of the intended 400 response.
    if not info or not info.get('sn'):
        abort(400)
    device = Device.query.filter(Device.sn == info['sn']).scalar()
    if device is None:
        app.logger.debug("Device doesn't exist!")
        device = Device(sn=info['sn'])
        db.session.add(device)
    else:
        app.logger.debug("Device already exists!")
    # Brand/model are only overwritten when supplied, so a location-only
    # ping does not erase known values (matches the original update branch).
    if info.get('brand'):
        device.brand = info.get('brand')
    if info.get('model'):
        device.model = info.get('model')
    device.latitude = info.get('latitude')
    device.longitude = info.get('longitude')
    device.timestamp = datetime.now()
    db.session.commit()
    return jsonify(device.to_json())
|
{
"content_hash": "1c32703571c4b12a2e344b8be5732b34",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 95,
"avg_line_length": 31.513513513513512,
"alnum_prop": 0.5913379073756432,
"repo_name": "MiloJiang/KeepMyDevices",
"id": "be8660a2564d6f4056dfcf5229301acd41743703",
"size": "2332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/keepmydevices/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7631"
},
{
"name": "HTML",
"bytes": "3883"
},
{
"name": "Java",
"bytes": "14438"
},
{
"name": "Python",
"bytes": "13852"
},
{
"name": "Swift",
"bytes": "9790"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from configurable import Configurable
from bucket import Bucket
#***************************************************************
class Metabucket(Configurable):
  """Partitions sentences into length-based Buckets for minibatching."""
  #=============================================================
  def __init__(self, *args, **kwargs):
    """Create n_bkts empty Buckets; n_bkts comes from kwargs or the config."""
    self._n_bkts = kwargs.pop('n_bkts', None)
    super(Metabucket, self).__init__(*args, **kwargs)
    if self._n_bkts is None:
      # Fall back to the n_bkts exposed by the Configurable base class.
      self._n_bkts = super(Metabucket, self).n_bkts
    self._buckets = [Bucket(self._config, name='Sents-%d' % i) for i in xrange(self.n_bkts)]
    self._sizes = None
    self._data = None
    self._len2bkt = None
    return
  #=============================================================
  def reset(self, sizes):
    """Reset every bucket; *sizes* is an ascending sequence of per-bucket max lengths."""
    self._data = []
    self._sizes = sizes
    self._len2bkt = {}
    prev_size = -1
    for bkt_idx, size in enumerate(sizes):
      self._buckets[bkt_idx].reset(size)
      # Map every sentence length in (prev_size, size] to this bucket index.
      self._len2bkt.update(zip(range(prev_size+1, size+1), [bkt_idx]*(size-prev_size)))
      prev_size=size
    return
  #=============================================================
  def add(self, sent):
    """Route *sent* to the bucket for its length; record its (bucket, index) pair."""
    if isinstance(self._data, np.ndarray):
      raise TypeError("The buckets have already been finalized, you can't add more to them")
    bkt_idx = self._len2bkt[len(sent)]
    idx = self._buckets[bkt_idx].add(sent)
    self._data.append( (bkt_idx, idx) )
    return
  #=============================================================
  def _finalize(self):
    """Freeze all buckets and convert the (bucket, index) table to a numpy array."""
    for bucket in self:
      bucket._finalize()
    self._data = np.array(self._data)
    return
  #=============================================================
  @property
  def n_bkts(self):
    # Number of buckets in use.
    return self._n_bkts
  @property
  def data(self):
    # After _finalize(): array of (bucket_idx, idx_within_bucket) rows.
    return self._data
  @property
  def size(self):
    # Total number of sentences added.
    return self.data.shape[0]
  #=============================================================
  def __iter__(self):
    return (bucket for bucket in self._buckets)
  def __getitem__(self, key):
    return self._buckets[key]
  def __len__(self):
    return len(self._buckets)
|
{
"content_hash": "93a27b9b348b76673ff7c4e340305b04",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 92,
"avg_line_length": 28.1125,
"alnum_prop": 0.48421520675855934,
"repo_name": "Northeaster/TargetSentimentAnalysis",
"id": "cf086318523253eb11ce1bd444ce877d9b558d63",
"size": "2296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metabucket.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "53523"
},
{
"name": "Python",
"bytes": "229837"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.