| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
import _plotly_utils.basevalidators
class SeparatethousandsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name="separatethousands",
parent_name="scatter.marker.colorbar",
**kwargs,
):
super(SeparatethousandsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
{
"content_hash": "c5ce9043b351630b31debee1b60e1fe6",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 30.5,
"alnum_prop": 0.6086065573770492,
"repo_name": "plotly/plotly.py",
"id": "0db06d828e6d858a8560090d527e25cdb3ba0a7e",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatter/marker/colorbar/_separatethousands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
from ..dbexceptions import DependencyError
from relational import RelationalWarehouse #@UnresolvedImport
from ..library.database import LibraryDb
class PostgresWarehouse(RelationalWarehouse):
def create(self):
self.database.create()
self.database.connection.execute('CREATE SCHEMA IF NOT EXISTS library;')
self.library.database.create()
def drop_user(self, u):
e = self.database.connection.execute
try: e("DROP SCHEMA {} CASCADE;".format(u))
except: pass
try: e("DROP OWNED BY {}".format(u))
except: pass
try: e("DROP ROLE {}".format(u))
except: pass
def create_user(self, u):
e = self.database.connection.execute
e("CREATE ROLE {0} LOGIN PASSWORD '{0}'".format(u))
e("CREATE SCHEMA {0} AUTHORIZATION {0};".format(u))
e("ALTER ROLE {0} SET search_path TO library,public,{0};".format(u))
# From http://stackoverflow.com/a/8247052
e("GRANT SELECT ON ALL TABLES IN SCHEMA public TO {}".format(u))
e("""ALTER DEFAULT PRIVILEGES IN SCHEMA public
GRANT SELECT ON TABLES TO {}; """.format(u))
e("GRANT SELECT, USAGE ON ALL SEQUENCES IN SCHEMA public TO {}".format(u))
e("""ALTER DEFAULT PRIVILEGES IN SCHEMA public
GRANT SELECT, USAGE ON SEQUENCES TO {}""".format(u))
def users(self):
q = """SELECT
u.usename AS "name",
u.usesysid AS "id",
u.usecreatedb AS "createdb",
u.usesuper AS "superuser"
FROM pg_catalog.pg_user u
ORDER BY 1;"""
return { row['name']:dict(row) for row
in self.database.connection.execute(q) }
def table_meta(self, d_vid, p_vid, table_name):
'''Get the metadata directly from the database. This requires that
table_name be the same as the table name as it is installed in the database.'''
self.library.database.session.execute("SET search_path TO library")
return super(PostgresWarehouse, self).table_meta(d_vid, p_vid, table_name)
def remove_by_name(self,name):
'''Call the parent, then remove CSV partitions'''
from ..bundle import LibraryDbBundle
from ..identity import PartitionNameQuery
super(PostgresWarehouse, self).remove_by_name(name)
dataset = self.get(name)
if dataset.partition:
b = LibraryDbBundle(self.library.database, dataset.vid)
p = b.partitions.find(PartitionNameQuery(id_=dataset.partition.id_))
for p in p.get_csv_parts():
super(PostgresWarehouse, self).remove_by_name(p.identity.vname)
def _ogr_args(self, partition):
db = self.database
ogr_dsn = ("PG:'dbname={dbname} user={username} host={host} password={password}'"
.format(username=db.username, password=db.password,
host=db.server, dbname=db.dbname))
return ["-f PostgreSQL", ogr_dsn,
partition.database.path,
"--config PG_USE_COPY YES"]
def _copy_command(self, table, url):
template = """COPY "public"."{table}" FROM PROGRAM 'curl -s -L --compressed "{url}"' WITH ( FORMAT csv )"""
return template.format(table=table, url=url)
def load_local(self, partition, table_name):
return self.load_insert(partition, table_name)
def load_remote(self, partition, table_name, urls):
self.logger.log('install_partition_csv {}'.format(partition.identity.name))
pdb = partition.database
sqla_table, meta = self.create_table(partition.identity, table_name)
for url in urls:
self.logger.log('install_csv_url {}'.format(url))
cmd = self._copy_command(sqla_table, url)
self.logger.log('installing with command: {} '.format(cmd))
r = self.database.connection.execute(cmd)
#self.logger.log('installed_csv_url {}'.format(url))
r = self.database.connection.execute('commit')
def install_view(self, view_text):
print '!!!!', view_text
return
e = self.database.connection.execute
e(view_text)
|
{
"content_hash": "8541b70dc9b1679ef2f9336212903977",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 118,
"avg_line_length": 31.027397260273972,
"alnum_prop": 0.5918322295805739,
"repo_name": "kball/ambry",
"id": "2069457079653a125d1cbd679d67c229e4b2060f",
"size": "4530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ambry/warehouse/postgres.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1229770"
},
{
"name": "Ruby",
"bytes": "2885"
},
{
"name": "Shell",
"bytes": "16552"
}
],
"symlink_target": ""
}
|
from django.db import models
class Book(models.Model):
"""
My first book model
"""
title = models.CharField(
max_length=255,
verbose_name=u'Book Title')
abstract = models.TextField(
verbose_name=u'Abstract')
price_net = models.DecimalField(
max_digits=8,
decimal_places=2,
verbose_name=u'Price Net')
def __unicode__(self):
return self.title
class Meta:
verbose_name = u'Book'
verbose_name_plural = u'Books'
ordering = ('title', )
|
{
"content_hash": "900113c7318a2567adb5ef90be3d59a4",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 38,
"avg_line_length": 21.88,
"alnum_prop": 0.5776965265082267,
"repo_name": "stephanpoetschner/django-vienna",
"id": "11c81885746e153b893319d1dfc56ad7f2a75796",
"size": "547",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "2014/03/django-rest-framework/src/demo1/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "131200"
},
{
"name": "HTML",
"bytes": "51555"
},
{
"name": "JavaScript",
"bytes": "215136"
},
{
"name": "Python",
"bytes": "85666"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import ctypes
import random
import pickle
import warnings
import numba
import numpy as np
from numba import (float32, float64, int16, int32, boolean, deferred_type,
optional)
from numba import njit, typeof
from numba.core import types, errors
from numba.core.dispatcher import Dispatcher
from numba.core.errors import LoweringError
from numba.core.runtime.nrt import MemInfo
from numba.experimental import jitclass
from numba.experimental.jitclass import _box
from numba.tests.support import TestCase, MemoryLeakMixin
import unittest
class TestClass1(object):
def __init__(self, x, y, z=1, *, a=5):
self.x = x
self.y = y
self.z = z
self.a = a
class TestClass2(object):
def __init__(self, x, y, z=1, *args, a=5):
self.x = x
self.y = y
self.z = z
self.args = args
self.a = a
def _get_meminfo(box):
ptr = _box.box_get_meminfoptr(box)
mi = MemInfo(ptr)
mi.acquire()
return mi
class TestJitClass(TestCase, MemoryLeakMixin):
def _check_spec(self, spec, test_cls=None):
if test_cls is None:
@jitclass(spec)
class Test(object):
def __init__(self):
pass
test_cls = Test
clsty = test_cls.class_type.instance_type
names = list(clsty.struct.keys())
values = list(clsty.struct.values())
if isinstance(spec, OrderedDict):
all_expected = spec.items()
else:
all_expected = spec
self.assertEqual(len(names), len(spec))
for got, expected in zip(zip(names, values), all_expected):
self.assertEqual(got[0], expected[0])
self.assertEqual(got[1], expected[1])
def test_ordereddict_spec(self):
spec = OrderedDict()
spec['x'] = int32
spec['y'] = float32
self._check_spec(spec)
def test_list_spec(self):
spec = [('x', int32),
('y', float32)]
self._check_spec(spec)
def test_type_annotations(self):
spec = [('x', int32)]
@jitclass(spec)
class Test(object):
y: int
def __init__(self):
pass
self._check_spec(spec, Test)
def test_spec_errors(self):
spec1 = [('x', int), ('y', float32[:])]
spec2 = [(1, int32), ('y', float32[:])]
class Test(object):
def __init__(self):
pass
with self.assertRaises(TypeError) as raises:
jitclass(spec1)(Test)
self.assertIn("spec values should be Numba type instances",
str(raises.exception))
with self.assertRaises(TypeError) as raises:
jitclass(spec2)(Test)
self.assertEqual(str(raises.exception),
"spec keys should be strings, got 1")
def test_init_errors(self):
@jitclass([])
class Test:
def __init__(self):
return 7
with self.assertRaises(errors.TypingError) as raises:
Test()
self.assertIn("__init__() should return None, not",
str(raises.exception))
def _make_Float2AndArray(self):
spec = OrderedDict()
spec['x'] = float32
spec['y'] = float32
spec['arr'] = float32[:]
@jitclass(spec)
class Float2AndArray(object):
def __init__(self, x, y, arr):
self.x = x
self.y = y
self.arr = arr
def add(self, val):
self.x += val
self.y += val
return val
return Float2AndArray
def _make_Vector2(self):
spec = OrderedDict()
spec['x'] = int32
spec['y'] = int32
@jitclass(spec)
class Vector2(object):
def __init__(self, x, y):
self.x = x
self.y = y
return Vector2
def test_jit_class_1(self):
Float2AndArray = self._make_Float2AndArray()
Vector2 = self._make_Vector2()
@njit
def bar(obj):
return obj.x + obj.y
@njit
def foo(a):
obj = Float2AndArray(1, 2, a)
obj.add(123)
vec = Vector2(3, 4)
return bar(obj), bar(vec), obj.arr
inp = np.ones(10, dtype=np.float32)
a, b, c = foo(inp)
self.assertEqual(a, 123 + 1 + 123 + 2)
self.assertEqual(b, 3 + 4)
self.assertPreciseEqual(c, inp)
def test_jitclass_usage_from_python(self):
Float2AndArray = self._make_Float2AndArray()
@njit
def identity(obj):
return obj
@njit
def retrieve_attributes(obj):
return obj.x, obj.y, obj.arr
arr = np.arange(10, dtype=np.float32)
obj = Float2AndArray(1, 2, arr)
obj_meminfo = _get_meminfo(obj)
self.assertEqual(obj_meminfo.refcount, 2)
self.assertEqual(obj_meminfo.data, _box.box_get_dataptr(obj))
self.assertEqual(obj._numba_type_.class_type,
Float2AndArray.class_type)
# Use jit class instance in numba
other = identity(obj)
other_meminfo = _get_meminfo(other) # duplicates MemInfo object to obj
self.assertEqual(obj_meminfo.refcount, 4)
self.assertEqual(other_meminfo.refcount, 4)
self.assertEqual(other_meminfo.data, _box.box_get_dataptr(other))
self.assertEqual(other_meminfo.data, obj_meminfo.data)
# Check dtor
del other, other_meminfo
self.assertEqual(obj_meminfo.refcount, 2)
# Check attributes
out_x, out_y, out_arr = retrieve_attributes(obj)
self.assertEqual(out_x, 1)
self.assertEqual(out_y, 2)
self.assertIs(out_arr, arr)
# Access attributes from python
self.assertEqual(obj.x, 1)
self.assertEqual(obj.y, 2)
self.assertIs(obj.arr, arr)
# Access methods from python
self.assertEqual(obj.add(123), 123)
self.assertEqual(obj.x, 1 + 123)
self.assertEqual(obj.y, 2 + 123)
# Setter from python
obj.x = 333
obj.y = 444
obj.arr = newarr = np.arange(5, dtype=np.float32)
self.assertEqual(obj.x, 333)
self.assertEqual(obj.y, 444)
self.assertIs(obj.arr, newarr)
def test_jitclass_datalayout(self):
spec = OrderedDict()
# Boolean has different layout as value vs data
spec['val'] = boolean
@jitclass(spec)
class Foo(object):
def __init__(self, val):
self.val = val
self.assertTrue(Foo(True).val)
self.assertFalse(Foo(False).val)
def test_deferred_type(self):
node_type = deferred_type()
spec = OrderedDict()
spec['data'] = float32
spec['next'] = optional(node_type)
@njit
def get_data(node):
return node.data
@jitclass(spec)
class LinkedNode(object):
def __init__(self, data, next):
self.data = data
self.next = next
def get_next_data(self):
# use deferred type as argument
return get_data(self.next)
def append_to_tail(self, other):
cur = self
while cur.next is not None:
cur = cur.next
cur.next = other
node_type.define(LinkedNode.class_type.instance_type)
first = LinkedNode(123, None)
self.assertEqual(first.data, 123)
self.assertIsNone(first.next)
second = LinkedNode(321, first)
first_meminfo = _get_meminfo(first)
second_meminfo = _get_meminfo(second)
self.assertEqual(first_meminfo.refcount, 3)
self.assertEqual(second.next.data, first.data)
self.assertEqual(first_meminfo.refcount, 3)
self.assertEqual(second_meminfo.refcount, 2)
# Test using deferred type as argument
first_val = second.get_next_data()
self.assertEqual(first_val, first.data)
# Check setattr (issue #2606)
self.assertIsNone(first.next)
second.append_to_tail(LinkedNode(567, None))
self.assertIsNotNone(first.next)
self.assertEqual(first.next.data, 567)
self.assertIsNone(first.next.next)
second.append_to_tail(LinkedNode(678, None))
self.assertIsNotNone(first.next.next)
self.assertEqual(first.next.next.data, 678)
# Check ownership
self.assertEqual(first_meminfo.refcount, 3)
del second, second_meminfo
self.assertEqual(first_meminfo.refcount, 2)
def test_c_structure(self):
spec = OrderedDict()
spec['a'] = int32
spec['b'] = int16
spec['c'] = float64
@jitclass(spec)
class Struct(object):
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
st = Struct(0xabcd, 0xef, 3.1415)
class CStruct(ctypes.Structure):
_fields_ = [
('a', ctypes.c_int32),
('b', ctypes.c_int16),
('c', ctypes.c_double),
]
ptr = ctypes.c_void_p(_box.box_get_dataptr(st))
cstruct = ctypes.cast(ptr, ctypes.POINTER(CStruct))[0]
self.assertEqual(cstruct.a, st.a)
self.assertEqual(cstruct.b, st.b)
self.assertEqual(cstruct.c, st.c)
def test_is(self):
Vector = self._make_Vector2()
vec_a = Vector(1, 2)
@njit
def do_is(a, b):
return a is b
with self.assertRaises(LoweringError) as raises:
# trigger compilation
do_is(vec_a, vec_a)
self.assertIn('no default `is` implementation', str(raises.exception))
def test_isinstance(self):
Vector2 = self._make_Vector2()
vec = Vector2(1, 2)
self.assertIsInstance(vec, Vector2)
def test_subclassing(self):
Vector2 = self._make_Vector2()
with self.assertRaises(TypeError) as raises:
class SubV(Vector2):
pass
self.assertEqual(str(raises.exception),
"cannot subclass from a jitclass")
def test_base_class(self):
class Base(object):
def what(self):
return self.attr
@jitclass([('attr', int32)])
class Test(Base):
def __init__(self, attr):
self.attr = attr
obj = Test(123)
self.assertEqual(obj.what(), 123)
def test_globals(self):
class Mine(object):
constant = 123
def __init__(self):
pass
with self.assertRaises(TypeError) as raises:
jitclass(())(Mine)
self.assertEqual(str(raises.exception),
"class members are not yet supported: constant")
def test_user_getter_setter(self):
@jitclass([('attr', int32)])
class Foo(object):
def __init__(self, attr):
self.attr = attr
@property
def value(self):
return self.attr + 1
@value.setter
def value(self, val):
self.attr = val - 1
foo = Foo(123)
self.assertEqual(foo.attr, 123)
# Getter
self.assertEqual(foo.value, 123 + 1)
# Setter
foo.value = 789
self.assertEqual(foo.attr, 789 - 1)
self.assertEqual(foo.value, 789)
# Test nopython mode usage of getter and setter
@njit
def bar(foo, val):
a = foo.value
foo.value = val
b = foo.value
c = foo.attr
return a, b, c
a, b, c = bar(foo, 567)
self.assertEqual(a, 789)
self.assertEqual(b, 567)
self.assertEqual(c, 567 - 1)
def test_user_deleter_error(self):
class Foo(object):
def __init__(self):
pass
@property
def value(self):
return 1
@value.deleter
def value(self):
pass
with self.assertRaises(TypeError) as raises:
jitclass([])(Foo)
self.assertEqual(str(raises.exception),
"deleter is not supported: value")
def test_name_shadowing_error(self):
class Foo(object):
def __init__(self):
pass
@property
def my_property(self):
pass
def my_method(self):
pass
with self.assertRaises(NameError) as raises:
jitclass([('my_property', int32)])(Foo)
self.assertEqual(str(raises.exception), 'name shadowing: my_property')
with self.assertRaises(NameError) as raises:
jitclass([('my_method', int32)])(Foo)
self.assertEqual(str(raises.exception), 'name shadowing: my_method')
def test_distinct_classes(self):
# Different classes with the same names shouldn't confuse the compiler
@jitclass([('x', int32)])
class Foo(object):
def __init__(self, x):
self.x = x + 2
def run(self):
return self.x + 1
FirstFoo = Foo
@jitclass([('x', int32)])
class Foo(object):
def __init__(self, x):
self.x = x - 2
def run(self):
return self.x - 1
SecondFoo = Foo
foo = FirstFoo(5)
self.assertEqual(foo.x, 7)
self.assertEqual(foo.run(), 8)
foo = SecondFoo(5)
self.assertEqual(foo.x, 3)
self.assertEqual(foo.run(), 2)
def test_parameterized(self):
class MyClass(object):
def __init__(self, value):
self.value = value
def create_my_class(value):
cls = jitclass([('value', typeof(value))])(MyClass)
return cls(value)
a = create_my_class(123)
self.assertEqual(a.value, 123)
b = create_my_class(12.3)
self.assertEqual(b.value, 12.3)
c = create_my_class(np.array([123]))
np.testing.assert_equal(c.value, [123])
d = create_my_class(np.array([12.3]))
np.testing.assert_equal(d.value, [12.3])
def test_protected_attrs(self):
spec = {
'value': int32,
'_value': float32,
'__value': int32,
'__value__': int32,
}
@jitclass(spec)
class MyClass(object):
def __init__(self, value):
self.value = value
self._value = value / 2
self.__value = value * 2
self.__value__ = value - 1
@property
def private_value(self):
return self.__value
@property
def _inner_value(self):
return self._value
@_inner_value.setter
def _inner_value(self, v):
self._value = v
@property
def __private_value(self):
return self.__value
@__private_value.setter
def __private_value(self, v):
self.__value = v
def swap_private_value(self, new):
old = self.__private_value
self.__private_value = new
return old
def _protected_method(self, factor):
return self._value * factor
def __private_method(self, factor):
return self.__value * factor
def check_private_method(self, factor):
return self.__private_method(factor)
value = 123
inst = MyClass(value)
# test attributes
self.assertEqual(inst.value, value)
self.assertEqual(inst._value, value / 2)
self.assertEqual(inst.private_value, value * 2)
# test properties
self.assertEqual(inst._inner_value, inst._value)
freeze_inst_value = inst._value
inst._inner_value -= 1
self.assertEqual(inst._inner_value, freeze_inst_value - 1)
self.assertEqual(inst.swap_private_value(321), value * 2)
self.assertEqual(inst.swap_private_value(value * 2), 321)
# test methods
self.assertEqual(inst._protected_method(3), inst._value * 3)
self.assertEqual(inst.check_private_method(3), inst.private_value * 3)
# test special
self.assertEqual(inst.__value__, value - 1)
inst.__value__ -= 100
self.assertEqual(inst.__value__, value - 101)
# test errors
@njit
def access_dunder(inst):
return inst.__value
with self.assertRaises(errors.TypingError) as raises:
access_dunder(inst)
# It will appear as "_TestJitClass__value" because `access_dunder`
# is defined within the scope of 'TestJitClass'.
self.assertIn('_TestJitClass__value', str(raises.exception))
with self.assertRaises(AttributeError) as raises:
access_dunder.py_func(inst)
self.assertIn('_TestJitClass__value', str(raises.exception))
def test_annotations(self):
"""
Methods with annotations should compile fine (issue #1911).
"""
from .annotation_usecases import AnnotatedClass
spec = {'x': int32}
cls = jitclass(spec)(AnnotatedClass)
obj = cls(5)
self.assertEqual(obj.x, 5)
self.assertEqual(obj.add(2), 7)
def test_docstring(self):
@jitclass([])
class Apple(object):
"Class docstring"
def __init__(self):
"init docstring"
def foo(self):
"foo method docstring"
@property
def aval(self):
"aval property docstring"
self.assertEqual(Apple.__doc__, 'Class docstring')
self.assertEqual(Apple.__init__.__doc__, 'init docstring')
self.assertEqual(Apple.foo.__doc__, 'foo method docstring')
self.assertEqual(Apple.aval.__doc__, 'aval property docstring')
def test_kwargs(self):
spec = [('a', int32),
('b', float64)]
@jitclass(spec)
class TestClass(object):
def __init__(self, x, y, z):
self.a = x * y
self.b = z
x = 2
y = 2
z = 1.1
kwargs = {'y': y, 'z': z}
tc = TestClass(x=2, **kwargs)
self.assertEqual(tc.a, x * y)
self.assertEqual(tc.b, z)
def test_default_args(self):
spec = [('x', int32),
('y', int32),
('z', int32)]
@jitclass(spec)
class TestClass(object):
def __init__(self, x, y, z=1):
self.x = x
self.y = y
self.z = z
tc = TestClass(1, 2, 3)
self.assertEqual(tc.x, 1)
self.assertEqual(tc.y, 2)
self.assertEqual(tc.z, 3)
tc = TestClass(1, 2)
self.assertEqual(tc.x, 1)
self.assertEqual(tc.y, 2)
self.assertEqual(tc.z, 1)
tc = TestClass(y=2, z=5, x=1)
self.assertEqual(tc.x, 1)
self.assertEqual(tc.y, 2)
self.assertEqual(tc.z, 5)
def test_default_args_keyonly(self):
spec = [('x', int32),
('y', int32),
('z', int32),
('a', int32)]
TestClass = jitclass(spec)(TestClass1)
tc = TestClass(2, 3)
self.assertEqual(tc.x, 2)
self.assertEqual(tc.y, 3)
self.assertEqual(tc.z, 1)
self.assertEqual(tc.a, 5)
tc = TestClass(y=4, x=2, a=42, z=100)
self.assertEqual(tc.x, 2)
self.assertEqual(tc.y, 4)
self.assertEqual(tc.z, 100)
self.assertEqual(tc.a, 42)
tc = TestClass(y=4, x=2, a=42)
self.assertEqual(tc.x, 2)
self.assertEqual(tc.y, 4)
self.assertEqual(tc.z, 1)
self.assertEqual(tc.a, 42)
tc = TestClass(y=4, x=2)
self.assertEqual(tc.x, 2)
self.assertEqual(tc.y, 4)
self.assertEqual(tc.z, 1)
self.assertEqual(tc.a, 5)
def test_default_args_starargs_and_keyonly(self):
spec = [('x', int32),
('y', int32),
('z', int32),
('args', types.UniTuple(int32, 2)),
('a', int32)]
with self.assertRaises(errors.UnsupportedError) as raises:
jitclass(spec)(TestClass2)
msg = "VAR_POSITIONAL argument type unsupported"
self.assertIn(msg, str(raises.exception))
def test_generator_method(self):
spec = []
@jitclass(spec)
class TestClass(object):
def __init__(self):
pass
def gen(self, niter):
for i in range(niter):
yield np.arange(i)
def expected_gen(niter):
for i in range(niter):
yield np.arange(i)
for niter in range(10):
for expect, got in zip(expected_gen(niter), TestClass().gen(niter)):
self.assertPreciseEqual(expect, got)
def test_getitem(self):
spec = [('data', int32[:])]
@jitclass(spec)
class TestClass(object):
def __init__(self):
self.data = np.zeros(10, dtype=np.int32)
def __setitem__(self, key, data):
self.data[key] = data
def __getitem__(self, key):
return self.data[key]
@njit
def create_and_set_indices():
t = TestClass()
t[1] = 1
t[2] = 2
t[3] = 3
return t
@njit
def get_index(t, n):
return t[n]
t = create_and_set_indices()
self.assertEqual(get_index(t, 1), 1)
self.assertEqual(get_index(t, 2), 2)
self.assertEqual(get_index(t, 3), 3)
def test_getitem_unbox(self):
spec = [('data', int32[:])]
@jitclass(spec)
class TestClass(object):
def __init__(self):
self.data = np.zeros(10, dtype=np.int32)
def __setitem__(self, key, data):
self.data[key] = data
def __getitem__(self, key):
return self.data[key]
t = TestClass()
t[1] = 10
@njit
def set2return1(t):
t[2] = 20
return t[1]
t_1 = set2return1(t)
self.assertEqual(t_1, 10)
self.assertEqual(t[2], 20)
def test_getitem_complex_key(self):
spec = [('data', int32[:, :])]
@jitclass(spec)
class TestClass(object):
def __init__(self):
self.data = np.zeros((10, 10), dtype=np.int32)
def __setitem__(self, key, data):
self.data[int(key.real), int(key.imag)] = data
def __getitem__(self, key):
return self.data[int(key.real), int(key.imag)]
t = TestClass()
t[complex(1, 1)] = 3
@njit
def get_key(t, real, imag):
return t[complex(real, imag)]
@njit
def set_key(t, real, imag, data):
t[complex(real, imag)] = data
self.assertEqual(get_key(t, 1, 1), 3)
set_key(t, 2, 2, 4)
self.assertEqual(t[complex(2, 2)], 4)
def test_getitem_tuple_key(self):
spec = [('data', int32[:, :])]
@jitclass(spec)
class TestClass(object):
def __init__(self):
self.data = np.zeros((10, 10), dtype=np.int32)
def __setitem__(self, key, data):
self.data[key[0], key[1]] = data
def __getitem__(self, key):
return self.data[key[0], key[1]]
t = TestClass()
t[1, 1] = 11
@njit
def get11(t):
return t[1, 1]
@njit
def set22(t, data):
t[2, 2] = data
self.assertEqual(get11(t), 11)
set22(t, 22)
self.assertEqual(t[2, 2], 22)
def test_getitem_slice_key(self):
spec = [('data', int32[:])]
@jitclass(spec)
class TestClass(object):
def __init__(self):
self.data = np.zeros(10, dtype=np.int32)
def __setitem__(self, slc, data):
self.data[slc.start] = data
self.data[slc.stop] = data + slc.step
def __getitem__(self, slc):
return self.data[slc.start]
t = TestClass()
# set t.data[1] = 1 and t.data[5] = 2
t[1:5:1] = 1
self.assertEqual(t[1:1:1], 1)
self.assertEqual(t[5:5:5], 2)
@njit
def get5(t):
return t[5:6:1]
self.assertEqual(get5(t), 2)
# sets t.data[2] = data, and t.data[6] = data + 1
@njit
def set26(t, data):
t[2:6:1] = data
set26(t, 2)
self.assertEqual(t[2:2:1], 2)
self.assertEqual(t[6:6:1], 3)
def test_jitclass_longlabel_not_truncated(self):
# See issue #3872: llvm 7 introduced a max label length of 1024 chars.
# Numba ships patched llvm 7.1 (ppc64le) and patched llvm 8 to undo this
# change; this test is here to make sure long labels are ok:
alphabet = [chr(ord('a') + x) for x in range(26)]
spec = [(letter * 10, float64) for letter in alphabet]
spec.extend([(letter.upper() * 10, float64) for letter in alphabet])
@jitclass(spec)
class TruncatedLabel(object):
def __init__(self,):
self.aaaaaaaaaa = 10.
def meth1(self):
self.bbbbbbbbbb = random.gauss(self.aaaaaaaaaa, self.aaaaaaaaaa)
def meth2(self):
self.meth1()
# unpatched LLVMs will raise here...
TruncatedLabel().meth2()
def test_pickling(self):
@jitclass(spec=[])
class PickleTestSubject(object):
def __init__(self):
pass
inst = PickleTestSubject()
ty = typeof(inst)
self.assertIsInstance(ty, types.ClassInstanceType)
pickled = pickle.dumps(ty)
self.assertIs(pickle.loads(pickled), ty)
def test_static_methods(self):
@jitclass([("x", int32)])
class Test1:
def __init__(self, x):
self.x = x
def increase(self, y):
self.x = self.add(self.x, y)
return self.x
@staticmethod
def add(a, b):
return a + b
@staticmethod
def sub(a, b):
return a - b
@jitclass([("x", int32)])
class Test2:
def __init__(self, x):
self.x = x
def increase(self, y):
self.x = self.add(self.x, y)
return self.x
@staticmethod
def add(a, b):
return a - b
self.assertIsInstance(Test1.add, Dispatcher)
self.assertIsInstance(Test1.sub, Dispatcher)
self.assertIsInstance(Test2.add, Dispatcher)
self.assertNotEqual(Test1.add, Test2.add)
self.assertEqual(3, Test1.add(1, 2))
self.assertEqual(-1, Test2.add(1, 2))
self.assertEqual(4, Test1.sub(6, 2))
t1 = Test1(0)
t2 = Test2(0)
self.assertEqual(1, t1.increase(1))
self.assertEqual(-1, t2.increase(1))
self.assertEqual(2, t1.add(1, 1))
self.assertEqual(0, t1.sub(1, 1))
self.assertEqual(0, t2.add(1, 1))
self.assertEqual(2j, t1.add(1j, 1j))
self.assertEqual(1j, t1.sub(2j, 1j))
self.assertEqual("foobar", t1.add("foo", "bar"))
with self.assertRaises(AttributeError) as raises:
Test2.sub(3, 1)
self.assertIn("has no attribute 'sub'",
str(raises.exception))
with self.assertRaises(TypeError) as raises:
Test1.add(3)
self.assertIn("not enough arguments: expected 2, got 1",
str(raises.exception))
# Check error message for calling a static method as a class attr from
# another method (currently unsupported).
@jitclass([])
class Test3:
def __init__(self):
pass
@staticmethod
def a_static_method(a, b):
pass
def call_static(self):
return Test3.a_static_method(1, 2)
invalid = Test3()
with self.assertRaises(errors.TypingError) as raises:
invalid.call_static()
self.assertIn("Unknown attribute 'a_static_method'",
str(raises.exception))
def test_import_warnings(self):
class Test:
def __init__(self):
pass
with warnings.catch_warnings(record=True) as ws:
numba.experimental.jitclass([])(Test)
self.assertEqual(len(ws), 0)
numba.jitclass([])(Test)
self.assertEqual(len(ws), 1)
self.assertIs(ws[0].category, errors.NumbaDeprecationWarning)
self.assertIn("numba.experimental.jitclass", ws[0].message.msg)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "9fac8bcac13b7d3fb925c12b2609fca9",
"timestamp": "",
"source": "github",
"line_count": 1046,
"max_line_length": 80,
"avg_line_length": 28.161567877629064,
"alnum_prop": 0.5191635264962488,
"repo_name": "sklam/numba",
"id": "87f11657f1b5635b2943eba81fb1ddddcb96d8c2",
"size": "29457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/tests/test_jitclasses.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6783"
},
{
"name": "C",
"bytes": "638283"
},
{
"name": "C++",
"bytes": "52741"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "7918676"
},
{
"name": "Shell",
"bytes": "7823"
}
],
"symlink_target": ""
}
|
"""Stateless random ops which take seed as a tensor input.
Instead of taking `seed` as an attr which initializes a mutable state within
the op, these random ops take `seed` as an input, and the random numbers are
a deterministic function of `shape` and `seed`.
WARNING: These ops are in contrib, and are not stable. They should be
consistent across multiple runs on the same hardware, but only for the same
version of the code.
@@stateless_multinomial
@@stateless_random_uniform
@@stateless_random_normal
@@stateless_truncated_normal
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.stateless.python.stateless_ops import *
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
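# A minimal usage sketch (illustrative only; the exact signature and the
# required shape of the seed tensor are not documented above, so treat the
# details below as assumptions rather than the actual API):
#
#     seed = tf.constant([1, 2], dtype=tf.int64)
#     a = stateless_random_uniform(shape=[3], seed=seed)
#     b = stateless_random_uniform(shape=[3], seed=seed)
#     # Because `seed` is an input rather than mutable op state, `a` and `b`
#     # evaluate to identical values for the same code version and hardware.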
|
{
"content_hash": "3df8a813276c6a62c048e78a83293fbb",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 76,
"avg_line_length": 32.57692307692308,
"alnum_prop": 0.7804014167650531,
"repo_name": "snnn/tensorflow",
"id": "30d0a7ab6ae1ebca44df9cba788674fcad69d538",
"size": "1536",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/stateless/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "339398"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49741628"
},
{
"name": "CMake",
"bytes": "195409"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254047"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867093"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58612"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41593453"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "476832"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import unittest
from vodem.api import unlock_nck_time
class TestUnlockNckTime(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.valid_response = {
'unlock_nck_time': '',
}
def test_call(self):
resp = unlock_nck_time()
self.assertEqual(self.valid_response, resp)
|
{
"content_hash": "a75e7451f2305d99da44f0ed7681a29a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 51,
"avg_line_length": 20.6875,
"alnum_prop": 0.6253776435045317,
"repo_name": "alzeih/python-vodem-vodafone-K4607-Z",
"id": "2bca0f84b370fc6ee5995ec3a8d46540f9fd0324",
"size": "331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/api/test_unlock_nck_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "19346"
},
{
"name": "JavaScript",
"bytes": "444689"
},
{
"name": "Python",
"bytes": "84811"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from . import api
from django.views.generic import RedirectView, TemplateView
urlpatterns = [
url(r'^$', view=RedirectView.as_view(url='/model_example/')),
url(r'^model_example/', view=TemplateView.as_view(template_name='yaat_examples/model_example.html'), name='model_example'),
url(r'^model_computed_example/', view=TemplateView.as_view(template_name='yaat_examples/model_computed_example.html'), name='model_computed_example'),
url(r'^stateful/', view=TemplateView.as_view(template_name='yaat_examples/stateful.html'), name='stateful'),
url(r'^api/', include(api.api.urls, namespace='api'))
]
|
{
"content_hash": "76a01bc4218a4f69f0992023e1809f8e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 154,
"avg_line_length": 55,
"alnum_prop": 0.7287878787878788,
"repo_name": "pombredanne/django-yaat",
"id": "7c6286f4a41418c257757b476cec9c29c2763090",
"size": "677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/example/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2655"
},
{
"name": "JavaScript",
"bytes": "12114"
},
{
"name": "Python",
"bytes": "24037"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from ..modulos import cargar_datos, cargar_credenciales
import logging
log = logging.getLogger('justine')
class Creacion(TestCase):
"""
NOTE: Regarding data validation, test our small class directly.
"""
@classmethod
def setUp(self):
# Load the data
entidad = cargar_datos('grupo')[0]
self.uid = entidad['cn']
self.datos = {'corpus': entidad}
# Obtain a token
self.token = cargar_credenciales()
# Create our object for tests
from justine import main
from webtest import TestApp
app = main({})
self.testapp = TestApp(app)
@classmethod
def tearDownClass(self):
res = self.testapp.head('/grupos/' + self.uid, status="*", headers=self.token)
if res.status_int == 200:
self.testapp.delete('/grupos/' + self.uid, status=200, headers=self.token)
self.datos = {}
def test_creacion(self):
res = self.testapp.post_json('/grupos', status=201, params=self.datos, headers=self.token)
respuesta = res.location
self.assertEqual(respuesta, 'http://localhost/grupos/%s' % self.uid)
def test_creacion_no_json(self):
datos = "Mínimo esfuerzo para máximo daño"
self.testapp.post_json('/grupos', status=400, params=datos, headers=self.token)
def test_creacion_corpus_faltante(self):
datos = {'cuerpo': self.datos['corpus'].copy()}
self.testapp.post_json('/grupos', status=400, params=datos, headers=self.token)
def test_creacion_usuario_existente(self):
self.testapp.post_json('/grupos', status=409, params=self.datos, headers=self.token)
def test_usuarios_creacion_unauth(self):
self.testapp.post_json('/grupos', status=403, params=self.datos)
|
{
"content_hash": "9b1d20ea056cd7a39941c73d398feac1",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 98,
"avg_line_length": 33.44642857142857,
"alnum_prop": 0.6422851041110518,
"repo_name": "VTacius/justine",
"id": "15c1d94df43c455feac9bcae96c0b96eec3980ae",
"size": "1895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "justine/tests/funcionales/grupos/testCreacion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "77124"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
from taiga.base.utils.iterators import iter_queryset
from taiga.projects.notifications.models import HistoryChangeNotification
from taiga.projects.notifications.services import send_sync_notifications
class Command(BaseCommand):
def handle(self, *args, **options):
qs = HistoryChangeNotification.objects.all()
for change_notification in iter_queryset(qs, itersize=100):
send_sync_notifications(change_notification.pk)
|
{
"content_hash": "e34c39f26f9fc9dd86fbce05fe852bbc",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 73,
"avg_line_length": 42.083333333333336,
"alnum_prop": 0.7821782178217822,
"repo_name": "curiosityio/taiga-docker",
"id": "bb259cdeda4fc02762f43f9c4baa161e0d5a544d",
"size": "1418",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "taiga-back/taiga-back/taiga/projects/notifications/management/commands/send_notifications.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "186988"
},
{
"name": "JavaScript",
"bytes": "2007"
},
{
"name": "Nginx",
"bytes": "4140"
},
{
"name": "Python",
"bytes": "2793020"
},
{
"name": "Shell",
"bytes": "1392"
}
],
"symlink_target": ""
}
|
'''
compare
-------
Contains code necessary to evaluate the objective function,
.. math:: \chi^2 = w^2 (x_r - x_c)^2
where :math:`w` is a weight, :math:`x_r` is the reference data point's value,
and :math:`x_c` is the calculated or force field's value for the data point.
'''
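# Illustrative worked example (numbers invented, not taken from any data set):
# with weight w = 2.0, reference value x_r = 1.5, and calculated value
# x_c = 1.2, a single data point contributes
#     chi^2 = w**2 * (x_r - x_c)**2 = 4.0 * 0.09 = 0.36
# to the score. compare_data() below accumulates this quantity over all
# matched data points, dividing by the number of points of each data type.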
from __future__ import print_function
from __future__ import absolute_import
from collections import defaultdict
import sys
import argparse
from argparse import RawTextHelpFormatter
import logging
import logging.config
import numpy as np
import calculate
import constants as co
import datatypes
logger = logging.getLogger(__name__)
def main(args):
parser = return_compare_parser()
opts = parser.parse_args(args)
r_data = calculate.main(opts.reference.split())
c_data = calculate.main(opts.calculate.split())
r_dict = data_by_type(r_data)
c_dict = data_by_type(c_data)
r_dict, c_dict = trim_data(r_dict, c_dict)
score = compare_data(r_dict, c_dict, output=opts.output, doprint=opts.print)
# score = compare_data(r_data, c_data)
# Pretty readouts. Maybe opts.output could have 3 values:
# True, False or None
# Then I wouldn't need 2 if statements here.
# if opts.output or opts.print:
# pretty_data_comp(r_data, c_data, output=opts.output, doprint=opts.print)
logger.log(1, '>>> score: {}'.format(score))
def tor_atoms(da):
daa = da.lbl.split('-')
daa[0] = daa[0].split('_')[-1]
return daa
def trim_data(dict1,dict2):
"""
Within each data type of these dictionaries, remove data that is not present
in both lists.
"""
for typ in dict1:
if typ == 't':
to_remove = []
# Sometimes calculations are compressed into one macromodel command
# file. This results in the 'pre' and 'opt' structures being in the same
# file. Geometric data from each of these would correspond to r
# and c data for the 'pre' and 'opt' structures, respectively. This would
# mean that when trying to compare two data points from r and c, they
# would not have the exact same label since they are from different
# structure indices in the file. So instead of just trying to
# see if the labels are the same, we should try to pull out the
# filename and the atoms that comprise that data point. This is what
# is done with the regex below.
for d1 in dict1[typ]:
#if not any(x.lbl == d1.lbl for x in dict2[typ]):
if (len(co.RE_T_LBL.split(d1.lbl))) == 1:
if not any( tor_atoms(d1) == tor_atoms(x) for x in dict2[typ]):
to_remove.append(d1)
else:
if not any(co.RE_T_LBL.split(x.lbl)[1] == co.RE_T_LBL.split(d1.lbl)[1] and
co.RE_T_LBL.split(x.lbl)[2] == co.RE_T_LBL.split(d1.lbl)[2]
for x in dict2[typ]):
to_remove.append(d1)
for d2 in dict2[typ]:
#if not any(x.lbl == d2.lbl for x in dict1[typ]):
if (len(co.RE_T_LBL.split(d2.lbl))) == 1:
if not any( tor_atoms(d2) == tor_atoms(x) for x in dict1[typ]):
to_remove.append(d2)
else:
if not any(co.RE_T_LBL.split(x.lbl)[1] == co.RE_T_LBL.split(d2.lbl)[1] and
co.RE_T_LBL.split(x.lbl)[2] == co.RE_T_LBL.split(d2.lbl)[2]
for x in dict1[typ]):
to_remove.append(d2)
for datum in to_remove:
if datum in dict1[typ] and datum in dict2[typ]:
raise AssertionError("The data point that is flagged to be \
removed is present in both data sets.")
# We may want to keep track of the data that is removed.
if datum in dict1[typ]:
dict1[typ].remove(datum)
if datum in dict2[typ]:
dict2[typ].remove(datum)
if to_remove:
logger.log(20, '>>> Removed Data: {}'.format(len(to_remove)))
dict1[typ] = np.array(dict1[typ], dtype=datatypes.Datum)
dict2[typ] = np.array(dict2[typ], dtype=datatypes.Datum)
return dict1, dict2
def data_by_type(data_iterable):
"""
Takes an iterable of data and creates a dictionary keyed by data type, each
mapping to a list of data points.
"""
data_by_typ = {}
for datum in data_iterable:
if datum.typ not in data_by_typ:
data_by_typ[datum.typ] = []
data_by_typ[datum.typ].append(datum)
# Parts of the code rely on the data to be in an array form and so we must
# make the dictionary an array. This doesn't make sense to have here since
# we may have to remove data in trim_data().
#for typ in data_by_typ:
# data_by_typ[typ] = np.array(data_by_typ[typ], dtype=datatypes.Datum)
return data_by_typ
def compare_data(r_dict, c_dict, output=None, doprint=False):
"""
This function was formerly called pretty_data_comp().
Now a single function calculates the score, with options to print the
results and to write them to an output file.
"""
strings = []
strings.append('--' + ' Label '.ljust(30, '-') +
'--' + ' Weight '.center(7, '-') +
'--' + ' R. Value '.center(11, '-') +
'--' + ' C. Value '.center(11, '-') +
'--' + ' Score '.center(11, '-') +
'--' + ' Row ' + '--')
score_typ = defaultdict(float)
num_typ = defaultdict(int)
score_tot = 0.
total_num = 0
# Do we want the data types always reported in the same order? Sorting
# them ensures the same order is printed every time.
data_types = []
for typ in r_dict:
data_types.append(typ)
data_types.sort()
total_num_energy = 0
for typ in data_types:
if typ in ['e','eo','ea','eao']:
total_num_energy += len(r_dict[typ])
for typ in data_types:
total_num += int(len(r_dict[typ]))
if typ in ['e','eo','ea','eao']:
correlate_energies(r_dict[typ],c_dict[typ])
import_weights(r_dict[typ])
for r,c in zip(r_dict[typ],c_dict[typ]):
if c.typ == 't':
diff = abs(r.val - c.val)
if diff > 180.:
diff = 360. - diff
else:
diff = r.val - c.val
#score = (r.wht**2 * diff**2)
if typ in ['e', 'eo', 'ea', 'eao']:
score = (r.wht**2 * diff**2)/total_num_energy
elif typ == "h":
score = (c.wht**2 * diff**2)/len(c_dict[typ])
else:
score = (r.wht**2 * diff**2)/len(r_dict[typ])
score_tot += score
score_typ[c.typ] += score
num_typ[c.typ] += 1
if c.typ == 'eig':
if c.idx_1 == c.idx_2:
if r.val < 1100:
score_typ[c.typ + '-d-low'] += score
num_typ[c.typ + '-d-low'] += 1
else:
score_typ[c.typ + '-d-high'] += score
num_typ[c.typ + '-d-high'] += 1
else:
score_typ[c.typ + '-o'] += score
num_typ[c.typ + '-o'] += 1
# print(c.lbl, r.wht, r.val, c.val, score, c.ff_row)
if c.ff_row is None:
strings.append(' {:<30} {:>7.2f} {:>11.4f} {:>11.4f} {:>11.4f} '.format(
c.lbl, r.wht, r.val, c.val, score))
else:
strings.append(' {:<30} {:>7.2f} {:>11.4f} {:>11.4f} {:>11.4f} '\
'{!s:>5} '.format(
c.lbl, r.wht, r.val, c.val, score, c.ff_row))
# print(strings)
strings.append('-' * 89)
strings.append('{:<20} {:20.4f}'.format('Total score:', score_tot))
strings.append('{:<30} {:10d}'.format('Total Num. data points:', total_num))
for k, v in num_typ.items():
strings.append('{:<30} {:10d}'.format(k + ':', v))
strings.append('-' * 89)
for k, v in score_typ.items():
strings.append('{:<20} {:20.4f}'.format(k + ':', v))
if output:
with open(output, 'w') as f:
for line in strings:
f.write('{}\n'.format(line))
if doprint:
for line in strings:
print(line)
return score_tot
def return_compare_parser():
"""
Arguments parser for compare.
"""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
'--calculate', '-c', type=str, metavar = '" commands for calculate.py"',
help=('These commands produce the FF data. Leave one space after the '
'1st quotation mark enclosing the arguments.'))
parser.add_argument(
'--reference', '-r', type=str, metavar='" commands for calculate.py"',
help=('These commands produce the QM/reference data. Leave one space '
'after the 1st quotation mark enclosing the arguments.'))
parser.add_argument(
'--output', '-o', type=str, metavar='filename',
help='Write pretty output to filename.')
parser.add_argument(
'--print', '-p', action='store_true', dest='print',
help='Print pretty output.')
return parser
def compare_data_old(r_data, c_data):
"""
*** This was the original function used to compare the two tuples of data
*** before switching to a dictionary of data types.
Calculates the objective function score after ensuring the energies are
set properly and that the weights are all imported.
"""
# r_data = np.array(sorted(r_data, key=datatypes.datum_sort_key))
# c_data = np.array(sorted(c_data, key=datatypes.datum_sort_key))
# if zero:
# zero_energies(r_data)
assert len(r_data) == len(c_data), \
"Length of reference data and FF data doesn't match!"
correlate_energies(r_data, c_data)
import_weights(r_data)
return calculate_score(r_data, c_data)
# Energies should be zeroed inside calculate now.
# Save this in case that ever changes.
# def zero_energies(data):
# logger.log(1, '>>> zero_energies <<<')
# # Go one data type at a time.
# # We do so because the group numbers are only unique within a given data
# # type.
# for energy_type in ['e', 'eo']:
# # Determine the unique group numbers.
# indices = np.where([x.typ == energy_type for x in data])[0]
# # logger.log(1, '>>> indices: {}'.format(indices))
# # logger.log(1, '>>> data[indices]: {}'.format(data[indices]))
# # logger.log(1, '>>> [x.idx_1 for x in data[indices]]: {}'.format(
# # [x.idx_1 for x in data[indices]]))
# unique_group_nums = set([x.idx_1 for x in data[indices]])
# # Loop through the unique group numbers.
# for unique_group_num in unique_group_nums:
# # Pick out all data points that are unique to this data type
# # and group number.
# more_indices = np.where(
# [x.typ == energy_type and x.idx_1 == unique_group_num
# for x in data])[0]
# # Find the zero for this grouping.
# zero = min([x.val for x in data[more_indices]])
# for ind in more_indices:
# data[ind].val -= zero
def correlate_energies(r_data, c_data):
"""
Finds the indices corresponding to groups of energies from the FF data set.
Uses those same indices to locate the matching energies in the reference
data set.
THIS MEANS THAT THE TWO DATA SETS MUST BE ALIGNED PROPERLY!
Determines the minimum energy in the reference data set, and sets that to
zero in the FF data set.
"""
for indices in select_group_of_energies(c_data):
# Search based on FF data because the reference data may be read from
# a file and lack some of the necessary attributes.
zero, zero_ind = min(
(x.val, i) for i, x in enumerate(r_data[indices]))
zero_ind = indices[zero_ind]
# Now, we need to get that same sub list, and update the calculated
# data. As long as they are sorted the same, the indices should
# match up.
zero = c_data[zero_ind].val
for ind in indices:
c_data[ind].val -= zero
# This is outdated now. Most of this is handled inside calculate.
# 6/29/16 - Actually, now this should be unnecessary simply because the new
# method requires that reference and FF data are aligned. That being
# said, this is probably worth saving anyway.
# def correlate_energies(r_data, c_data):
# logger.log(1, '>>> correlate_energies <<<')
# for indices in select_group_of_energies(r_data):
# if r_data[indices[0]].typ in ['e', 'eo']:
# zero, zero_ind = min(
# (x.val, i) for i, x in enumerate(r_data[indices]))
# zero_ind = indices[zero_ind]
# zero = c_data[zero_ind].val
# for ind in indices:
# c_data[ind].val -= zero
# elif r_data[indices[0]].typ in ['ea', 'eao']:
# avg = sum([x.val for x in r_data[indices]])/len(r_data[indices])
# for ind in indices:
# r_data[ind].val -= avg
# avg = sum([x.val for x in c_data[indices]])/len(c_data[indices])
# for ind in indices:
# c_data[ind].val -= avg
def select_group_of_energies(data):
"""
Used to get the indices (numpy.array) for a single group of energies.
"""
for energy_type in ['e', 'eo']:
# Get all energy indices.
indices = np.where([x.typ == energy_type for x in data])[0]
# logger.log(1, '>>> indices: {}'.format(indices))
# Get the unique group numbers.
# logger.log(1, '>>> data: {}'.format(data))
unique_group_nums = set([x.idx_1 for x in data[indices]])
for unique_group_num in unique_group_nums:
# Get all the indices for the given energy type and for a single
# group.
more_indices = np.where(
[x.typ == energy_type and x.idx_1 == unique_group_num
for x in data])[0]
yield more_indices
def import_weights(data):
"""
Imports weights for various data types.
Weights can be set in constants.WEIGHTS.
"""
# Check each data point individually for weights.
for datum in data:
if datum.wht is None:
if datum.typ == 'eig':
if datum.idx_1 == datum.idx_2 == 1:
datum.wht = co.WEIGHTS['eig_i']
elif datum.idx_1 == datum.idx_2:
if datum.val < 1100:
datum.wht = co.WEIGHTS['eig_d_low']
else:
datum.wht = co.WEIGHTS['eig_d_high']
elif datum.idx_1 != datum.idx_2:
datum.wht = co.WEIGHTS['eig_o']
else:
datum.wht = co.WEIGHTS[datum.typ]
def calculate_score(r_data, c_data):
"""
*** Deprecated: All of this is in compare_data()
Calculates the objective function score.
"""
score_tot = 0.
for r_datum, c_datum in zip(r_data, c_data):
# Could add a check here to assure that the data points are aligned.
# Ex.) assert r_datum.ind_1 == c_datum.ind_1, 'Oh no!'
# For torsions, ensure the difference between -179 and 179 is 2, not
# 358.
if c_datum.typ == 't':
diff = abs(r_datum.val - c_datum.val)
if diff > 180.:
diff = 360. - diff
else:
diff = r_datum.val - c_datum.val
score_ind = r_datum.wht**2 * diff**2
score_tot += score_ind
logger.log(1, '>>> {} {} {}'.format(r_datum, c_datum, score_ind))
logger.log(5, 'SCORE: {}'.format(score_tot))
return score_tot
if __name__ == '__main__':
logging.config.dictConfig(co.LOG_SETTINGS)
main(sys.argv[1:])
|
{
"content_hash": "f0a17b94104b7d7a0ab31d0bba99438d",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 95,
"avg_line_length": 41.92248062015504,
"alnum_prop": 0.5467825443786982,
"repo_name": "ericchansen/q2mm",
"id": "20e71dbc8046d2bfc78c15a73721dce0c98bd38e",
"size": "16246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "q2mm/compare.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "56580"
},
{
"name": "Python",
"bytes": "538769"
},
{
"name": "Shell",
"bytes": "4755"
}
],
"symlink_target": ""
}
|
"""Dictionary to keep track of pit information."""
import os
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
def get_path(grammar):
"""Return the path of the peach pit for the given grammar. Return None if the
Pit does not exist or the grammar is None."""
pit_dir = os.path.join(environment.get_platform_resources_directory(),
'peach', 'pits')
pit_path = os.path.join(pit_dir, grammar + '.xml')
if not os.path.exists(pit_path):
logs.log_error(
'Pit file for "%s" grammar is not found.' % grammar, pit_path=pit_path)
return None
return pit_path
|
{
"content_hash": "94a1e73578626176f0aed6803afec27d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 29.818181818181817,
"alnum_prop": 0.6798780487804879,
"repo_name": "google/clusterfuzz",
"id": "dc4699c6a1175a36ebc0f74401a182875f5a6b21",
"size": "1231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/clusterfuzz/_internal/bot/fuzzers/libFuzzer/peach/pits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "21721"
},
{
"name": "C",
"bytes": "3485"
},
{
"name": "C++",
"bytes": "16326"
},
{
"name": "CSS",
"bytes": "16789"
},
{
"name": "Dockerfile",
"bytes": "25218"
},
{
"name": "Go",
"bytes": "16253"
},
{
"name": "HTML",
"bytes": "503044"
},
{
"name": "JavaScript",
"bytes": "9433"
},
{
"name": "Jinja",
"bytes": "3308"
},
{
"name": "PowerShell",
"bytes": "17307"
},
{
"name": "Python",
"bytes": "5085058"
},
{
"name": "Ruby",
"bytes": "93"
},
{
"name": "Shell",
"bytes": "80910"
},
{
"name": "Starlark",
"bytes": "1951"
}
],
"symlink_target": ""
}
|
from flask_security import Security, SQLAlchemyUserDatastore
from src.user import models
user_data_store = SQLAlchemyUserDatastore(models.db, models.User, models.Role)
security = Security(datastore=user_data_store)
|
{
"content_hash": "0d13b5fca525f65b4b43a7d3e19ff2b9",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 78,
"avg_line_length": 36.166666666666664,
"alnum_prop": 0.8248847926267281,
"repo_name": "saurabh1e/FlaskStructure",
"id": "c60e84de205d133a5f0ca11fe4794daa04d841ee",
"size": "217",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/utils/security.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15121"
}
],
"symlink_target": ""
}
|
"""
Copyright 2016, Michael DeHaan <michael.dehaan@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
def event_sort_key(a):
return (a.time)
class Timeline(object):
def __init__(self):
self.events = []
def add_event(self, event):
self.events.append(event)
def process_due_events(self, now_time, until_time):
"""
Processes all events in a loop that will sleep so they are yielded
at the appropriate time. Loop runs until "until_time" is reached.
"""
self.events = sorted(self.events, key=event_sort_key)
# To avoid timing errors, sleep for the computed amounts of time, but don't
# use the real clock to know where we are; use the time value as an index.
while now_time <= until_time:
if len(self.events) == 0:
sleep_amount = until_time - now_time
now_time += self._sleep(sleep_amount)
return
# get the time the next event should trigger
last_event_time = self.events[0].time
if last_event_time <= now_time:
# the next event needs to trigger now
event = self.events.pop(0)
if event.notes and len(event.notes) > 0:
# FIXME: when we have control change and volume events this
# will have to change. Right now, it keeps output sane.
yield event
print("PLAYING: %s" % event)
continue
if now_time > until_time:
return
# the next event is too far into the future, sleep until
# the end of the cycle
if last_event_time >= until_time:
sleep_amount = until_time - now_time
now_time = now_time + self._sleep(sleep_amount)
return
# the next event isn't ready yet but we can still play
# it during this cycle
else:
sleep_amount = last_event_time - now_time
now_time = now_time + self._sleep(sleep_amount)
continue
def _sleep(self, amount):
#if amount < 0.05:
# return
#if (amount > 0.02):
# for trivial sleeps, just fake the sleep and advance the clock.
time.sleep(amount)
return amount
def process_off_events(self):
"""
Yield all the off events.
"""
for event in self.events:
if event.off:
print(event)
yield event
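# A minimal usage sketch (illustrative only; assumes an event object exposing
# the `time`, `notes`, and `off` attributes read by the methods above -- no
# such class is defined in this module):
#
#     timeline = Timeline()
#     timeline.add_event(some_event)               # hypothetical event instance
#     for event in timeline.process_due_events(now_time=0.0, until_time=4.0):
#         handle(event)                            # hypothetical consumer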
|
{
"content_hash": "6d4d188d5286bd19d9e309084d5cc2fa",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 33.075268817204304,
"alnum_prop": 0.5770481144343304,
"repo_name": "mpdehaan/camp",
"id": "8d8bda3276adb42a57fb9d24a2998b236be61a0b",
"size": "3076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "camp/playback/timeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "224"
},
{
"name": "Python",
"bytes": "124099"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from setuptools.extension import Extension
import os.path as path
from distutils.version import LooseVersion as Version
import numpy as np
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'pypolycomp', 'version.py'), encoding='utf-8') as f:
exec(f.read())
try:
from Cython.Build import cythonize
from Cython.Distutils import build_ext
from Cython import __version__ as cython_version
except ImportError:
use_cython = False
else:
use_cython = Version(cython_version) >= Version('0.18.0')
if use_cython:
print('using cython')
cython_ext = '.pyx'
else:
print('NOT using cython')
cython_ext = '.c'
modules = [Extension("pypolycomp._bindings",
sources=["pypolycomp/_bindings" + cython_ext],
libraries=["polycomp"])]
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst')) as f:
long_description = f.read()
if use_cython:
modules = cythonize(modules)
setup(name="polycomp",
version=__version__,
author="Maurizio Tomasi",
author_email="ziotom78@gmail.com",
description="Python bindings to the libpolycomp C library",
long_description=long_description,
license="MIT",
url="https://github.com/ziotom78/polycomp",
install_requires=["cython >= 0.18", "numpy >= 1.8.2", "astropy >= 0.4", "click"],
include_dirs=[np.get_include()],
ext_modules=modules,
scripts=['polycomp.py'],
packages=['pypolycomp'],
keywords='compression astronomy fits',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: System :: Archiving :: Compression',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
])
|
{
"content_hash": "246dc02957ca0fc271abeefcf425f02a",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 87,
"avg_line_length": 31.671641791044777,
"alnum_prop": 0.6314797360980208,
"repo_name": "ziotom78/polycomp",
"id": "79701ada39c1d12a08e8a174970799f6abf5a9e9",
"size": "2169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "116573"
}
],
"symlink_target": ""
}
|
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
from datetime import datetime
project = u'tam.buildout'
copyright = u'%s, Serge Davidov.' % datetime.now().year
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'buildoutdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index',
'buildout.tex',
u'tam.buildout Documentation',
u'', 'manual'
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
{
"content_hash": "6d974db19d1a2678917b242fc0bf358a",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 79,
"avg_line_length": 31.24848484848485,
"alnum_prop": 0.7119860356865787,
"repo_name": "a25kk/tam",
"id": "af5c556c885287db0accfb2032c1bcd0eb44daa9",
"size": "5991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "62731"
},
{
"name": "HTML",
"bytes": "78551"
},
{
"name": "JavaScript",
"bytes": "28591"
},
{
"name": "Makefile",
"bytes": "2549"
},
{
"name": "Python",
"bytes": "42837"
},
{
"name": "Shell",
"bytes": "223"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos9_config
version_added: "2.2"
author: "Dhivya P (@dhivyap)"
short_description: Manage Dell EMC Networking OS9 configuration sections
description:
- OS9 configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with OS9 configuration sections in
a deterministic way.
extends_documentation_fragment: dellos9
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser. This argument is mutually exclusive with I(src).
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is
mutually exclusive with I(lines).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
default: line
choices: ['line', 'block']
update:
description:
- The I(update) argument controls how the configuration statements
are processed on the remote device. Valid choices for the I(update)
argument are I(merge) and I(check). When you set this argument to
I(merge), the configuration changes merge with the current
device running configuration. When you set this argument to I(check)
the configuration updates are determined but not actually configured
on the remote device.
default: merge
choices: ['merge', 'check']
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
type: bool
default: no
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory. If the directory does not exist, it is created.
type: bool
default: 'no'
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
          The value of this option is read only when C(backup) is set to I(yes); if C(backup) is set
          to I(no), this option will be silently ignored.
suboptions:
filename:
description:
        - The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
          as described in the C(filename) option's description. If the path value is not
          given, a I(backup) directory will be created in the current working directory
          and the backup configuration will be copied to C(filename) within the
          I(backup) directory.
type: path
type: dict
version_added: "2.8"
notes:
- This module requires Dell OS9 version 9.10.0.1P13 or above.
  - This module requires increasing the ssh connection rate limit.
Use the following command I(ip ssh connection-rate-limit 60)
to configure the same. This can also be done with the
M(dellos9_config) module.
"""
EXAMPLES = """
- dellos9_config:
lines: ['hostname {{ inventory_hostname }}']
provider: "{{ cli }}"
- dellos9_config:
lines:
- 10 permit ip host 1.1.1.1 any log
- 20 permit ip host 2.2.2.2 any log
- 30 permit ip host 3.3.3.3 any log
- 40 permit ip host 4.4.4.4 any log
- 50 permit ip host 5.5.5.5 any log
parents: ['ip access-list extended test']
before: ['no ip access-list extended test']
match: exact
- dellos9_config:
lines:
- 10 permit ip host 1.1.1.1 any log
- 20 permit ip host 2.2.2.2 any log
- 30 permit ip host 3.3.3.3 any log
- 40 permit ip host 4.4.4.4 any log
parents: ['ip access-list extended test']
before: ['no ip access-list extended test']
replace: block
- dellos9_config:
lines: ['hostname {{ inventory_hostname }}']
provider: "{{ cli }}"
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device.
returned: always
type: list
sample: ['hostname foo', 'router bgp 1', 'bgp router-id 1.1.1.1']
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'router bgp 1', 'bgp router-id 1.1.1.1']
saved:
description: Returns whether the configuration is saved to the startup
configuration or not.
returned: When not check_mode.
type: bool
sample: True
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/dellos9_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.dellos9.dellos9 import get_config, get_sublevel_config
from ansible.module_utils.network.dellos9.dellos9 import dellos9_argument_spec, check_args
from ansible.module_utils.network.dellos9.dellos9 import load_config, run_commands
from ansible.module_utils.network.dellos9.dellos9 import WARNING_PROMPTS_RE
from ansible.module_utils.network.common.config import NetworkConfig, dumps
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
commands = module.params['lines'][0]
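        # 'lines' may be plain command strings or a single dict wrapping the
        # commands under a 'command' key (optionally with prompt/answer
        # metadata); accept both forms when building the candidate config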
if (isinstance(commands, dict)) and (isinstance(commands['command'], list)):
candidate.add(commands['command'], parents=parents)
elif (isinstance(commands, dict)) and (isinstance(commands['command'], str)):
candidate.add([commands['command']], parents=parents)
else:
candidate.add(module.params['lines'], parents=parents)
return candidate
def get_running_config(module):
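    # prefer the caller-supplied base configuration; otherwise read the
    # running-config from the device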
contents = module.params['config']
if not contents:
contents = get_config(module)
return contents
def main():
backup_spec = dict(
filename=dict(),
dir_path=dict(type='path')
)
argument_spec = dict(
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
src=dict(type='path'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line',
choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
update=dict(choices=['merge', 'check'], default='merge'),
save=dict(type='bool', default=False),
config=dict(),
backup=dict(type='bool', default=False),
backup_options=dict(type='dict', options=backup_spec)
)
argument_spec.update(dellos9_argument_spec)
mutually_exclusive = [('lines', 'src'),
('parents', 'src')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
parents = module.params['parents'] or list()
match = module.params['match']
replace = module.params['replace']
warnings = list()
check_args(module, warnings)
result = dict(changed=False, saved=False, warnings=warnings)
candidate = get_candidate(module)
if module.params['backup']:
if not module.check_mode:
result['__backup__'] = get_config(module)
commands = list()
if any((module.params['lines'], module.params['src'])):
if match != 'none':
config = get_running_config(module)
if parents:
contents = get_sublevel_config(config, module)
config = NetworkConfig(contents=contents, indent=1)
else:
config = NetworkConfig(contents=config, indent=1)
configobjs = candidate.difference(config, match=match, replace=replace)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands')
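            # if the first 'lines' entry carries prompt/answer metadata, wrap
            # the rendered commands so the device prompt can be answered;
            # otherwise split the dump into a plain list of command strings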
if ((isinstance(module.params['lines'], list)) and
(isinstance(module.params['lines'][0], dict)) and
set(['prompt', 'answer']).issubset(module.params['lines'][0])):
cmd = {'command': commands,
'prompt': module.params['lines'][0]['prompt'],
'answer': module.params['lines'][0]['answer']}
commands = [module.jsonify(cmd)]
else:
commands = commands.split('\n')
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
if not module.check_mode and module.params['update'] == 'merge':
load_config(module, commands)
result['changed'] = True
result['commands'] = commands
result['updates'] = commands
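    # copy the running-config to the startup-config when requested, unless
    # running in check mode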
if module.params['save']:
result['changed'] = True
if not module.check_mode:
cmd = {'command': 'copy running-config startup-config',
'prompt': r'\[confirm yes/no\]:\s?$', 'answer': 'yes'}
run_commands(module, [cmd])
result['saved'] = True
else:
            module.warn('Skipping command `copy running-config startup-config` '
'due to check_mode. Configuration not copied to '
'non-volatile storage')
module.exit_json(**result)
if __name__ == '__main__':
main()
|
{
"content_hash": "31735805ca44746b2a6ba9bf56c98ada",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 98,
"avg_line_length": 39.411764705882355,
"alnum_prop": 0.6469402985074627,
"repo_name": "SergeyCherepanov/ansible",
"id": "01a944a9b4d76a3222ad96393f67f7ea1370388b",
"size": "13596",
"binary": false,
"copies": "41",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/network/dellos9/dellos9_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
import sys
sys.dont_write_bytecode = True
sys.path.append( 'devices' )
sys.path.append( 'libs' )
import getopt
# connection
from libArdySer import ArdySer
################################################################################
# signal stuff
import signal
def signal_handler( signal, frame ):
print( "BREAK." )
sys.exit( 0 )
# and immediately install it on including this library
signal.signal( signal.SIGINT, signal_handler )
#signal.pause()
################################################################################
# main command line interface routines
class PyArdyApp:
def usage( self ):
print "App.py <options>"
print " -a TXT attach to port with 'TXT' matching"
print " -l List detected serial ports"
print " -p perform I2C probe"
print " -s Simulated Arduino connection mode"
def run( self, ardy ):
print "replace this function."
def main( self, argv ):
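        # parse the command-line options, open the Arduino connection (real
        # or simulated), optionally probe the I2C bus, then hand off to run()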
ardy = ArdySer( True )
doI2cProbe = False
arduinotext = None
try:
opts, args = getopt.getopt(argv,"hlspa:",["arduino="])
except getopt.GetoptError:
self.usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-l':
ardy.listSerialPorts()
return
elif opt == '-s':
ardy.EnableSimulation()
elif opt == '-p':
doI2cProbe = True
            elif opt in ("-a", "--arduino"):
                arduinotext = arg
else:
self.usage()
sys.exit()
ardy.openConnection( arduinotext )
if not ardy.isConnected():
print " ===================="
print " Arduino not found!"
print " ===================="
print
self.usage()
return
if doI2cProbe is True:
ardy.i2cProbe()
return
self.run( ardy )
# put this in your main app as well.
#if __name__ == "__main__":
# paa = PyArdyApp()
# paa.main(sys.argv[1:])
|
{
"content_hash": "d7257d221a3b19cc62b7d0d20f76a987",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 80,
"avg_line_length": 18.744897959183675,
"alnum_prop": 0.5617855198693522,
"repo_name": "BleuLlama/LlamaPyArdy",
"id": "60aea805ac46d82c04f9705ea52321c8b45c98e3",
"size": "2639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/libs/libPyArdyApp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "21689"
},
{
"name": "C",
"bytes": "1106"
},
{
"name": "Makefile",
"bytes": "471"
},
{
"name": "Python",
"bytes": "48976"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import tape as tape_lib
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.layers.pooling import max_pooling3d
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import training
class BackpropTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def testAggregateGradients(self):
def fn(x):
ind1 = constant_op.constant(np.array([0, 1]))
ind2 = constant_op.constant(np.array([2, 3]))
ind3 = constant_op.constant(np.array([1, 3]))
g1 = embedding_ops.embedding_lookup(x, ind1)
g2 = embedding_ops.embedding_lookup(x, ind2)
g3 = embedding_ops.embedding_lookup(x, ind3)
return g1 * g2 * g3
var_np = np.random.rand(4, 2).astype(np.float32)
var = constant_op.constant(var_np)
grad = backprop.gradients_function(fn, [0])(var)[0]
grad = self.evaluate(ops.convert_to_tensor(grad))
if not context.executing_eagerly():
tf_var = array_ops.constant(var_np, dtypes.float32)
tf_ind1 = array_ops.constant([0, 1])
tf_ind2 = array_ops.constant([2, 3])
tf_ind3 = array_ops.constant([1, 3])
tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
tf_y = tf_g1 * tf_g2 * tf_g3
tf_grad = gradients.gradients(tf_y, [tf_var])[0]
tf_dense_grad = math_ops.unsorted_segment_sum(
tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])
self.assertAllClose(grad, self.evaluate(tf_dense_grad))
@test_util.run_in_graph_and_eager_modes
def testAggregateGradientsWithTensor(self):
def fn(x):
ind1 = constant_op.constant(np.array([0, 1]))
# A mixture of IndexedSlices and dense tensor to aggregate.
g1 = embedding_ops.embedding_lookup(x, ind1)
g2 = math_ops.reduce_sum(x * constant_op.constant(2.0))
return g1 * g2
var_np = np.random.rand(4, 2).astype(np.float32)
var = constant_op.constant(var_np)
grad = backprop.gradients_function(fn, [0])(var)[0]
grad = self.evaluate(ops.convert_to_tensor(grad))
if not context.executing_eagerly():
tf_var = array_ops.constant(var_np, dtypes.float32)
tf_ind1 = array_ops.constant([0, 1])
tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
tf_g2 = math_ops.reduce_sum(tf_var * 2.0, axis=(0, 1))
tf_y = tf_g1 * tf_g2
tf_grad = gradients.gradients(tf_y, [tf_var])[0]
self.assertAllClose(grad, tf_grad)
def testImplicitGradWithResourceVariable(self):
x = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(1.0), name='x')
def fn():
b = constant_op.constant(2.0)
c = math_ops.add(x.value(), b)
return math_ops.add(c, constant_op.constant(3.0))
grads_and_vars = backprop.implicit_grad(fn)()
self.assertAllEqual(grads_and_vars[0][0], 1.0)
self.assertAllEqual(id(grads_and_vars[0][1]), id(x))
@parameterized.named_parameters(
[('Function', def_function.function),
('NoFunction', lambda f: f)])
def testIdentityBehaviorConsistent(self, decorator):
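    # Gradients with respect to x and its identity copy should be tracked
    # independently, with or without wrapping in a tf.function.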
@decorator
def f(x):
x1 = array_ops.identity(x)
with backprop.GradientTape() as t:
t.watch(x)
t.watch(x1)
y1 = x * 2.
y2 = x1 * 3.
loss = y1 + y2
return t.gradient(loss, [x, x1])
self.assertAllClose([2., 3.], f(constant_op.constant(10.)))
def testGradientInsideLoop(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
def body(_):
_ = v + 1.0 # This reads the variable inside the loop context
with backprop.GradientTape() as t:
result = v * 2
self.assertIsNotNone(t.gradient(result, v))
return 1.0
control_flow_ops.while_loop(lambda i: False, body, [1.0])
def testWhereGradient(self):
# Note: where is special because only some of its arguments are of
# differentiable dtypes.
def f(x):
return array_ops.where(x < 10, x, x * x)
g = backprop.gradients_function(f)
self.assertAllEqual(g(5.)[0], 1.0)
self.assertAllEqual(g(50.)[0], 100.0)
def testTwoTargets(self):
with backprop.GradientTape() as t:
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
t.watch([x, y])
xx = 2 * x
yy = 3 * y
dx, dy = t.gradient([xx, yy], [x, y])
self.assertAllEqual(dx, 2.0)
self.assertAllEqual(dy, 3.0)
def testCustomGradientEmptyError(self):
@custom_gradient.custom_gradient
def identity(x):
def grad(_):
return [] # This return value is wrong!
return x, grad
x = variables.Variable(1.0)
with backprop.GradientTape() as t:
y = identity(x)
with self.assertRaises(ValueError):
t.gradient(y, [x])
def testOutputGradUsedInComputation(self):
with backprop.GradientTape() as t:
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
t.watch([x, y])
loss = x * y
dx, = t.gradient([loss, x], [x], output_gradients=[1.0, 2.0])
self.assertAllEqual(dx, 4.0)
def testDy(self):
def f(x):
return x
grad_fn = backprop.gradients_function(f)
self.assertAllEqual(2., grad_fn(1., dy=2.)[0])
def testGradientInteger(self):
def f(x):
return x + x
int_tensor = constant_op.constant(1)
self.assertEqual(backprop.gradients_function(f)(int_tensor)[0], None)
def testErrors(self):
@custom_gradient.custom_gradient
def f(x):
def grad(_):
raise RuntimeError('x')
return x, grad
# TODO(apassos) raise the right error here
with self.assertRaises(RuntimeError):
backprop.gradients_function(f)(constant_op.constant(1.0))
def testGradientsFunctionInCustomGradient(self):
@custom_gradient.custom_gradient
def f(x):
(y,) = backprop.gradients_function(lambda x: x * x)(x)
def grad(dy):
return [2 * dy]
return y, grad
self.assertAllEqual(f(1.0), 2.0)
def testImplicitGradOverEmbeddingLookup(self):
batch_size = 8
embedding_size = 512
vocab_size = 1000
lrn_rate = 0.1
random_init = random_ops.random_uniform([vocab_size, embedding_size])
x = array_ops.ones((batch_size), dtypes.int64)
embedding = resource_variable_ops.ResourceVariable(
initial_value=random_init, dtype=dtypes.float32, name='embedding')
def f():
embedded_x = embedding_ops.embedding_lookup(embedding, x)
return constant_op.constant(1.0, dtypes.float32) - embedded_x
grad = backprop.implicit_grad(f)()[0][0]
opt = training.GradientDescentOptimizer(lrn_rate)
with ops.Graph().as_default(), self.cached_session():
tf_x = array_ops.ones((batch_size), dtypes.int64)
# TODO(ashankar,apassos): Change to ResourceVariable.
tf_embedding = variables.Variable(
random_init.numpy(), name='tf_embedding')
tf_embedded_x = embedding_ops.embedding_lookup(tf_embedding, tf_x)
tf_y = 1.0 - tf_embedded_x
tf_grad = gradients.gradients(tf_y, [tf_embedding])[0]
tf_opt = training.GradientDescentOptimizer(0.1)
tf_embedding.initializer.run()
self.assertAllClose(tf_grad.indices.eval(), grad.indices)
self.assertAllClose(tf_grad.values.eval(), grad.values)
tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run()
expected = self.evaluate(tf_embedding)
opt.apply_gradients([(grad, embedding)])
self.assertAllClose(expected, embedding.read_value())
def testImplicitGradOrdering(self):
v0 = resource_variable_ops.ResourceVariable(1.0)
v1 = resource_variable_ops.ResourceVariable(2.0)
def f():
x = v1 * v1
y = v0 * v0
return x + y
grads = backprop.implicit_grad(f)()
ordered_variables = [x[1] for x in grads]
self.assertIs(ordered_variables[0], v0)
self.assertIs(ordered_variables[1], v1)
def testTapeNoOpGradient(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
y = x
self.assertEqual(t.gradient(y, x).numpy(), 1.0)
def testTapeIdentityGradientIsIdentity(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
y = array_ops.identity(x)
self.assertEqual(t.gradient(y, x).numpy(), 1.0)
def testTapeGradientMultiTargetOneIsSource(self):
x = constant_op.constant(2.0)
with backprop.GradientTape() as t:
t.watch(x)
y = x*x
self.assertEqual(t.gradient([x, y], x).numpy(), 5.0)
def testTapeNoOpGradientWithMultiTargetAllSource(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
y = x
self.assertEqual(t.gradient([y, y], x).numpy(), 2.0)
def testTapeNoOpGradientWithMultiTargetMultiSource(self):
x = constant_op.constant(3.0)
y = constant_op.constant(5.0)
with backprop.GradientTape() as t:
t.watch(x)
t.watch(y)
z = y * y
self.assertAllEqual(t.gradient([x, y, z], [x, y]), [1.0, 11.0])
def testTapeGradientStringTarget(self):
s = constant_op.constant('unknown', dtype=dtypes.string)
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
t.watch(s)
grads = t.gradient(s, x)
self.assertEqual(grads, None)
def testTapeNoOpGradientStringSourceAndTarget(self):
s = constant_op.constant('unknown', dtype=dtypes.string)
with backprop.GradientTape() as t:
t.watch(s)
grads = t.gradient(s, s)
self.assertEqual(grads, None)
def testTapeNoOpGradientWithMultiTargetMultiSourceIncludeString(self):
x = constant_op.constant(3.0)
y = constant_op.constant(5.0)
s = constant_op.constant('unknown', dtype=dtypes.string)
with backprop.GradientTape() as t:
t.watch(x)
t.watch(y)
t.watch(s)
z = y * y
grads = t.gradient([x, y, z, s], [x, y, s])
self.assertAllEqual(grads[:2], [1.0, 11.0])
self.assertEqual(grads[2], None)
def testTapeNoOpOnVariableIsIdentity(self):
v0 = resource_variable_ops.ResourceVariable(1.0)
with backprop.GradientTape() as t:
y = v0.read_value()
self.assertEqual(t.gradient(y, v0).numpy(), 1.0)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testTapeNoOpGradient2By2(self):
a_2_by_2 = constant_op.constant(2.0, shape=[2, 2])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
dy_dy = tape.gradient(a_2_by_2, [a_2_by_2])[0]
self.assertAllEqual(dy_dy.numpy(),
constant_op.constant(1.0, shape=[2, 2]).numpy())
@test_util.assert_no_new_pyobjects_executing_eagerly
def testTapeNoOpGradientMultiTarget2By2(self):
a_2_by_2 = constant_op.constant(2.0, shape=[2, 2])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
dy_dy = tape.gradient([a_2_by_2, a_2_by_2], [a_2_by_2])[0]
self.assertAllEqual(dy_dy.numpy(),
constant_op.constant(2.0, shape=[2, 2]).numpy())
def testTapeStopRecording(self):
with backprop.GradientTape() as t:
x = resource_variable_ops.ResourceVariable(1.0)
with t.stop_recording():
y = x * x
self.assertEqual(t.gradient(y, x), None)
def testTapeStopStartRecording(self):
with backprop.GradientTape(persistent=True) as t:
x = resource_variable_ops.ResourceVariable(1.0)
x2 = x * 2 # This should be differentiated through.
with t.stop_recording():
y = x2 * x2
z = x2 * x2
self.assertEqual(t.gradient(y, x2), None)
# If the x*2 was not differentiated through, this would be 2.0, not 4.0
self.assertEqual(t.gradient(z, x2).numpy(), 4.0)
def testTapeReset(self):
with backprop.GradientTape() as t:
v = resource_variable_ops.ResourceVariable(1.0)
loss = v * v
t.reset()
loss += v * v
self.assertAllEqual(t.gradient(loss, v), 2.0)
def testPythonMax(self):
x = [resource_variable_ops.ResourceVariable(2.),
resource_variable_ops.ResourceVariable(3.),
resource_variable_ops.ResourceVariable(5.)]
with backprop.GradientTape() as t:
f = max(x)
grad = t.gradient(f, x)
self.assertAllEqual(self.evaluate(f), 5.)
self.assertAllEqual(self.evaluate(grad), [None, None, 1.0])
def testAutomaticWatchedVariables(self):
with backprop.GradientTape() as t:
self.assertEqual(0, len(t.watched_variables()))
v = resource_variable_ops.ResourceVariable(1.0)
loss = v * v
self.assertAllEqual([v], t.watched_variables())
t.reset()
self.assertEqual(0, len(t.watched_variables()))
loss += v * v
self.assertAllEqual([v], t.watched_variables())
def testExplicitWatchedVariables(self):
with backprop.GradientTape() as t:
self.assertEqual(0, len(t.watched_variables()))
v = resource_variable_ops.ResourceVariable(1.0)
t.watch(v)
self.assertAllEqual([v], t.watched_variables())
t.reset()
self.assertEqual(0, len(t.watched_variables()))
t.watch(v)
self.assertAllEqual([v], t.watched_variables())
@test_util.assert_no_new_tensors
def testGradientNone(self):
def loss(x, l):
return math_ops.reduce_mean(
nn_ops.softmax_cross_entropy_with_logits(logits=x, labels=l),
constant_op.constant([0]))
logits = constant_op.constant([[0.0, 0.0]])
labels = constant_op.constant([[1.0, 0.0]])
# softmax_cross_entropy_with_logits returns two outputs and in this case the
# gradient wrt the second is None.
g, = backprop.gradients_function(loss, [0])(logits, labels)
self.assertAllEqual(g.numpy(), [[-0.5, 0.5]])
@test_util.run_in_graph_and_eager_modes
def testGradientWithinTapeBlock(self):
v1 = resource_variable_ops.ResourceVariable(1.)
self.evaluate(v1.initializer)
with backprop.GradientTape() as t:
loss = 2 * v1
grad = t.gradient(loss, v1)
self.assertAllEqual(self.evaluate(grad), 2.0)
with backprop.GradientTape(persistent=True) as t:
loss = 2 * v1
grad = t.gradient(loss, v1)
self.assertAllEqual(self.evaluate(grad), 2.0)
@test_util.run_in_graph_and_eager_modes
def testNestedSelfContexts(self):
v1 = resource_variable_ops.ResourceVariable(1.)
self.evaluate(v1.initializer)
with backprop.GradientTape() as t:
with self.assertRaises(ValueError):
with t:
pass
@test_util.assert_no_new_tensors
def testSecondGrad(self):
def first(x):
l = constant_op.constant([[0.0]])
x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=x)
x = math_ops.reduce_sum(x, constant_op.constant([0]))
return x
def second(x):
grad = backprop.gradients_function(first, [0])(x)[0]
return math_ops.reduce_sum(grad, constant_op.constant([0]))
f = constant_op.constant([[0.1]])
grad = backprop.gradients_function(second, [0])(f)[0]
self.assertAllEqual([[0.0]], grad)
@test_util.run_in_graph_and_eager_modes
def testWatchingIsTapeLocal(self):
x1 = resource_variable_ops.ResourceVariable(2.0, trainable=False)
x2 = resource_variable_ops.ResourceVariable(2.0, trainable=False)
with backprop.GradientTape() as tape1:
with backprop.GradientTape() as tape2:
tape1.watch(x1)
tape2.watch([x1, x2])
y = x1 ** 3
z = x2 ** 2
dy, dz = tape2.gradient([y, z], [x1, x2])
d2y, d2z = tape1.gradient([dy, dz], [x1, x2])
self.evaluate([x1.initializer, x2.initializer])
self.assertEqual(self.evaluate(d2y), 12.0)
self.assertIsNone(d2z)
@test_util.assert_no_new_tensors
def testMakeVJP(self):
def f(x):
return x * x
wrapped_fn = backprop.make_vjp(f, persistent=False)
result, vjp = wrapped_fn(constant_op.constant(3.0))
self.assertAllEqual(result, 9.0)
self.assertAllEqual(vjp(2.0)[0], 12.0)
def testPersistentMakeVJP(self):
def f(x):
return x * x
wrapped_fn = backprop.make_vjp(f, persistent=True)
_, vjp = wrapped_fn(constant_op.constant(3.0))
vjp_result1 = vjp(2.0)[0]
vjp_result2 = vjp(2.0)[0]
self.assertAllEqual(vjp_result1, vjp_result2, 12.0)
@test_util.assert_no_new_tensors
def testGradGrad(self):
def sq(x):
return x * x
def grad(x):
value = backprop.gradients_function(sq, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(3.0))[0], 2.0)
@test_util.assert_no_new_tensors
def testGradGradExp(self):
def grad(x):
value = backprop.gradients_function(math_ops.exp, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(0.0))[0], 1.0)
@test_util.assert_no_new_tensors
def testStopGradient(self):
grad = backprop.gradients_function(
lambda x: array_ops.stop_gradient(math_ops.argmax(x)))
self.assertAllEqual(grad([0.0])[0], None)
@test_util.assert_no_new_tensors
def testArgmax(self):
def argmax(x):
i = math_ops.argmax(x)
return array_ops.stop_gradient(i)
grad = backprop.gradients_function(argmax)
self.assertAllEqual(grad([0.0])[0], None)
@test_util.run_gpu_only
@test_util.assert_no_new_tensors
def testGPU(self):
def fn(x):
with context.device('/gpu:0'):
b = constant_op.constant(2.0)
c = math_ops.add(x.gpu(), b)
# TODO(apassos): remove cpu below by making TensorVSPace aware
# of devices.
return math_ops.add(c, constant_op.constant(3.0)).cpu()
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
@test_util.run_gpu_only
@test_util.assert_no_new_tensors
def testGPUImplicitGrad(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(
constant_op.constant(1.0), name='v')
def f():
with context.device('gpu:0'):
return v.read_value()
self.assertEqual(
backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0)
@test_util.assert_no_new_tensors
def testCPU(self):
def fn(x):
b = constant_op.constant(2.0)
c = math_ops.add(x, b)
return math_ops.add(c, constant_op.constant(3.0))
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
@test_util.run_gpu_only
@test_util.assert_no_new_tensors
def testTensorCopyGPU2CPU2GPU(self):
def f(a, b):
return a.cpu() + b.cpu()
with context.device('/gpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
@test_util.assert_no_new_tensors
def testEmptyParams(self):
def fn(a, b):
return a * b
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
dx, dy = backprop.gradients_function(fn)(x, y)
self.assertAllEqual(dx, y.numpy())
self.assertAllEqual(dy, x.numpy())
@test_util.assert_no_new_tensors
def testUnconnectedNone(self):
v = resource_variable_ops.ResourceVariable(
1.0, name='testUnconnectedNone')
def f():
v.read_value()
return constant_op.constant(1.0)
self.assertEqual(backprop.implicit_grad(f)()[0][0], None)
@test_util.assert_no_new_tensors
def testGradientTapeReEnterContext(self):
g = backprop.GradientTape()
with g:
x = constant_op.constant(3.0)
g.watch(x)
y = 2*x
with g:
z = 2*y
grad = g.gradient(target=z, sources=[x])
self.assertEqual(self.evaluate(grad), [4.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeRepeatedSource(self):
with backprop.GradientTape(persistent=False) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = 2 * x
grad = g.gradient(target=y, sources=[x, x])
self.assertEqual(self.evaluate(grad), [2.0, 2.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testPersistentGradientTapeRepeatedSource(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
y = constant_op.constant(5.0)
g.watch(x)
g.watch(y)
z = x * x + x * y
grad = g.gradient(target=z, sources=[x, x])
self.assertEqual(self.evaluate(grad), [11.0, 11.0])
grad = g.gradient(target=z, sources=[y, x])
self.assertEqual(self.evaluate(grad), [3.0, 11.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeStructure(self):
with backprop.GradientTape(persistent=True) as g:
# Using different constant values because constant tensors are
      # cached, leading to a different gradient than what one might expect.
x1 = constant_op.constant(3.0)
x2 = constant_op.constant(3.1)
x3 = constant_op.constant(3.2)
g.watch(x1)
g.watch(x2)
g.watch(x3)
y = x1 + 2 * x2 + 3 * x3
self.assertEqual(self.evaluate(g.gradient(y, x1)), [1.0])
self.assertEqual(self.evaluate(g.gradient(y, (x1,))), (1.0,))
self.assertEqual(self.evaluate(g.gradient(y, (x1, x2))), (1.0, 2.0))
self.assertEqual(self.evaluate(g.gradient(y, [(x1, x2), (x2, x3)])),
[(1.0, 2.0), (2.0, 3.0)])
self.assertEqual(self.evaluate(g.gradient(y, (x1, x2, [x1, x3]))),
(1.0, 2.0, [1.0, 3.0]))
self.assertEqual(self.evaluate(g.gradient(y, [x1, {'x2': x2, 'x3': x3}])),
[1.0, {'x2': 2.0, 'x3': 3.0}])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTape(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
with backprop.GradientTape() as gg:
gg.watch(y)
z = 2 * y
inner_grad = gg.gradient(z, [y])[0]
self.assertEqual(self.evaluate(inner_grad), 2.0)
y += inner_grad
grad = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(grad), 6.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
  def testGradientTapeCalledOnConstantTarget(self):
with backprop.GradientTape() as g:
x = variables.Variable([3.0])
y = variables.Variable([2.0])
grad = g.gradient(x, y)
self.assertAllEqual(grad, None)
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only('b/120545219')
def testGradientTapeWithCond(self):
x = constant_op.constant(3.0)
def true_fn():
return x
def false_fn():
return x * x
with backprop.GradientTape() as g:
g.watch(x)
y = control_flow_ops.cond(x < x, true_fn, false_fn)
if not context.executing_eagerly():
with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'):
dy = g.gradient(y, [x])[0]
else:
dy = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy), 6.0)
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only('b/120545219')
def testGradientTapeWithWhileLoop(self):
i = constant_op.constant(1)
x = constant_op.constant(2.)
def cond(i, _):
return i < 3
def body(i, x):
return i + 1, x * 2
with backprop.GradientTape() as g:
g.watch([x])
_, y = control_flow_ops.while_loop(cond, body, [i, x])
if not context.executing_eagerly():
with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'):
dy = g.gradient(y, [x])[0]
else:
dy = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy), 4.0)
@test_util.assert_no_new_tensors
def testGradientTapeGradientCalledMultipleTimes(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
z = y * y
g.gradient(z, [x])
with self.assertRaisesRegexp(
RuntimeError, 'GradientTape.gradient can only be called once'):
g.gradient(y, [x])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only('b/120545219')
def testPersistentTape(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
z = y * y
dz_dx = g.gradient(z, [x])[0]
self.assertEqual(self.evaluate(dz_dx), 4 * 3 * 3 * 3)
dy_dx = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy_dx), 2 * 3)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testHigherOrderGradient(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x ** 3 # y := x^3
dy_dx = g.gradient(y, x) # dy/dx := 3x^2
d2y_dx2 = g.gradient(dy_dx, x) # d2y/dx2 := 6x
d3y_dx3 = g.gradient(d2y_dx2, x) # d3y/dx3 := 6
x = 3
self.assertEqual(self.evaluate(y), x ** 3)
self.assertEqual(self.evaluate(dy_dx), 3 * x ** 2)
self.assertEqual(self.evaluate(d2y_dx2), 6 * x)
self.assertEqual(self.evaluate(d3y_dx3), 6)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testPersistentNestedTape(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
with backprop.GradientTape(persistent=True) as gg:
gg.watch(y)
z = 2 * y
for _ in range(2):
inner_grad = gg.gradient(z, [y])[0]
self.assertEqual(self.evaluate(inner_grad), 2.0)
y += inner_grad
del gg
grad = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(grad), 6.0)
grad = g.gradient(z, [x])[0]
self.assertEqual(self.evaluate(grad), 12.0)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeVariable(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
self.evaluate(v.initializer)
with backprop.GradientTape() as g:
y = v * v
grad = g.gradient(y, [v])[0]
self.assertAllEqual(self.evaluate(grad), 2.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testNestedGradients(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch(x)
y = x * x
z = y * y
dz_dx, dz_dy = g.gradient(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 108.0)
self.assertEqual(self.evaluate(dz_dy), 18.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsDefault(self):
x = constant_op.constant(1.0)
y = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
dz_dx = g.gradient(z, x)
self.assertEqual(dz_dx, None)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsZeros(self):
x = constant_op.constant(1.0, shape=[2, 2])
y = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
dz_dx = g.gradient(z, x, unconnected_gradients='zero')
self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(dz_dx))
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsVariablesZeros(self):
x = resource_variable_ops.ResourceVariable(
constant_op.constant(1., shape=[2, 2]))
self.evaluate(x.initializer)
y = resource_variable_ops.ResourceVariable(constant_op.constant(3.))
self.evaluate(y.initializer)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
dz_dx = g.gradient(z, x, unconnected_gradients='zero')
self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(dz_dx))
@test_util.run_in_graph_and_eager_modes
def testUnknownUnconnectedGradientsValueGiven(self):
x = constant_op.constant(1.0)
y = constant_op.constant(1.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
with self.assertRaisesRegexp(
ValueError, "Unknown value for unconnected_gradients: 'nonsense'"):
g.gradient(z, x, unconnected_gradients='nonsense')
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsNestedDefunZeros(self):
@function.defun
def f(x):
return x * x
@function.defun
def h(y):
z = f(y)
return array_ops.stop_gradient(z)
x = constant_op.constant(1.0)
with backprop.GradientTape() as g:
g.watch(x)
k = x + 2.
y = h(k)
dy_dx = g.gradient(y, x, unconnected_gradients='zero')
self.assertEqual(0.0, self.evaluate(dy_dx))
def testInvalidRecordOperationMessage(self):
y = constant_op.constant(2.)
x = constant_op.constant(1.)
with backprop.GradientTape() as g:
g.watch(x)
tape_lib.record_operation(
'InvalidBackprop',
[y],
[x],
lambda dy: [])
with self.assertRaisesRegexp(
errors_impl.InternalError, 'InvalidBackprop.*too few gradients'):
g.gradient(y, x)
@test_util.assert_no_new_tensors
def testEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grads_fn = backprop.val_and_grad_function(fn)
x = 2.0
y = 3.0
val, (dx, dy) = val_and_grads_fn(x, y)
self.assertAllClose(val, x * y)
self.assertAllEqual(dx, y)
self.assertAllEqual(dy, x)
@test_util.assert_no_new_tensors
def testNonEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grad_fn = backprop.val_and_grad_function(fn, params=[1])
x = 2.0
y = 3.0
val, grads = val_and_grad_fn(x, y)
self.assertAllClose(val, x * y)
self.assertEqual(1, len(grads))
self.assertAllEqual(grads[0], x)
@test_util.run_gpu_only
@test_util.assert_no_new_tensors
def testTensorCopyCPU2GPU2CPU(self):
# forward: a (cpu->gpu) -> add (gpu) -> c (gpu->cpu) -> add (cpu) -> e (cpu)
# back: e (cpu) -> add (cpu) -> c (cpu->gpu) -> add (gpu) -> grad (gpu->cpu)
def f(a, b):
with context.device('/gpu:0'):
c = math_ops.add(a.gpu(0), b.gpu(0))
return math_ops.add(c.cpu(), constant_op.constant(3.0))
with context.device('/cpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
def testGetAttrType(self):
typ = backprop.op_attr_type('Add', 'T')
self.assertEqual(typ, pywrap_tensorflow.TF_ATTR_TYPE)
def testGetAttrList(self):
typ = backprop.op_attr_type('MaxPool', 'ksize')
self.assertEqual(typ, [pywrap_tensorflow.TF_ATTR_INT])
def testMakeAttrType(self):
self.assertEqual(dtypes.float32,
backprop.make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1))
def testMakeAttrTypeList(self):
self.assertEqual([dtypes.float32],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_TYPE], [1]))
def testMulType(self):
def mul(x):
return math_ops._mul_dispatch(x, x) # pylint: disable=protected-access
self.assertAllEqual(
backprop.gradients_function(mul)(3.0)[0].numpy(),
6.0)
def testMakeAttrShape(self):
for s in ([], None, [1, 2, 3], [None, None], [1, None, 3]):
expected = tensor_shape.TensorShape(s).as_proto()
actual = backprop.make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, s)
self.assertEqual(
expected,
actual,
msg=('For shape %r, expected %r != %r actual' % (s, expected,
actual)))
def testMakeAttrShapeList(self):
shape_list = [[], None, [1, 2, 3], [None, None], [1, None, 3]]
self.assertEqual(
[tensor_shape.TensorShape(s).as_proto() for s in shape_list],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_SHAPE], shape_list))
def testArgsGradientFunction(self):
def f(*args):
return args[0] * args[0]
grad = backprop.gradients_function(f)
self.assertAllEqual(grad(1.0)[0], 2.0)
def testPartial(self):
def f(x, y):
return x * y
part = functools.partial(f, constant_op.constant(2.0))
self.assertAllEqual(
backprop.gradients_function(part)(constant_op.constant(1.0))[0],
2.0)
def testReturnSameThing(self):
def f(x):
return x, 2 * x
self.assertAllEqual(backprop.gradients_function(f)(1.0)[0], 3.0)
@test_util.assert_no_new_tensors
def testExceptionSafety(self):
def f(unused_x):
raise ValueError()
try:
backprop.gradients_function(f)(1.0)
except ValueError:
pass
def real_f(x):
return x * x
self.assertAllEqual(backprop.gradients_function(real_f)(1.0)[0], 2.0)
@test_util.assert_no_new_tensors
def testMultiValueConvertToTensor(self):
x = resource_variable_ops.ResourceVariable(
initial_value=array_ops.constant([1.0]), name='x')
def fn():
a = math_ops.add(x.value(), 1.0)
# Make sure convert_to_tensor works correctly with list of TensorNodes.
b = array_ops.stack([a, a], axis=0)
return math_ops.reduce_mean(b)
grad = backprop.implicit_grad(fn)()[0][0]
self.assertAllEqual([1.0], grad)
def testOutput(self):
def multiout(x):
return x + 2, x * x
x = constant_op.constant([0.0, 1.0, 2.0])
grad = backprop.gradients_function(multiout)(x)[0]
self.assertAllEqual([1.0, 3.0, 5.0], grad)
def testMultiValuePreservesIfNotDiffedAgainst(self):
def tfe_conv2d(timage, tkernel, conv2dstrides):
return nn_ops.conv2d(timage, tkernel, conv2dstrides, 'SAME')
i = constant_op.constant([[[[1.0]]]])
k = constant_op.constant([[[[2.0]]]])
s = [1, 1, 1, 1]
grad = backprop.gradients_function(tfe_conv2d, params=(0,))(i, k, s)[0]
self.assertAllEqual([[[[2.0]]]], grad)
def testSameObjectForMultipleArguments(self):
def f(x, y):
return math_ops.multiply(x, y)
g = backprop.gradients_function(f)
def np_g(x, y):
dx, dy = g(x, y)
return [dx.numpy(), dy.numpy()]
x = constant_op.constant(1.)
self.assertAllEqual([1., 1.], np_g(x, x))
x = 1.
self.assertAllEqual([1., 1.], np_g(x, x))
x = constant_op.constant([[1.]])
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
x = [[1.]]
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
v = resource_variable_ops.ResourceVariable(
initial_value=1., name='testSameObjectForMultipleArguments.Variable')
self.assertAllEqual([1., 1.], np_g(v, v))
@test_util.assert_no_new_tensors
def testImplicitGradientsCustomGradientAndCachedVariableValue(self):
@custom_gradient.custom_gradient
def my_square(x):
result = math_ops.square(x)
def grad(dr):
return 2 * dr * x + 1
return result, grad
x = resource_variable_ops.ResourceVariable(
initial_value=3., name='X.' + self.id())
def f():
return my_square(x)
g = backprop.implicit_grad(f)
grads_and_vars = g()
self.assertEqual(1, len(grads_and_vars))
grad, var = grads_and_vars[0]
self.assertAllEqual(7, grad)
self.assertAllEqual(x, var)
def testJacobianCustomGradient(self):
class MyCallable(object):
def __init__(self):
self.a = variables.Variable(1.)
self.b = variables.Variable(2.)
self.c = variables.Variable(3.)
def __call__(self, x):
return self.a * x * x + self.b * x + self.c
@def_function.function
def call(c, x):
@custom_gradient.custom_gradient
def _call():
y = c(x)
def grad(dy, variables=None): # pylint: disable=redefined-outer-name
with backprop.GradientTape(persistent=True) as g:
g.watch(variables)
y = c(x)
grad_vars = [
2 * math_ops.reduce_sum(dy * g.jacobian(y, v)) for v in variables
]
del g
return (), grad_vars
return y, grad
return _call()
c = MyCallable()
x = constant_op.constant([1., 2., 3.])
with backprop.GradientTape(persistent=True) as g:
g.watch([c.a, c.b, c.c])
y = call(c, x)
self.assertAllEqual(g.gradient(y, x), None)
@test_util.assert_no_new_tensors
def testCustomGradient(self):
@custom_gradient.custom_gradient
def my_mul(x, y):
result = x*y
def grad(dr):
return [dr*y, dr*x]
return result, grad
lr = 0.25
x = resource_variable_ops.ResourceVariable(2., name='x')
def loss(x):
return my_mul(2., x.read_value())
loss_grads_fn = backprop.implicit_val_and_grad(loss)
losses = []
for _ in range(5):
loss, grads_and_vars = loss_grads_fn(x)
losses.append(loss.numpy())
for (grad, var) in grads_and_vars:
var.assign_sub(lr*grad)
self.assertAllEqual(losses, [4.0, 3., 2., 1., 0.])
@test_util.assert_no_new_tensors
def testCustomGradientIdentity(self):
@custom_gradient.custom_gradient
def my_identity(x):
def grad(dresult):
return [2 * dresult]
return x, grad
self.assertAllEqual(backprop.gradients_function(my_identity)(1.0)[0], 2.0)
def testDifferentiatingFunctionThatReturnsNone(self):
def fn(x, y):
result = x*y # pylint: disable=unused-variable
x = constant_op.constant(1)
y = constant_op.constant(2)
loss_grads_fn = backprop.implicit_val_and_grad(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
loss_grads_fn(x, y)
val_and_grads_fn = backprop.val_and_grad_function(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
val_and_grads_fn(x, y)
def testZerosCacheDoesntLeakAcrossGraphs(self):
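    # Building the same gradient in two freshly created graphs must give
    # identical results; cached zero tensors must not leak between graphs.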
with ops.Graph().as_default():
def get_grad():
with ops.Graph().as_default(), self.cached_session():
t = constant_op.constant(1, dtype=dtypes.float32, shape=(10, 4))
x = constant_op.constant(2, dtype=dtypes.float32, shape=(10, 4))
with backprop.GradientTape() as tape:
tape.watch(x)
x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
y1 = x1**2
y = array_ops.concat([y1, t], axis=1)
return self.evaluate(tape.gradient(y, x))
grad1 = get_grad()
grad2 = get_grad()
self.assertAllEqual(grad1, grad2)
@test_util.run_in_graph_and_eager_modes
def testSelectivelyWatchVariables(self):
x1 = resource_variable_ops.ResourceVariable(1.0)
x2 = resource_variable_ops.ResourceVariable(1.0)
with backprop.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(x2)
y = x1**2
z = x2**3
self.assertTupleEqual(tape.watched_variables(), (x2,))
dy, dz = tape.gradient([y, z], [x1, x2])
self.evaluate([x1.initializer, x2.initializer])
self.assertIsNone(dy)
self.assertEqual(self.evaluate(dz), 3.0)
@test_util.run_in_graph_and_eager_modes
def testDifferentiatingScalarCache(self):
    # In the following test, if x2 = x1 (i.e. the objects are the exact same),
# then y is essentially, 2*x1, and dy/dx1 = 2.
# When we had a pure scalar cache in eager, this would be the case. This
# test prevents us from going back to that case.
with backprop.GradientTape(persistent=False) as g:
x1 = constant_op.constant(3.0)
x2 = constant_op.constant(3.0)
g.watch(x1)
g.watch(x2)
y = x1 + x2
grad = g.gradient(target=y, sources=[x1])
self.assertEqual(self.evaluate(grad), [1.0])
def testVariablesAndConstantsProduceTheSameGradients(self):
# In the following test, differentiating [y, z] against [a, b] gives:
# (dy/da + dz/da, dy/db + dz/db).
# If a and b are the same constant, dz/da will not be 0 (which it should
# be).
    # This is solved by using variables, since doing a read_value on a variable
    # produces a new tensor and a corresponding TensorHandle rather than
    # reusing the same tensor (which would happen if we were using a cache and
    # reusing EagerTensor objects).
def get_grads(a, b):
with backprop.GradientTape() as tape:
tape.watch([a, b])
y = a**3
z = b**2
return tape.gradient([y, z], [a, b])
gradients_constants = get_grads(
constant_op.constant(2.0), constant_op.constant(2.0))
gradients_variables = get_grads(
resource_variable_ops.ResourceVariable(2.0),
resource_variable_ops.ResourceVariable(2.0))
self.assertAllEqual(gradients_constants, gradients_variables)
def testUnknownShapes(self):
with ops.Graph().as_default():
with backprop.GradientTape() as tape:
a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
tape.watch(a)
b = a**3
db_da = tape.gradient(b, a)
with self.cached_session() as sess:
self.assertEqual((8.0, 12.0), sess.run((b, db_da), feed_dict={a: 2.0}))
@test_util.run_in_graph_and_eager_modes
def testCustomGradientInEagerAndGraph(self):
@custom_gradient.custom_gradient
def f(x):
y = x * x
def grad(dy):
return [4 * dy]
return y, grad
with backprop.GradientTape() as t:
c = constant_op.constant(1.0)
t.watch(c)
g = f(c)
self.assertAllEqual(self.evaluate(t.gradient(g, c)), 4.0)
def testOverrideSecondOrderWithCustomGradient(self):
@custom_gradient.custom_gradient
def f(x):
def first_order_grad(dz):
@custom_gradient.custom_gradient
def first_order_custom(unused_x):
def h(ddz):
return -2.1 * ddz
return -1.1, h
return dz * first_order_custom(x)
return x + 10., first_order_grad
c = constant_op.constant(1.)
with backprop.GradientTape() as outer:
outer.watch(c)
with backprop.GradientTape() as inner:
inner.watch(c)
d = f(c) ** 4.
dd = inner.gradient(d, c)
self.assertAllClose(4. * f(c) ** 3. * -1.1, dd)
self.assertAllClose(3. * 4. * f(c) ** 2. * -1.1 * -1.1
+ 4. * f(c) ** 3. * -2.1,
outer.gradient(dd, c))
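    # Working through the numbers: f(c) has the value c + 10. = 11., its
    # first-order gradient is overridden to -1.1 and the gradient of that
    # gradient to -2.1. So for d = f(c)**4, dd = 4 * f**3 * (-1.1), and
    # differentiating dd again by the product rule gives
    # 3 * 4 * f**2 * (-1.1) * (-1.1) + 4 * f**3 * (-2.1), which is what the
    # two assertions above check.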
@test_util.run_in_graph_and_eager_modes
def testCustomGradientForwardprop(self):
@custom_gradient.custom_gradient
def f(x):
z = 2. * tensor_util.constant_value(x)
def g(dz):
@custom_gradient.custom_gradient
def first_order(unused_x, unused_dz):
def second_order_and_transpose(unused_ddz):
return 2.2, 3.1
return 2.1, second_order_and_transpose
return first_order(x, dz)
return z, g
with backprop.GradientTape(persistent=True) as t:
with backprop.GradientTape() as tt:
c = constant_op.constant(1.)
t.watch(c)
tt.watch(c)
output_grad = array_ops.ones([])
t.watch(output_grad)
output = f(c)
self.assertAllClose(2., output)
gc = tt.gradient(output, c, output_gradients=output_grad)
self.assertAllClose(2.1, gc)
ggc = t.gradient(gc, c)
self.assertAllClose(2.2, ggc)
    # Note that when executed eagerly this kind of transpose is not efficient. But
# from a tf.function we could prune out the first-order gradient
# computation.
transpose = t.gradient(gc, output_grad)
self.assertAllClose(3.1, transpose)
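    # Summary of the three checks above: f's custom gradient makes
    # tt.gradient(output, c) evaluate to 2.1, and its nested custom gradient
    # reports 2.2 for the gradient of gc with respect to c and 3.1 for the
    # gradient of gc with respect to the incoming output gradient, which is
    # the "transpose" referred to in the comment above.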
@test_util.run_in_graph_and_eager_modes
def testMaxPooling3DGradient(self):
if test.is_built_with_rocm():
self.skipTest('Pooling with 3D tensors is not supported in ROCm')
def forward(a):
r = max_pooling3d(a, pool_size=pool_size, strides=strides, padding='SAME')
return r
input_sizes = [1, 3, 2, 4, 1]
pool_size = (2, 2, 1)
strides = (1, 1, 1)
total_size = np.prod(input_sizes)
x = np.arange(1, total_size + 1, dtype=np.float32)
aa = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
da = backprop.gradients_function(forward)(aa)
if not context.executing_eagerly():
tf_aa = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
tf_max = max_pooling3d(
tf_aa, pool_size=pool_size, strides=strides, padding='SAME')
tf_da = gradients.gradients(tf_max, [tf_aa])
self.assertAllEqual(da[0], tf_da[0].eval())
@test_util.run_in_graph_and_eager_modes
def testWatchBadThing(self):
g = backprop.GradientTape()
with self.assertRaisesRegexp(ValueError, 'ndarray'):
g.watch(np.array(1.))
def testWatchedVariablesAfterNonPersistentGradientCall(self):
with backprop.GradientTape(persistent=False) as tape:
x = resource_variable_ops.ResourceVariable(1.0)
tape.watch(x)
tape.gradient(x, x)
self.assertEqual((x,), tape.watched_variables())
def testWatchedVariablesOnlyHasVariablesFromLastTape(self):
with backprop.GradientTape(persistent=False) as tape:
x = resource_variable_ops.ResourceVariable(1.0)
tape.watch(x)
with backprop.GradientTape(persistent=False) as tape:
z = resource_variable_ops.ResourceVariable(2.0)
tape.watch(z)
tape.gradient(z, z)
self.assertEqual((z,), tape.watched_variables())
def testWatchedVariablesRespectReset(self):
with backprop.GradientTape(persistent=False) as tape:
x = resource_variable_ops.ResourceVariable(1.0)
tape.watch(x)
self.assertEqual((x,), tape.watched_variables())
tape.reset()
z = resource_variable_ops.ResourceVariable(2.0)
tape.watch(z)
self.assertEqual((z,), tape.watched_variables())
tape.gradient(z, z)
self.assertEqual((z,), tape.watched_variables())
class JacobianTest(test.TestCase):
def _jacobian(self, experimental_use_pfor):
    persistent = context.executing_eagerly() and not experimental_use_pfor
with backprop.GradientTape(persistent=persistent) as g:
x = constant_op.constant([1., 2.])
y = constant_op.constant([3., 4.])
g.watch(x)
g.watch(y)
z = x * x * y
jacobian = g.jacobian(z, [x, y],
experimental_use_pfor=experimental_use_pfor)
answer = [array_ops.diag(2 * x * y), array_ops.diag(x * x)]
return jacobian, answer
@test_util.run_v1_only('b/120545219')
def testPfor(self):
jacobian, answer = self._jacobian(experimental_use_pfor=True)
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testWhileLoop(self):
jacobian, answer = self._jacobian(experimental_use_pfor=False)
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testPforDefun(self):
@function.defun
def _f():
return self._jacobian(experimental_use_pfor=True)
jacobian, answer = _f()
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testWhileLoopDefun(self):
@function.defun
def _f():
return self._jacobian(experimental_use_pfor=False)
jacobian, answer = _f()
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testPersistentTape(self):
if not context.executing_eagerly():
return
with backprop.GradientTape() as g:
x = constant_op.constant([1.0, 2.0])
g.watch(x)
y = x * x
with self.assertRaisesRegexp(RuntimeError, 'persistent'):
g.jacobian(y, x, experimental_use_pfor=False)
@test_util.run_v1_only('b/120545219')
def testPforException(self):
var = variables.Variable([1.])
@custom_gradient.custom_gradient
def op(x):
def grad(_):
# Note that we perform a stateful operation here that will not be
# compatible with parallel for construct.
with ops.control_dependencies(
[var.assign(random_ops.random_uniform([1]))]):
return constant_op.constant(1.)
return x, grad
with backprop.GradientTape() as g:
x = constant_op.constant([1., 2.])
g.watch(x)
y = op(x)
with self.assertRaisesRegexp(ValueError, 'No converter'):
g.jacobian(y, x, experimental_use_pfor=True)
@test_util.run_v1_only('b/120545219')
def test_parallel_iterations(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant([[1., 2], [3, 4]])
g.watch(x)
y = math_ops.matmul(x, x)
self.assertAllClose(g.jacobian(y, x, parallel_iterations=2),
g.jacobian(y, x, parallel_iterations=3))
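    # parallel_iterations only controls how many iterations of the underlying
    # while_loop may run concurrently, so it should not change the computed
    # jacobian; the assertion above checks that two different settings agree.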
@test_util.run_in_graph_and_eager_modes
def test_nested_jacobian(self):
if context.executing_eagerly():
# TODO(agarwal): b/128842926
self.skipTest('Conversion of function calls not implemented yet.')
x = array_ops.ones((10, 2))
with backprop.GradientTape(persistent=False) as g:
g.watch(x)
with backprop.GradientTape(persistent=False) as gg:
gg.watch(x)
y = math_ops.reduce_sum(math_ops.square(x))
dy_x = gg.jacobian(y, x)
dy_xx = g.batch_jacobian(dy_x, x)
dy_xx_answer = [[[2., 0], [0, 2.]]] * 10
self.assertAllClose(dy_xx_answer, self.evaluate(dy_xx))
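    # For y = sum(x**2) the first derivative is dy/dx = 2 * x, so the batch
    # jacobian of dy_x with respect to x is 2 * identity for each of the 10
    # rows, which is exactly the dy_xx_answer constructed above.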
@test_util.run_in_graph_and_eager_modes
def test_indexed_slices(self):
with backprop.GradientTape(persistent=True) as g:
inp = random_ops.random_uniform([3, 2])
g.watch(inp)
output = nn.embedding_lookup(inp, [0, 2])
self.assertAllClose(
g.jacobian(output, inp, experimental_use_pfor=True),
g.jacobian(output, inp, experimental_use_pfor=False))
@test_util.run_all_in_graph_and_eager_modes
class BatchJacobianTest(test.TestCase, parameterized.TestCase):
def _batch_jacobian(self, experimental_use_pfor):
    persistent = context.executing_eagerly() and not experimental_use_pfor
with backprop.GradientTape(persistent=persistent) as g:
x = constant_op.constant([[1., 2.], [3., 4.]])
y = constant_op.constant([[3., 4.], [5., 6.]])
g.watch(x)
z = x * x * y
batch_jacobian = g.batch_jacobian(
z, x, experimental_use_pfor=experimental_use_pfor)
answer = array_ops.stack([array_ops.diag(2 * x[0] * y[0]),
array_ops.diag(2 * x[1] * y[1])])
return batch_jacobian, answer
def testPfor(self):
batch_jacobian, answer = self._batch_jacobian(experimental_use_pfor=True)
self.assertAllEqual(answer, batch_jacobian)
def testWhileLoop(self):
batch_jacobian, answer = self._batch_jacobian(experimental_use_pfor=False)
self.assertAllEqual(answer, batch_jacobian)
def testPforDefun(self):
@function.defun
def _f():
return self._batch_jacobian(experimental_use_pfor=True)
batch_jacobian, answer = _f()
self.assertAllEqual(answer, batch_jacobian)
def testWhileLoopDefun(self):
@function.defun
def _f():
return self._batch_jacobian(experimental_use_pfor=False)
batch_jacobian, answer = _f()
self.assertAllEqual(answer, batch_jacobian)
def testPersistentTape(self):
if not context.executing_eagerly():
return
with backprop.GradientTape() as g:
x = constant_op.constant([[1.0, 2.0]])
g.watch(x)
y = x * x
with self.assertRaisesRegexp(RuntimeError, 'persistent'):
g.batch_jacobian(y, x, experimental_use_pfor=False)
def testBadShape(self):
x = random_ops.random_uniform([2, 3])
with backprop.GradientTape() as g:
y = array_ops.concat([x, x], axis=0)
with self.assertRaisesRegexp(ValueError, 'Need first dimension'):
g.batch_jacobian(y, x)
def testBadInputRank(self):
x = random_ops.random_uniform([2])
with backprop.GradientTape() as g:
y = random_ops.random_uniform([2, 2])
with self.assertRaisesRegexp(ValueError, 'must have rank at least 2'):
g.batch_jacobian(y, x)
def testBadOutputRank(self):
x = random_ops.random_uniform([2, 2])
with backprop.GradientTape() as g:
y = random_ops.random_uniform([2])
with self.assertRaisesRegexp(ValueError, 'must have rank at least 2'):
g.batch_jacobian(y, x)
def testPforException(self):
var = variables.Variable([1.])
@custom_gradient.custom_gradient
def op(x):
def grad(_):
# Note that we perform a stateful operation here that will not be
# compatible with parallel for construct.
with ops.control_dependencies(
[var.assign(random_ops.random_uniform([1]))]):
return constant_op.constant(1.)
return x, grad
with backprop.GradientTape() as g:
x = constant_op.constant([[1.], [2.]])
g.watch(x)
y = op(x)
with self.assertRaisesRegexp(ValueError, 'No converter'):
g.batch_jacobian(y, x, experimental_use_pfor=True)
def test_parallel_iterations(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant([[1., 2], [3, 4]])
g.watch(x)
w = constant_op.constant([[1., 2, 3, 4], [5, 6, 7, 8]])
y = math_ops.matmul(x, w)
self.assertAllClose(g.batch_jacobian(y, x, parallel_iterations=2),
g.batch_jacobian(y, x, parallel_iterations=3))
@parameterized.parameters(
(True, True),
(True, False),
(False, True),
(False, False))
def test_degenerate_shape(self, use_function, use_pfor):
def f(x):
with backprop.GradientTape(persistent=True) as tape:
tape.watch(x)
y = x**2
return tape.batch_jacobian(y, x, experimental_use_pfor=use_pfor)
if use_function:
f = def_function.function(f)
self.assertAllEqual([1, 0, 0], array_ops.shape(f(array_ops.zeros([1, 0]))))
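    # With an input of shape [1, 0], y = x**2 also has shape [1, 0], so the
    # batch jacobian is expected to be an empty tensor of shape [1, 0, 0] in
    # both the pfor and while_loop code paths, with or without tf.function.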
class AggregateIndexedSlicesGradientsTest(test_util.TensorFlowTestCase):
def _assert_indexed_slices_equal(self, left, right):
self.assertAllEqual(
self.evaluate(ops.convert_to_tensor(left)),
self.evaluate(ops.convert_to_tensor(right)))
def testNoGradients(self):
self.assertIsNone(backprop.aggregate_indexed_slices_gradients([]))
def testOneGradient(self):
t = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
result = backprop.aggregate_indexed_slices_gradients([t])
self._assert_indexed_slices_equal(t, result)
def testMultipleGradients(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
result = backprop.aggregate_indexed_slices_gradients([t0, t1])
self._assert_indexed_slices_equal(total, result)
def testMultipleGradientsWithNones(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
t3 = None
total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
result = backprop.aggregate_indexed_slices_gradients([t0, t1, t3])
self._assert_indexed_slices_equal(total, result)
def testMixedTensorAndIndexedSlices(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = constant_op.constant([[0., 0.], [5, 6], [7., 8.]])
total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
result = backprop.aggregate_indexed_slices_gradients([t0, t1])
self._assert_indexed_slices_equal(total, result)
if __name__ == '__main__':
test.main()
|
{
"content_hash": "a713ee7ca7b696e0f3ef8879bd815b12",
"timestamp": "",
"source": "github",
"line_count": 1760,
"max_line_length": 80,
"avg_line_length": 32.47386363636364,
"alnum_prop": 0.6366308569828883,
"repo_name": "ppwwyyxx/tensorflow",
"id": "23cfbd44972db8ec4322220bb48f84c6c5e14f46",
"size": "57843",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/backprop_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45318"
},
{
"name": "C",
"bytes": "796611"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76521274"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952883"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1254789"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297774"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38709528"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7469"
},
{
"name": "Shell",
"bytes": "643731"
},
{
"name": "Smarty",
"bytes": "34743"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
"""Account settings blueprint for oauthclient."""
from __future__ import absolute_import
from operator import itemgetter
import six
from flask import Blueprint, current_app, render_template, request
from flask_babelex import gettext as _
from flask_breadcrumbs import register_breadcrumb
from flask_login import current_user, login_required
from flask_menu import register_menu
from ..models import RemoteAccount
blueprint = Blueprint(
'invenio_oauthclient_settings',
__name__,
url_prefix='/account/settings/linkedaccounts',
static_folder='../static',
template_folder='../templates',
)
@blueprint.route('/', methods=['GET', 'POST'])
@login_required
@register_menu(
blueprint, 'settings.oauthclient',
_('%(icon)s Linked accounts', icon='<i class="fa fa-link fa-fw"></i>'),
order=3,
active_when=lambda: request.endpoint.startswith(
'invenio_oauthclient_settings.'),
visible_when=lambda: bool(current_app.config.get(
'OAUTHCLIENT_REMOTE_APPS')) is not False
)
@register_breadcrumb(
blueprint, 'breadcrumbs.settings.oauthclient', _('Linked accounts')
)
def index():
"""List linked accounts."""
oauth = current_app.extensions['oauthlib.client']
services = []
service_map = {}
i = 0
for appid, conf in six.iteritems(
current_app.config['OAUTHCLIENT_REMOTE_APPS']):
if not conf.get('hide', False):
services.append(dict(
appid=appid,
title=conf['title'],
icon=conf.get('icon', None),
description=conf.get('description', None),
account=None
))
service_map[oauth.remote_apps[appid].consumer_key] = i
i += 1
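    # service_map keys are the OAuth consumer keys, so the RemoteAccount rows
    # fetched below can be matched back to their service via client_id.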
# Fetch already linked accounts
accounts = RemoteAccount.query.filter_by(
user_id=current_user.get_id()
).all()
for a in accounts:
if a.client_id in service_map:
services[service_map[a.client_id]]['account'] = a
# Sort according to title
services.sort(key=itemgetter('title'))
return render_template(
'invenio_oauthclient/settings/index.html',
services=services
)
|
{
"content_hash": "2eda5afe8ae36fca3e3c3742c0c7bba0",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 75,
"avg_line_length": 29.093333333333334,
"alnum_prop": 0.6416131989000916,
"repo_name": "tiborsimko/invenio-oauthclient",
"id": "91268a4e7201acf1f1fa34a391a9d57e0cf741cb",
"size": "2417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_oauthclient/views/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6539"
},
{
"name": "Python",
"bytes": "174905"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
}
|
import os
import sys
import xmlrpclib
from shutil import rmtree
from subprocess import check_call
TEMP_DIR = 'tmp'
PROJECT_NAME = 'cctrl'
DIST_DIR = os.path.join(TEMP_DIR, 'dist')
def main():
if is_current_version():
sys.exit("Version is not updated. Aborting release.")
dist()
cleanup()
def is_current_version():
pypi = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
return pypi.package_releases('cctrl')[0] == __version__
def dist():
try:
check_call(['python',
'setup.py',
'sdist',
'--dist-dir={0}'.format(DIST_DIR),
'--formats=gztar',
'upload'])
except OSError as e:
cleanup()
sys.exit(e)
def cleanup():
rmtree(TEMP_DIR)
if __name__ == '__main__':
execfile(os.path.join(PROJECT_NAME, 'version.py'))
main()
|
{
"content_hash": "81a0f3f917edffadfbd8f89a5fbc1910",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 63,
"avg_line_length": 20.431818181818183,
"alnum_prop": 0.5539488320355951,
"repo_name": "cloudControl/cctrl",
"id": "54e378193baa421a6894ada59125644968c2a09f",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ci/release.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Inno Setup",
"bytes": "25071"
},
{
"name": "Python",
"bytes": "165325"
},
{
"name": "Shell",
"bytes": "2073"
}
],
"symlink_target": ""
}
|
"""Verify the setchplenv.* script correctly set the environment for using chpl,
et al.
Also verify there are the correct setchplenv.* scripts and error if new ones
show up without updating this test.
"""
from __future__ import print_function
import distutils.spawn
import glob
import os
import os.path
import shlex
import subprocess
import sys
import unittest
# Add the chplenv dir to the python path.
chplenv_dir = os.path.join(os.path.dirname(__file__), '../../util/chplenv')
sys.path.insert(0, os.path.abspath(chplenv_dir))
import chpl_platform
def _skip_if(condition, reason):
"""Wrapper around unittest.skipIf, if available. If not, do not run test
when condition is True. unittest.skipIf was not added until python
2.7. Chapel testing requires python 2.6 support.
"""
if hasattr(unittest, 'skipIf'):
return unittest.skipIf(condition, reason)
elif condition:
        # Skip the test by instead running a noop function that just prints a
        # message.
def skip_it(func):
def noop(*args, **kwargs):
print('Skipping "{0}" because: {1}'.format(func.__name__, reason))
return noop
return skip_it
else:
# Run the test by simply returning the original test function.
def pass_thru(func):
return func
return pass_thru
class SetChplEnvTests(unittest.TestCase):
known_shells = ['bash', 'csh', 'fish', 'sh']
repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
chpl_home = os.environ.get('CHPL_HOME', repo_root)
chpl_platform = chpl_platform.get()
util_dir = os.path.join(chpl_home, 'util')
quickstart_dir = os.path.join(util_dir, 'quickstart')
@classmethod
def setchplenv_script(cls, shell):
return 'setchplenv.{0}'.format(shell)
@classmethod
def util_script(cls, shell):
return os.path.join(cls.util_dir, cls.setchplenv_script(shell))
@classmethod
def quickstart_script(cls, shell):
return os.path.join(cls.quickstart_dir, cls.setchplenv_script(shell))
def run_cmd(self, shell, cmd):
"""Run command in shell and return output."""
proc = subprocess.Popen(
[shell],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.chpl_home,
env=os.environ.copy()
)
out, _ = proc.communicate(input=cmd)
if proc.returncode != 0:
raise ValueError('Non-zero exit code ({0}) from: {1}'.format(
proc.returncode, cmd))
return out
def test_known_shells(self):
"""Verify all known shells have equivalent setchplenv.* scripts in util/ and
util/quickstart/.
"""
for shell in self.known_shells:
self.assertTrue(os.path.exists(self.util_script(shell)))
self.assertTrue(os.path.exists(self.quickstart_script(shell)))
def test_for_new_scripts(self):
"""Verify new scripts are tested here."""
util_scripts = glob.glob(self.util_script('*'))
quickstart_scripts = glob.glob(self.quickstart_script('*'))
self.assertEqual(len(self.known_shells), len(util_scripts),
'Untested setchplenv.* script in util/')
self.assertEqual(len(self.known_shells), len(quickstart_scripts),
'Untested setchplenv.* script in util/quickstart/')
for shell in self.known_shells:
self.assertTrue(hasattr(self, 'test_setchplenv__{0}'.format(shell)),
'No test case for shell: {0}'.format(shell))
def check_shell(self, shell, setchplenv_script, source_cmd, path_sep,
post_source_cmd=None, shell_cmd=None):
"""Run checks for a single shell."""
if shell_cmd is None:
shell_cmd = shell
def get_cmd(cmd):
prefix = '{source_cmd} {setchplenv_script}\n'.format(
source_cmd=source_cmd, setchplenv_script=setchplenv_script)
if post_source_cmd is not None:
prefix = '{0}{1}\n'.format(prefix, post_source_cmd)
return '{0}{1}\n'.format(prefix, cmd)
def get_var(var_name):
"""Get and return a var from setchplenv.* env."""
out = self.run_cmd(shell_cmd, get_cmd('echo ${0}'.format(var_name)))
value = out.splitlines()[-1]
return value
def get_path_var(var_name):
"""Gets the path-like var and returns a list of its parts."""
path = get_var(var_name)
path_parts = path.split(path_sep)
return path_parts
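        # The scripts are expected to prepend $CHPL_HOME/bin/<platform> and
        # $CHPL_HOME/util to PATH; comparing os.stat() results lets equivalent
        # spellings of the same directory (e.g. via symlinks) still match.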
path_parts = get_path_var('PATH')
self.assertTrue(len(path_parts) >= 2)
expected_path_chpl = os.path.join(self.chpl_home, 'bin', self.chpl_platform)
expected_path_util = os.path.join(self.chpl_home, 'util')
actual_path_chpl = path_parts[0]
actual_path_util = path_parts[1]
self.assertEqual(os.stat(expected_path_chpl), os.stat(actual_path_chpl))
self.assertEqual(os.stat(expected_path_util), os.stat(actual_path_util))
manpath_parts = get_path_var('MANPATH')
self.assertTrue(len(manpath_parts) >= 1)
expected_manpath_chpl = os.path.join(self.chpl_home, 'man')
manpath_chpl = manpath_parts[0]
self.assertEqual(os.stat(expected_manpath_chpl), os.stat(manpath_chpl))
actual_chpl_home = get_var('CHPL_HOME')
self.assertEqual(os.stat(self.chpl_home), os.stat(actual_chpl_home))
actual_platform = get_var('CHPL_HOST_PLATFORM')
self.assertEqual(self.chpl_platform, actual_platform)
if 'quickstart' in setchplenv_script:
self.assertEqual('none', get_var('CHPL_COMM'))
self.assertEqual('fifo', get_var('CHPL_TASKS'))
self.assertEqual('none', get_var('CHPL_GMP'))
self.assertEqual('none', get_var('CHPL_REGEXP'))
self.assertEqual('none', get_var('CHPL_LLVM'))
# TODO: Re-add this check when/if tcmalloc becomes
# default. (thomasvandoren, 2015-01-13)
# self.assertEqual('cstdlib', get_var('CHPL_MEM'))
def check_scripts(self, shell, source_cmd, path_sep,
post_source_cmd=None, shell_cmd=None):
"""Run check_shell on util/ and quickstart/ scripts."""
self.check_shell(shell, self.util_script(shell), source_cmd, path_sep,
post_source_cmd=post_source_cmd, shell_cmd=shell_cmd)
self.check_shell(shell, self.quickstart_script(shell), source_cmd, path_sep,
post_source_cmd=post_source_cmd, shell_cmd=shell_cmd)
def test_setchplenv__bash(self):
"""Verify bash versions of setchplenv.* work as expected."""
self.check_scripts('bash', 'source', ':')
def test_setchplenv__csh(self):
"""Verify csh versions of setchplenv.* work as expected."""
self.check_scripts('csh', 'source', ':', post_source_cmd='rehash', shell_cmd='tcsh')
@_skip_if(distutils.spawn.find_executable('fish') is None,
'fish is not installed on system.')
def test_setchplenv__fish(self):
"""Verify fish versions of setchplenv.* work as expected."""
self.check_scripts('fish', '.', ' ')
def test_setchplenv__sh(self):
"""Verify sh versions of setchplenv.* work as expected."""
self.check_scripts('sh', '.', ':')
@_skip_if(distutils.spawn.find_executable('zsh') is None,
'zsh is not installed on system.')
def test_setchplenv__zsh(self):
"""Verify bash versions of setchplenv.* work as expected with zsh."""
self.check_scripts('bash', 'source', ':', shell_cmd='zsh')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "29e5bfbc218cf91b66c960481723191c",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 92,
"avg_line_length": 38.90594059405941,
"alnum_prop": 0.6088560885608856,
"repo_name": "chizarlicious/chapel",
"id": "8a269c91f24c5ea3921149332c0092489843c60e",
"size": "7882",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/setchplenv/verify_setchplenv_scripts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2072"
},
{
"name": "C",
"bytes": "3689430"
},
{
"name": "C++",
"bytes": "3493095"
},
{
"name": "CSS",
"bytes": "919"
},
{
"name": "Chapel",
"bytes": "11905780"
},
{
"name": "Cuda",
"bytes": "4304"
},
{
"name": "Emacs Lisp",
"bytes": "14304"
},
{
"name": "FORTRAN",
"bytes": "18153"
},
{
"name": "Gnuplot",
"bytes": "5536"
},
{
"name": "HTML",
"bytes": "2419"
},
{
"name": "JavaScript",
"bytes": "50663"
},
{
"name": "LLVM",
"bytes": "16367"
},
{
"name": "Lex",
"bytes": "37600"
},
{
"name": "Makefile",
"bytes": "108072"
},
{
"name": "Mathematica",
"bytes": "4971"
},
{
"name": "Perl",
"bytes": "240233"
},
{
"name": "Python",
"bytes": "646199"
},
{
"name": "Shell",
"bytes": "174157"
},
{
"name": "TeX",
"bytes": "869966"
},
{
"name": "VimL",
"bytes": "14876"
},
{
"name": "Yacc",
"bytes": "2337"
},
{
"name": "Zimpl",
"bytes": "1115"
}
],
"symlink_target": ""
}
|
from unittest import mock
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch, QuerySet
from django.db.models.query import get_prefetcher, prefetch_related_objects
from django.db.models.sql import Query
from django.test import TestCase, override_settings
from django.test.utils import CaptureQueriesContext
from .models import (
Article, Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book,
Bookmark, BookReview, BookWithYear, Comment, Department, Employee,
FavoriteAuthors, House, LessonEntry, ModelIterableSubclass, Person,
Qualification, Reader, Room, TaggedItem, Teacher, WordEntry,
)
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Poems')
cls.book2 = Book.objects.create(title='Jane Eyre')
cls.book3 = Book.objects.create(title='Wuthering Heights')
cls.book4 = Book.objects.create(title='Sense and Sensibility')
cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name='Amy')
cls.reader2 = Reader.objects.create(name='Belinda')
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
class PrefetchRelatedTests(TestDataMixin, TestCase):
def assertWhereContains(self, sql, needle):
where_idx = sql.index('WHERE')
self.assertEqual(
sql.count(str(needle), where_idx), 1,
msg="WHERE clause doesn't contain %s, actual SQL: %s" % (needle, sql[where_idx:])
)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
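        # prefetch_related('authors') issues two queries in total: one for the
        # books and a single IN query for all of their authors, instead of one
        # authors query per book.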
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors')]
self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related('bookwithyear').all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_onetoone_reverse_with_to_field_pk(self):
"""
A model (Bio) with a OneToOneField primary key (author) that references
a non-pk field (name) on the related model (Author) is prefetchable.
"""
Bio.objects.bulk_create([
Bio(author=self.author1),
Bio(author=self.author2),
Bio(author=self.author3),
])
authors = Author.objects.filter(
name__in=[self.author1, self.author2, self.author3],
).prefetch_related('bio')
with self.assertNumQueries(2):
for author in authors:
self.assertEqual(author.name, author.bio.author.name)
def test_survives_clone(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related('first_time_authors')
self.assertIn(qs[0], qs)
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""A m2m can be followed through another m2m."""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books', 'books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by', 'books')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_get(self):
"""
Objects retrieved with .get() get the prefetch behavior.
"""
# Need a double
with self.assertNumQueries(3):
author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
lists = [[str(r) for r in b.read_by.all()] for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
A m2m relation can be followed after a relation like ForeignKey that
doesn't have many objects.
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
lists = [[str(r) for r in a.first_book.read_by.all()]
for a in qs]
self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]])
def test_reverse_one_to_one_then_m2m(self):
"""
A m2m relation can be followed after going through the select_related
reverse of an o2o.
"""
qs = Author.objects.prefetch_related('bio__books').select_related('bio')
with self.assertNumQueries(1):
list(qs.all())
Bio.objects.create(author=self.author1)
with self.assertNumQueries(2):
list(qs.all())
def test_attribute_error(self):
qs = Reader.objects.all().prefetch_related('books_read__xyz')
msg = (
"Cannot find 'xyz' on Book object, 'books_read__xyz' "
"is an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related('authors__name')
msg = (
"'authors__name' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to prefetch_related()."
)
with self.assertRaisesMessage(ValueError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
self.assertIn("name", str(cm.exception))
def test_prefetch_eq(self):
prefetch_1 = Prefetch('authors', queryset=Author.objects.all())
prefetch_2 = Prefetch('books', queryset=Book.objects.all())
self.assertEqual(prefetch_1, prefetch_1)
self.assertEqual(prefetch_1, mock.ANY)
self.assertNotEqual(prefetch_1, prefetch_2)
def test_forward_m2m_to_attr_conflict(self):
msg = 'to_attr=authors conflicts with a field on the Book model.'
authors = Author.objects.all()
with self.assertRaisesMessage(ValueError, msg):
list(Book.objects.prefetch_related(
Prefetch('authors', queryset=authors, to_attr='authors'),
))
# Without the ValueError, an author was deleted due to the implicit
# save of the relation assignment.
self.assertEqual(self.book1.authors.count(), 3)
def test_reverse_m2m_to_attr_conflict(self):
msg = 'to_attr=books conflicts with a field on the Author model.'
poems = Book.objects.filter(title='Poems')
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.prefetch_related(
Prefetch('books', queryset=poems, to_attr='books'),
))
# Without the ValueError, a book was deleted due to the implicit
# save of reverse relation assignment.
self.assertEqual(self.author1.books.count(), 2)
def test_m2m_then_reverse_fk_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__addresses'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_m2m_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__favorite_authors'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_reverse_one_to_one_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__authorwithage'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.id)
def test_filter_deferred(self):
"""
Related filtering of prefetched querysets is deferred until necessary.
"""
add_q = Query.add_q
with mock.patch.object(
Query,
'add_q',
autospec=True,
side_effect=lambda self, q: add_q(self, q),
) as add_q_mock:
list(Book.objects.prefetch_related('authors'))
self.assertEqual(add_q_mock.call_count, 1)
class RawQuerySetTests(TestDataMixin, TestCase):
def test_basic(self):
with self.assertNumQueries(2):
books = Book.objects.raw(
"SELECT * FROM prefetch_related_book WHERE id = %s",
(self.book1.id,)
).prefetch_related('authors')
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3])
def test_prefetch_before_raw(self):
with self.assertNumQueries(2):
books = Book.objects.prefetch_related('authors').raw(
"SELECT * FROM prefetch_related_book WHERE id = %s",
(self.book1.id,)
)
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3])
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.raw(
"SELECT * FROM prefetch_related_author"
).prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
Helper method that returns a list containing a list of the objects in the
obj_iter. Then for each object in the obj_iter, the path will be
recursively travelled and the found objects are added to the return value.
"""
ret_val = []
if hasattr(obj_iter, 'all'):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
@classmethod
def setUpTestData(cls):
cls.person1 = Person.objects.create(name='Joe')
cls.person2 = Person.objects.create(name='Mary')
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
cls.house1 = House.objects.create(name='House 1', address='123 Main St', owner=cls.person1)
cls.room1_1 = Room.objects.create(name='Dining room', house=cls.house1)
cls.room1_2 = Room.objects.create(name='Lounge', house=cls.house1)
cls.room1_3 = Room.objects.create(name='Kitchen', house=cls.house1)
cls.house1.main_room = cls.room1_1
cls.house1.save()
cls.person1.houses.add(cls.house1)
cls.house2 = House.objects.create(name='House 2', address='45 Side St', owner=cls.person1)
cls.room2_1 = Room.objects.create(name='Dining room', house=cls.house2)
cls.room2_2 = Room.objects.create(name='Lounge', house=cls.house2)
cls.room2_3 = Room.objects.create(name='Kitchen', house=cls.house2)
cls.house2.main_room = cls.room2_1
cls.house2.save()
cls.person1.houses.add(cls.house2)
cls.house3 = House.objects.create(name='House 3', address='6 Downing St', owner=cls.person2)
cls.room3_1 = Room.objects.create(name='Dining room', house=cls.house3)
cls.room3_2 = Room.objects.create(name='Lounge', house=cls.house3)
cls.room3_3 = Room.objects.create(name='Kitchen', house=cls.house3)
cls.house3.main_room = cls.room3_1
cls.house3.save()
cls.person2.houses.add(cls.house3)
cls.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=cls.person2)
cls.room4_1 = Room.objects.create(name='Dining room', house=cls.house4)
cls.room4_2 = Room.objects.create(name='Lounge', house=cls.house4)
cls.room4_3 = Room.objects.create(name='Kitchen', house=cls.house4)
cls.house4.main_room = cls.room4_1
cls.house4.save()
cls.person2.houses.add(cls.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related('houses')
        # Note the trailing comma: related_objs_normal is a 1-tuple, hence the
        # (related_objs_from_traverse,) wrapping in the assertion below.
        related_objs_normal = [list(p.houses.all()) for p in qs],
related_objs_from_traverse = [[inner[0] for inner in o[1]]
for o in self.traverse_qs(qs, [['houses']])]
self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
msg = (
"'houses' lookup was already seen with a different queryset. You "
"may need to adjust the ordering of your lookups."
)
# lookup.queryset shouldn't be evaluated.
with self.assertNumQueries(3):
with self.assertRaisesMessage(ValueError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('houses', queryset=House.objects.all()),
),
[['houses', 'rooms']],
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
msg = (
"Cannot find 'houses_lst' on Person object, 'houses_lst__rooms' is "
"an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
'houses_lst__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', 'houses'),
[['houses', 'rooms']]
)
self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses'),
[['houses']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses')),
[['houses']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
[['houses_lst']]
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related('occupants'),
[['occupants']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants')),
[['occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
[['occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related('house__occupants'),
[['house', 'occupants']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants')),
[['house', 'occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
[['house', 'occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
[['content_object', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch('content_object'),
Prefetch('content_object__rooms', to_attr='rooms_lst')
),
[['content_object', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses', 'houses__rooms'),
[['houses', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
[['houses_lst', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch('houses', to_attr='houses_lst'),
Prefetch('houses_lst__rooms', to_attr='rooms_lst')
),
[['houses_lst', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
[['tags', 'content_object'], ['favorite_tags']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch('tags', to_attr='tags_lst'),
Prefetch('tags_lst__content_object'),
Prefetch('favorite_tags'),
),
[['tags_lst', 'content_object'], ['favorite_tags']]
)
self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
# Control lookups.
with self.assertNumQueries(5):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
'primary_house__occupants__houses',
),
[['primary_house', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(5):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('primary_house__occupants', to_attr='occupants_lst'),
'primary_house__occupants_lst__houses',
),
[['primary_house', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
'all_houses__occupants__houses',
),
[['all_houses', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
Prefetch('all_houses__occupants', to_attr='occupants_lst'),
'all_houses__occupants_lst__houses',
),
[['all_houses', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related('houses'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses']]),
self.traverse_qs(lst2, [['houses_lst']])
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
'houses',
queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]),
to_attr='houses_lst',
)
)
)
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__rooms'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'rooms']]),
self.traverse_qs(lst2, [['houses', 'rooms_lst']])
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__owner'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.select_related('owner'))))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'owner']]),
self.traverse_qs(lst2, [['houses', 'owner']])
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
with self.assertNumQueries(4):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'),
Prefetch('houses_lst__rooms_lst__main_room_of')
))
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ForwardManyToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('house')
lst1 = self.traverse_qs(rooms, [['house', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['house', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related('owner')
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'house')
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')
).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
with self.assertNumQueries(2):
getattr(rooms.first().house, 'name')
with self.assertNumQueries(3):
getattr(rooms.first().house, 'address')
# Test ReverseOneToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('main_room_of')
lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(
Room.objects.all().prefetch_related(
Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')
)
)
lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'main_room_of')
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')
).first()
self.assertIsNone(room.main_room_of_attr)
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
person = Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.filter(name='House 1')),
).get(pk=self.person1.pk)
self.assertEqual(
list(person.houses.all()),
list(person.houses.all().all()),
)
def test_nested_prefetch_related_are_not_overwritten(self):
# Regression test for #24873
houses_2 = House.objects.prefetch_related(Prefetch('rooms'))
persons = Person.objects.prefetch_related(Prefetch('houses', queryset=houses_2))
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=persons))
list(houses) # queryset must be evaluated once to reproduce the bug.
self.assertEqual(
houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
self.room2_1
)
def test_nested_prefetch_related_with_duplicate_prefetcher(self):
"""
Nested prefetches whose name clashes with descriptor names
(Person.houses here) are allowed.
"""
occupants = Person.objects.prefetch_related(
Prefetch('houses', to_attr='some_attr_name'),
Prefetch('houses', queryset=House.objects.prefetch_related('main_room')),
)
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=occupants))
with self.assertNumQueries(5):
self.traverse_qs(list(houses), [['occupants', 'houses', 'main_room']])
def test_values_queryset(self):
with self.assertRaisesMessage(ValueError, 'Prefetch querysets cannot use values().'):
Prefetch('houses', House.objects.values('pk'))
# That error doesn't affect managers with custom ModelIterable subclasses
self.assertIs(Teacher.objects_custom.all()._iterable_class, ModelIterableSubclass)
Prefetch('teachers', Teacher.objects_custom.all())
def test_to_attr_doesnt_cache_through_attr_as_list(self):
house = House.objects.prefetch_related(
Prefetch('rooms', queryset=Room.objects.all(), to_attr='to_rooms'),
).get(pk=self.house3.pk)
self.assertIsInstance(house.rooms.all(), QuerySet)
def test_to_attr_cached_property(self):
persons = Person.objects.prefetch_related(
Prefetch('houses', House.objects.all(), to_attr='cached_all_houses'),
)
for person in persons:
# To bypass caching at the related descriptor level, don't use
# person.houses.all() here.
all_houses = list(House.objects.filter(occupants=person))
with self.assertNumQueries(0):
self.assertEqual(person.cached_all_houses, all_houses)
def test_filter_deferred(self):
"""
Related filtering of prefetched querysets is deferred until necessary.
"""
add_q = Query.add_q
with mock.patch.object(
Query,
'add_q',
autospec=True,
side_effect=lambda self, q: add_q(self, q),
) as add_q_mock:
list(House.objects.prefetch_related(
Prefetch('occupants', queryset=Person.objects.all())
))
self.assertEqual(add_q_mock.call_count, 1)
class DefaultManagerTests(TestCase):
def setUp(self):
self.qual1 = Qualification.objects.create(name="BA")
self.qual2 = Qualification.objects.create(name="BSci")
self.qual3 = Qualification.objects.create(name="MA")
self.qual4 = Qualification.objects.create(name="PhD")
self.teacher1 = Teacher.objects.create(name="Mr Cleese")
self.teacher2 = Teacher.objects.create(name="Mr Idle")
self.teacher3 = Teacher.objects.create(name="Mr Chapman")
self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
self.teacher2.qualifications.add(self.qual1)
self.teacher3.qualifications.add(self.qual2)
self.dept1 = Department.objects.create(name="English")
self.dept2 = Department.objects.create(name="Physics")
self.dept1.teachers.add(self.teacher1, self.teacher2)
self.dept2.teachers.add(self.teacher1, self.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related('teachers')
depts = "".join("%s department: %s\n" %
(dept.name, ", ".join(str(t) for t in dept.teachers.all()))
for dept in qs)
self.assertEqual(depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
cls.book1, cls.book2, cls.book3 = book1, book2, book3
cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related('content_object')
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related('content_object')
[c.content_object for c in qs]
def test_prefetch_GFK_uuid_pk(self):
article = Article.objects.create(name='Django')
Comment.objects.create(comment='awesome', content_object_uuid=article)
qs = Comment.objects.prefetch_related('content_object_uuid')
self.assertEqual([c.content_object_uuid for c in qs], [article])
def test_prefetch_GFK_fk_pk(self):
book = Book.objects.create(title='Poems')
book_with_year = BookWithYear.objects.create(book=book, published_year=2019)
Comment.objects.create(comment='awesome', content_object=book_with_year)
qs = Comment.objects.prefetch_related('content_object')
self.assertEqual([c.content_object for c in qs], [book_with_year])
def test_traverse_GFK(self):
"""
        A 'content_object' can be traversed with prefetch_related() to reach
        related objects on the other side (assuming it is suitably
        filtered).
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
readers_of_awesome_books = {r.name for tag in qs
for r in tag.content_object.read_by.all()}
self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
def test_nullable_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1,
created_by=self.reader1)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
self.assertEqual(result,
[t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
for t in b.tags.all()]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=b, tag='django')
TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
self.assertEqual(sorted(i.tag for i in bookmark.tags.all()), ["django", "python"])
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
def test_custom_queryset(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
django_tag = TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
bookmark = Bookmark.objects.prefetch_related(
Prefetch('tags', TaggedItem.objects.filter(tag='django')),
).get()
with self.assertNumQueries(0):
self.assertEqual(list(bookmark.tags.all()), [django_tag])
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))
class MultiTableInheritanceTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.book2 = BookWithYear.objects.create(title='More poems', published_year=2011)
cls.author1 = AuthorWithAge.objects.create(name='Jane', first_book=cls.book1, age=50)
cls.author2 = AuthorWithAge.objects.create(name='Tom', first_book=cls.book1, age=49)
cls.author3 = AuthorWithAge.objects.create(name='Robert', first_book=cls.book2, age=48)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
cls.book2.aged_authors.add(cls.author2, cls.author3)
cls.br1 = BookReview.objects.create(book=cls.book1, notes='review book1')
cls.br2 = BookReview.objects.create(book=cls.book2, notes='review book2')
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()] for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related('book')
titles = [obj.book.title for obj in qs]
self.assertEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related('books_with_year')
with self.assertNumQueries(2):
lst = [[str(book) for book in author.books_with_year.all()] for author in qs]
qs = AuthorWithAge.objects.all()
lst2 = [[str(book) for book in author.books_with_year.all()] for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related('aged_authors')
with self.assertNumQueries(2):
lst = [[str(author) for author in book.aged_authors.all()] for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[str(author) for author in book.aged_authors.all()] for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related('author')]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
authors = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
self.assertIn(' IN ', connection.queries[-1]['sql'])
self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book = Book.objects.create(title='Poems')
cls.author1 = Author.objects.create(name='Jane', first_book=cls.book)
cls.author2 = Author.objects.create(name='Tom', first_book=cls.book)
cls.author3 = Author.objects.create(name='Robert', first_book=cls.book)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3)
FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
favorites = [(
[str(i_like) for i_like in author.favorite_authors.all()],
[str(likes_me) for likes_me in author.favors_me.all()]
) for author in qs]
self.assertEqual(
favorites,
[
([str(self.author2)], [str(self.author3)]),
([str(self.author3)], [str(self.author1)]),
([str(self.author1)], [str(self.author2)])
]
)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
self.house1 = House.objects.create(address="123 Main St")
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.house1.main_room = self.room1_1
self.house1.save()
self.person1.houses.add(self.house1)
self.house2 = House.objects.create(address="45 Side St")
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.house2.main_room = self.room2_1
self.house2.save()
self.person1.houses.add(self.house2)
self.house3 = House.objects.create(address="6 Downing St")
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.house3.main_room = self.room3_1
self.house3.save()
self.person2.houses.add(self.house3)
self.house4 = House.objects.create(address="7 Regents St")
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.house4.main_room = self.room4_1
self.house4.save()
self.person2.houses.add(self.house4)
def test_order(self):
with self.assertNumQueries(4):
# The following two queries must be done in the same order as written,
# otherwise 'primary_house' will cause non-prefetched lookups
qs = Person.objects.prefetch_related('houses__rooms',
'primary_house__occupants')
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
@classmethod
def setUpTestData(cls):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
# Because we use select_related() for 'boss', it doesn't need to be
# prefetched, but we can still traverse it although it contains some nulls
with self.assertNumQueries(2):
qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.select_related('boss')
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.all()
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
        in_bulk() prefetches correctly because it does not use .iterator()
        directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
databases = {'default', 'other'}
def test_using_is_honored_m2m(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related('authors')
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1)
self.assertEqual(books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n")
# Reverse
qs2 = A.prefetch_related('books')
with self.assertNumQueries(2, using='other'):
authors = "".join("%s: %s\n" %
(author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2)
self.assertEqual(authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n")
def test_using_is_honored_fkey(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using='other'):
books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related('first_time_authors'))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using('other')
A = AuthorWithAge.objects.using('other')
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name='Jane', first_book=book1, age=50)
A.create(name='Tom', first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using='other'):
authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using='other'):
ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on the same db.
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on a different db.
with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems ()\n"
"Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
def setUp(self):
for id, name1, name2 in [
(1, 'einfach', 'simple'),
(2, 'schwierig', 'difficult'),
]:
LessonEntry.objects.create(id=id, name1=name1, name2=name2)
for id, lesson_entry_id, name in [
(1, 1, 'einfach'),
(2, 1, 'simple'),
(3, 2, 'schwierig'),
(4, 2, 'difficult'),
]:
WordEntry.objects.create(id=id, lesson_entry_id=lesson_entry_id, name=name)
def test_bug(self):
list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author2.objects.create(name="Charlotte", first_book=self.book1)
self.author2 = Author2.objects.create(name="Anne", first_book=self.book1)
self.author3 = Author2.objects.create(name="Emily", first_book=self.book1)
self.author4 = Author2.objects.create(name="Jane", first_book=self.book4)
self.author1.favorite_books.add(self.book1, self.book2, self.book3)
self.author2.favorite_books.add(self.book1)
self.author3.favorite_books.add(self.book2)
self.author4.favorite_books.add(self.book3)
def test_bug(self):
list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
def setUp(self):
self.rooms = []
for _ in range(3):
house = House.objects.create()
for _ in range(3):
self.rooms.append(Room.objects.create(house=house))
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house.main_room = self.rooms[-3]
house.save()
def test_bug(self):
prefetcher = get_prefetcher(self.rooms[0], 'house', 'house')[0]
queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
self.assertNotIn(' JOIN ', str(queryset.query))
class DirectPrefetchedObjectCacheReuseTests(TestCase):
"""
prefetch_related() reuses objects fetched in _prefetched_objects_cache.
When objects are prefetched and not stored as an instance attribute (often
intermediary relationships), they are saved to the
_prefetched_objects_cache attribute. prefetch_related() takes
_prefetched_objects_cache into account when determining whether an object
has been fetched[1] and retrieves results from it when it is populated [2].
[1]: #25546 (duplicate queries on nested Prefetch)
[2]: #27554 (queryset evaluation fails with a mix of nested and flattened
prefetches)
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title='book1'),
Book.objects.create(title='book2'),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name='Author11'),
Author.objects.create(first_book=cls.book1, name='Author12'),
Author.objects.create(first_book=cls.book2, name='Author21'),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address='Happy place'),
AuthorAddress.objects.create(author=cls.author12, address='Haunted house'),
AuthorAddress.objects.create(author=cls.author21, address='Happy place'),
]
cls.bookwithyear1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.bookreview1 = BookReview.objects.create(book=cls.bookwithyear1)
def test_detect_is_fetched(self):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the same
lookup.
"""
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertSequenceEqual(book1.first_time_authors.all(), [self.author11, self.author12])
self.assertSequenceEqual(book2.first_time_authors.all(), [self.author21])
self.assertSequenceEqual(book1.first_time_authors.all()[0].addresses.all(), [self.author1_address1])
self.assertSequenceEqual(book1.first_time_authors.all()[1].addresses.all(), [])
self.assertSequenceEqual(book2.first_time_authors.all()[0].addresses.all(), [self.author2_address1])
self.assertEqual(
list(book1.first_time_authors.all()), list(book1.first_time_authors.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()), list(book2.first_time_authors.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[0].addresses.all()),
list(book1.first_time_authors.all()[0].addresses.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[1].addresses.all()),
list(book1.first_time_authors.all()[1].addresses.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()[0].addresses.all()),
list(book2.first_time_authors.all()[0].addresses.all().all())
)
def test_detect_is_fetched_with_to_attr(self):
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
to_attr='happy_place',
)
),
to_attr='first_authors',
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertEqual(book1.first_authors, [self.author11, self.author12])
self.assertEqual(book2.first_authors, [self.author21])
self.assertEqual(book1.first_authors[0].happy_place, [self.author1_address1])
self.assertEqual(book1.first_authors[1].happy_place, [])
self.assertEqual(book2.first_authors[0].happy_place, [self.author2_address1])
def test_prefetch_reverse_foreign_key(self):
with self.assertNumQueries(2):
bookwithyear1, = BookWithYear.objects.prefetch_related('bookreview_set')
with self.assertNumQueries(0):
self.assertCountEqual(bookwithyear1.bookreview_set.all(), [self.bookreview1])
with self.assertNumQueries(0):
prefetch_related_objects([bookwithyear1], 'bookreview_set')
def test_add_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], 'bookreview_set')
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
new_review = BookReview.objects.create()
bookwithyear.bookreview_set.add(new_review)
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1, new_review])
def test_remove_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], 'bookreview_set')
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
bookwithyear.bookreview_set.remove(self.bookreview1)
self.assertCountEqual(bookwithyear.bookreview_set.all(), [])
class ReadPrefetchedObjectsCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Les confessions Volume I')
cls.book2 = Book.objects.create(title='Candide')
cls.author1 = AuthorWithAge.objects.create(name='Rousseau', first_book=cls.book1, age=70)
cls.author2 = AuthorWithAge.objects.create(name='Voltaire', first_book=cls.book2, age=65)
cls.book1.authors.add(cls.author1)
cls.book2.authors.add(cls.author2)
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
def test_retrieves_results_from_prefetched_objects_cache(self):
"""
When intermediary results are prefetched without a destination
attribute, they are saved in the RelatedManager's cache
(_prefetched_objects_cache). prefetch_related() uses this cache
(#27554).
"""
authors = AuthorWithAge.objects.prefetch_related(
Prefetch(
'author',
queryset=Author.objects.prefetch_related(
# Results are saved in the RelatedManager's cache
# (_prefetched_objects_cache) and do not replace the
# RelatedManager on Author instances (favorite_authors)
Prefetch('favorite_authors__first_book'),
),
),
)
with self.assertNumQueries(4):
# AuthorWithAge -> Author -> FavoriteAuthors, Book
self.assertQuerysetEqual(authors, ['<AuthorWithAge: Rousseau>', '<AuthorWithAge: Voltaire>'])
|
{
"content_hash": "d8eec59d60dcc5df31352a708f2e9f50",
"timestamp": "",
"source": "github",
"line_count": 1570,
"max_line_length": 119,
"avg_line_length": 43.55796178343949,
"alnum_prop": 0.5958090837305882,
"repo_name": "georgemarshall/django",
"id": "5b944a456bad1d8cb566c9c425462ffdba6b6ba9",
"size": "68386",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/prefetch_related/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53023"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448123"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12112373"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from vis import cvmgr
import simplejson as json
imgpath = "/home/deepak/work/ml-api/data/__vision/demo/people.jpg"
print(cvmgr.detectfaces(imgpath))
imgpath = "/home/deepak/work/ml-api/data/__vision/demo/ocr.jpg"
print(cvmgr.extracttext(imgpath, "blur"))
|
{
"content_hash": "34870a566ddfa914ab6d3f3ad117051c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 66,
"avg_line_length": 32,
"alnum_prop": 0.76171875,
"repo_name": "deepakkumar1984/sia-cog",
"id": "fcf628e7dc5877f2ab800bcb5cea38c95da313b2",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/testvision.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10425"
},
{
"name": "C++",
"bytes": "146"
},
{
"name": "Cuda",
"bytes": "5064"
},
{
"name": "Makefile",
"bytes": "264"
},
{
"name": "Python",
"bytes": "394909"
},
{
"name": "Shell",
"bytes": "1732"
}
],
"symlink_target": ""
}
|
"""Analyzes a text file and computes metrics"""
import os
import unittest
def analyze_text(filename):
lines = 0
chars = 0
with open(filename, mode='rt', encoding='utf-8') as file:
for line in file:
lines += 1
chars += len(line)
return lines, chars
class TextAnalysisTests(unittest.TestCase):
"""Tests for the ''analyze_text()'' function."""
def setUp(self):
"""Fixture that creates a file for the text methods to use."""
self.filename = 'text_analysis_test_file.txt'
with open(self.filename, 'w') as f:
f.write('Now we are engaged in a great civil war.\n'
'testing whether that nation,\n'
'or any nation so conceived and so dedicated,\n'
'can long endure.')
def tearDown(self):
"""Fixture that deletes the files used by the test methods."""
try:
os.remove(self.filename)
except:
pass
def test_function_runs(self):
"""Basic smoke test: does the function run"""
analyze_text(self.filename)
def test_line_count(self):
"""Check that the line count is correct."""
self.assertEqual(analyze_text(self.filename)[0], 4)
def test_character_count(self):
"""Check that the character count is correct"""
self.assertEqual(analyze_text(self.filename)[1], 131)
def test_no_such_file(self):
"""Check the proper exception is thrown for a missing file."""
with self.assertRaises(IOError):
analyze_text("nonexistent")
def test_no_deletion(self):
"""Check that the function doesn't delete the input file"""
analyze_text(self.filename)
self.assertTrue(os.path.exists(self.filename))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "01249a91b76ee9c3560ad27d885bc308",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 70,
"avg_line_length": 30.833333333333332,
"alnum_prop": 0.5962162162162162,
"repo_name": "kentoj/python-fundamentals",
"id": "1ba1a790fad5e9515c3ad5ce4aabd2e20fad37d9",
"size": "1850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "text_analyzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20605"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
class SvdOpTest(test.TestCase):
def testWrongDimensions(self):
# The input to svd should be a tensor of at least rank 2.
scalar = constant_op.constant(1.)
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 0"):
linalg_ops.svd(scalar)
vector = constant_op.constant([1., 2.])
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 1"):
linalg_ops.svd(vector)
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
all_ops = []
for compute_uv_ in True, False:
for full_matrices_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
if compute_uv_:
s1, u1, v1 = linalg_ops.svd(
matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
s2, u2, v2 = linalg_ops.svd(
matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
all_ops += [s1, u1, v1, s2, u2, v2]
else:
s1 = linalg_ops.svd(
matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
s2 = linalg_ops.svd(
matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
all_ops += [s1, s2]
val = sess.run(all_ops)
for i in range(2):
s = 6 * i
self.assertAllEqual(val[s], val[s + 3]) # s1 == s2
self.assertAllEqual(val[s + 1], val[s + 4]) # u1 == u2
self.assertAllEqual(val[s + 2], val[s + 5]) # v1 == v2
for i in range(2):
s = 12 + 2 * i
self.assertAllEqual(val[s], val[s + 1]) # s1 == s2
def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,
full_matrices_):
def CompareSingularValues(self, x, y, tol):
self.assertAllClose(x, y, atol=(x[0] + y[0]) * tol)
def CompareSingularVectors(self, x, y, rank, tol):
# We only compare the first 'rank' singular vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Singular vectors are only unique up to sign (complex phase factor for
# complex matrices), so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
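    # For matching columns, each entry of y/x is (approximately) the same
    # unit-modulus phase, so summing over the row axis and renormalizing to
    # unit modulus gives a noise-robust estimate of the per-column phase that
    # is then applied to x.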
self.assertAllClose(x, y, atol=2 * tol)
def CheckApproximation(self, a, u, s, v, full_matrices_, tol):
# Tests that a ~= u*diag(s)*transpose(v).
batch_shape = a.shape[:-2]
m = a.shape[-2]
n = a.shape[-1]
diag_s = math_ops.cast(array_ops.matrix_diag(s), dtype=dtype_)
if full_matrices_:
if m > n:
zeros = array_ops.zeros(batch_shape + (m - n, n), dtype=dtype_)
diag_s = array_ops.concat([diag_s, zeros], a.ndim - 2)
elif n > m:
zeros = array_ops.zeros(batch_shape + (m, n - m), dtype=dtype_)
diag_s = array_ops.concat([diag_s, zeros], a.ndim - 1)
a_recon = math_ops.matmul(u, diag_s)
a_recon = math_ops.matmul(a_recon, v, adjoint_b=True)
self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)
def CheckUnitary(self, x, tol):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = math_ops.matmul(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
self.assertAllClose(identity.eval(), self.evaluate(xx), atol=tol)
def Test(self):
is_complex = dtype_ in (np.complex64, np.complex128)
is_single = dtype_ in (np.float32, np.complex64)
tol = 3e-4 if is_single else 1e-12
if test.is_gpu_available():
# The gpu version returns results that are much less accurate.
tol *= 100
np.random.seed(42)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if is_complex:
x_np += 1j * np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
with self.session(use_gpu=True) as sess:
if use_static_shape_:
x_tf = constant_op.constant(x_np)
else:
x_tf = array_ops.placeholder(dtype_)
if compute_uv_:
s_tf, u_tf, v_tf = linalg_ops.svd(
x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
if use_static_shape_:
s_tf_val, u_tf_val, v_tf_val = sess.run([s_tf, u_tf, v_tf])
else:
s_tf_val, u_tf_val, v_tf_val = sess.run(
[s_tf, u_tf, v_tf], feed_dict={x_tf: x_np})
else:
s_tf = linalg_ops.svd(
x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
if use_static_shape_:
s_tf_val = sess.run(s_tf)
else:
s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})
if compute_uv_:
u_np, s_np, v_np = np.linalg.svd(
x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
else:
s_np = np.linalg.svd(
x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
# We explicitly avoid the situation where numpy eliminates a first
# dimension that is equal to one.
s_np = np.reshape(s_np, s_tf_val.shape)
CompareSingularValues(self, s_np, s_tf_val, tol)
if compute_uv_:
CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]), tol)
CompareSingularVectors(self,
np.conj(np.swapaxes(v_np, -2, -1)), v_tf_val,
min(shape_[-2:]), tol)
CheckApproximation(self, x_np, u_tf_val, s_tf_val, v_tf_val,
full_matrices_, tol)
CheckUnitary(self, u_tf_val, tol)
CheckUnitary(self, v_tf_val, tol)
return Test
class SvdGradOpTest(test.TestCase):
pass # Filled in below
def _GetSvdGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):
def _NormalizingSvd(tf_a):
tf_s, tf_u, tf_v = linalg_ops.svd(
tf_a, compute_uv=True, full_matrices=full_matrices_)
    # Singular vectors are only unique up to an arbitrary phase. We normalize
    # the vectors such that the entries in the first row of u (if m >= n) or
    # v (if n > m) have phase 0.
m = tf_a.shape[-2]
n = tf_a.shape[-1]
if m >= n:
top_rows = tf_u[..., 0:1, :]
else:
top_rows = tf_v[..., 0:1, :]
if tf_u.dtype.is_complex:
angle = -math_ops.angle(top_rows)
phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
else:
phase = math_ops.sign(top_rows)
tf_u *= phase[..., :m]
tf_v *= phase[..., :n]
return tf_s, tf_u, tf_v
def Test(self):
np.random.seed(42)
a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
# See Equation (21) in:
# http://www.karenkopecky.net/Teaching/eco613614/Notes_NumericalDifferentiation.pdf
# TODO(rmlarsen): Move step size control to gradient checker.
epsilon = np.finfo(dtype_).eps
delta = 0.1 * epsilon**(1.0 / 3.0)
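    # As a rough guide: np.finfo(np.float32).eps is about 1.2e-7, which gives
    # delta around 5e-4; np.finfo(np.float64).eps is about 2.2e-16, which
    # gives delta around 6e-7.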
if dtype_ in [np.float32, np.complex64]:
tol = 3e-2
else:
tol = 1e-6
with self.session(use_gpu=True):
tf_a = constant_op.constant(a)
if compute_uv_:
tf_s, tf_u, tf_v = _NormalizingSvd(tf_a)
outputs = [tf_s, tf_u, tf_v]
else:
tf_s = linalg_ops.svd(tf_a, compute_uv=False)
outputs = [tf_s]
for b in outputs:
x_init = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
x_init += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
theoretical, numerical = gradient_checker.compute_gradient(
tf_a,
tf_a.get_shape().as_list(),
b,
b.get_shape().as_list(),
x_init_value=x_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
if __name__ == "__main__":
for compute_uv in False, True:
for full_matrices in False, True:
for dtype in np.float32, np.float64, np.complex64, np.complex128:
for rows in 1, 2, 5, 10, 32, 100:
for cols in 1, 2, 5, 10, 32, 100:
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
shape = batch_dims + (rows, cols)
for use_static_shape in True, False:
name = "%s_%s_static_shape_%s__compute_uv_%s_full_%s" % (
dtype.__name__, "_".join(map(str, shape)), use_static_shape,
compute_uv, full_matrices)
_AddTest(SvdOpTest, "Svd", name,
_GetSvdOpTest(dtype, shape, use_static_shape,
compute_uv, full_matrices))
for compute_uv in False, True:
for full_matrices in False, True:
dtypes = ([np.float32, np.float64]
+ [np.complex64, np.complex128] * (not compute_uv))
for dtype in dtypes:
mat_shapes = [(10, 11), (11, 10), (11, 11)]
if not full_matrices or not compute_uv:
mat_shapes += [(5, 11), (11, 5)]
for mat_shape in mat_shapes:
for batch_dims in [(), (3,)]:
shape = batch_dims + mat_shape
name = "%s_%s_compute_uv_%s_full_%s" % (
dtype.__name__, "_".join(map(str, shape)), compute_uv,
full_matrices)
_AddTest(SvdGradOpTest, "SvdGrad", name,
_GetSvdGradOpTest(dtype, shape, compute_uv, full_matrices))
test.main()
|
{
"content_hash": "6b521bc0e03e76087316c40a7b23fd4d",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 87,
"avg_line_length": 39.59782608695652,
"alnum_prop": 0.5811144660993687,
"repo_name": "brchiu/tensorflow",
"id": "32c97a7b1914b02169246fed90b486e610743c34",
"size": "11618",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/svd_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "473950"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "51674376"
},
{
"name": "CMake",
"bytes": "199085"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285435"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "875500"
},
{
"name": "Jupyter Notebook",
"bytes": "2623054"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "63390"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41718475"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "490100"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import getpass
import logging
import os
import sys
import paramiko
from msshcopyid.constants import DEFAULT_SSH_CONFIG, DEFAULT_SSH_PORT
import msshcopyid
logger = logging.getLogger(__name__)
def get_password(from_stdin_only=False):
"""
    Get a password either from STDIN or by prompting the user.
    :param from_stdin_only: if True, never prompt interactively; the password
        is only read when STDIN is piped, otherwise None is returned.
    :return: the password, or None if no password could be read.
"""
if not sys.stdin.isatty():
password = sys.stdin.readline().strip()
elif not from_stdin_only:
password = getpass.getpass('Enter the password: ')
else:
password = None
return password
def load_ssh_config(config=DEFAULT_SSH_CONFIG):
ssh_config = paramiko.config.SSHConfig()
if os.path.isfile(config):
with open(config) as fh:
ssh_config.parse(fh)
logger.debug('Loaded SSH configuration from [%s]', config)
else:
logger.debug('SSH config file "{0}" not found.'.format(config))
return ssh_config
def parse_hosts(hosts, ssh_port=None, ssh_config=None):
"""
Parse a list of hosts (string) and return a list of `msshcopyid.Host` objects.
    The information about each host is taken in this order of priority:
- host:
- from the host (string) itself.
- user:
- from the host (string) itself.
- from the `paramiko.config.SSHConfig` object.
- current logged user.
- port:
        - from the function argument `ssh_port`.
- from the `paramiko.config.SSHConfig` object.
- default SSH port: 22
    :param hosts: list of hosts (string). E.g.: ['server1', 'user1@server2']
    :param ssh_port: SSH port to use for all hosts (takes priority over the
        SSH config).
    :param ssh_config: a `paramiko.config.SSHConfig` object.
:return: a list of `msshcopyid.Host` objects.
"""
host_list = [] # list of Host objects
current_user = getpass.getuser()
for host in hosts:
# host_info = {'hostname': 'server1', 'hashknownhosts': 'no', 'user': 'user1'}
if ssh_config is not None:
host_info = ssh_config.lookup(host)
else:
host_info = {}
# hostname & user
if '@' in host:
user, hostname = host.split('@', 1)
else:
hostname = host
user = host_info.get('user', current_user)
# port
port = ssh_port or host_info.get('port', DEFAULT_SSH_PORT)
host_list.append(msshcopyid.Host(hostname=hostname, port=port, user=user))
return host_list
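# Illustrative usage sketch (the host names are hypothetical):
#   ssh_config = load_ssh_config()
#   hosts = parse_hosts(['server1', 'user1@server2'], ssh_config=ssh_config)
#   'user1@server2' keeps the user 'user1'; 'server1' falls back to the SSH
#   config entry or the current user; both use port 22 unless the SSH config
#   or the ssh_port argument says otherwise.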
|
{
"content_hash": "dc89a0dc94a6b784539c9da41774bbad",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 86,
"avg_line_length": 28.38095238095238,
"alnum_prop": 0.6182885906040269,
"repo_name": "samuel-phan/mssh-copy-id",
"id": "31444a83deade5bc1ba550bde08cecf24140061e",
"size": "2384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "msshcopyid/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "265"
},
{
"name": "Python",
"bytes": "80866"
},
{
"name": "Shell",
"bytes": "549"
}
],
"symlink_target": ""
}
|
import os
from utils import DotDict, namedtuple_with_defaults, zip_namedtuple, config_as_dict
RandCropper = namedtuple_with_defaults('RandCropper',
'min_crop_scales, max_crop_scales, \
min_crop_aspect_ratios, max_crop_aspect_ratios, \
min_crop_overlaps, max_crop_overlaps, \
min_crop_sample_coverages, max_crop_sample_coverages, \
min_crop_object_coverages, max_crop_object_coverages, \
max_crop_trials',
[0.0, 1.0,
0.5, 2.0,
0.0, 1.0,
0.0, 1.0,
0.0, 1.0,
25])
RandPadder = namedtuple_with_defaults('RandPadder',
'rand_pad_prob, max_pad_scale, fill_value',
[0.0, 1.0, 127])
ColorJitter = namedtuple_with_defaults('ColorJitter',
'random_hue_prob, max_random_hue, \
random_saturation_prob, max_random_saturation, \
random_illumination_prob, max_random_illumination, \
random_contrast_prob, max_random_contrast',
[0.0, 18,
0.0, 32,
0.0, 32,
0.0, 0.5])
cfg = DotDict()
cfg.ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# training configs
cfg.train = DotDict()
# random cropping samplers
cfg.train.rand_crop_samplers = [
RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.1),
RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.3),
RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.5),
RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.7),
RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.9),]
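# Fields not set explicitly above fall back to the RandCropper defaults
# declared earlier (e.g. max_crop_scales=1.0, max_crop_aspect_ratios=2.0,
# max_crop_trials=25), assuming namedtuple_with_defaults fills unspecified
# fields from its default list.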
cfg.train.crop_emit_mode = 'center'
# cfg.train.emit_overlap_thresh = 0.4
# random padding
cfg.train.rand_pad = RandPadder(rand_pad_prob=0.5, max_pad_scale=4.0)
# random color jitter
cfg.train.color_jitter = ColorJitter(random_hue_prob=0.5, random_saturation_prob=0.5,
random_illumination_prob=0.5, random_contrast_prob=0.5)
cfg.train.inter_method = 10 # random interpolation
cfg.train.rand_mirror_prob = 0.5
cfg.train.shuffle = True
cfg.train.seed = 233
cfg.train.preprocess_threads = 48
cfg.train = config_as_dict(cfg.train) # convert to normal dict
# validation
cfg.valid = DotDict()
cfg.valid.rand_crop_samplers = []
cfg.valid.rand_pad = RandPadder()
cfg.valid.color_jitter = ColorJitter()
cfg.valid.rand_mirror_prob = 0
cfg.valid.shuffle = False
cfg.valid.seed = 0
cfg.valid.preprocess_threads = 32
cfg.valid = config_as_dict(cfg.valid) # convert to normal dict
|
{
"content_hash": "7deea9076b120ed3394f164b8d80b432",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 85,
"avg_line_length": 34.10294117647059,
"alnum_prop": 0.6994394135403191,
"repo_name": "lxn2/mxnet",
"id": "278b770febe97b6cb489e4b8ec23baebd5d0784a",
"size": "2319",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "example/ssd/config/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10619"
},
{
"name": "C",
"bytes": "89393"
},
{
"name": "C++",
"bytes": "3189126"
},
{
"name": "CMake",
"bytes": "48546"
},
{
"name": "Cuda",
"bytes": "566898"
},
{
"name": "Groovy",
"bytes": "217"
},
{
"name": "Java",
"bytes": "16368"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "40032"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "615878"
},
{
"name": "Perl6",
"bytes": "21993"
},
{
"name": "Protocol Buffer",
"bytes": "77256"
},
{
"name": "Python",
"bytes": "3084885"
},
{
"name": "R",
"bytes": "280777"
},
{
"name": "Scala",
"bytes": "855146"
},
{
"name": "Shell",
"bytes": "109919"
}
],
"symlink_target": ""
}
|
'''@file dnn.py
The DNN neural network classifier'''
import tensorflow as tf
from nabu.neuralnetworks.classifiers import classifier, layer, activation
class DNN(classifier.Classifier):
'''a DNN classifier'''
def _get_outputs(self, inputs, input_seq_length, targets=None,
target_seq_length=None, is_training=False):
'''
Add the neural net variables and operations to the graph
Args:
inputs: the inputs to the neural network, this is a
[batch_size x max_input_length x feature_dim] tensor
input_seq_length: The sequence lengths of the input utterances, this
is a [batch_size] vector
targets: the targets to the neural network, this is a
[batch_size x max_output_length] tensor. The targets can be
used during training
target_seq_length: The sequence lengths of the target utterances,
this is a [batch_size] vector
is_training: whether or not the network is in training mode
Returns:
A pair containing:
- output logits
- the output logits sequence lengths as a vector
'''
#build the activation function
#batch normalisation
if self.conf['batch_norm'] == 'True':
act = activation.Batchnorm(None)
else:
act = None
#non linearity
if self.conf['nonlin'] == 'relu':
act = activation.TfActivation(act, tf.nn.relu)
elif self.conf['nonlin'] == 'sigmoid':
act = activation.TfActivation(act, tf.nn.sigmoid)
elif self.conf['nonlin'] == 'tanh':
act = activation.TfActivation(act, tf.nn.tanh)
elif self.conf['nonlin'] == 'linear':
            act = activation.TfActivation(act, lambda x: x)
else:
            raise Exception('unknown nonlinearity')
#L2 normalization
if self.conf['l2_norm'] == 'True':
act = activation.L2Norm(act)
#dropout
if float(self.conf['dropout']) < 1:
act = activation.Dropout(act, float(self.conf['dropout']))
#input and hidden layer
hidlayer = layer.Linear(int(self.conf['num_units']))
#output layer
outlayer = layer.Linear(self.output_dim)
#do the forward computation
activations = [None]*int(self.conf['num_layers'])
activations[0] = act(hidlayer(inputs, 'layer0'), is_training)
for l in range(1, int(self.conf['num_layers'])):
activations[l] = act(hidlayer(activations[l-1],
'layer' + str(l)), is_training)
logits = activations[-1]
logits = outlayer(logits, 'layer' + self.conf['num_layers'])
return logits, input_seq_length
|
{
"content_hash": "55e8d622a493d18caef8290429b00c57",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 36.0253164556962,
"alnum_prop": 0.5801124385101898,
"repo_name": "JeroenBosmans/nabu",
"id": "7b70be50e967870a231c1992443ee3138946195f",
"size": "2846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nabu/neuralnetworks/classifiers/asr/dnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "395778"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
}
|
"""
Setup file for zipline_poloniex.
This file was generated with PyScaffold 2.5.7, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import sys
from setuptools import setup
def setup_package():
needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
sphinx = ['sphinx'] if needs_sphinx else []
setup(setup_requires=['six', 'pyscaffold>=2.5a0,<2.6a0'] + sphinx,
use_pyscaffold=True)
if __name__ == "__main__":
setup_package()
|
{
"content_hash": "4a1768b5cf2acf0c651c8e197a3c1067",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 26.761904761904763,
"alnum_prop": 0.6619217081850534,
"repo_name": "FlorianWilhelm/zipline-poloniex",
"id": "b8c16d364867931687f45ad0f0597ea3670eb0c1",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "47954"
},
{
"name": "Python",
"bytes": "15488"
}
],
"symlink_target": ""
}
|
"""
Django settings for foos project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f7ulx424+7@tn3pycm$oz-w^+&rf8fu@61%d1xlah$g1@j%1k('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'players',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'foos.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'foos.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "4340e0203589862d9008840bbc74ff0f",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 71,
"avg_line_length": 25.699029126213592,
"alnum_prop": 0.6894597657725727,
"repo_name": "alexkau/foos",
"id": "62c3779b434587d0dc96ea57bac9a07e068f0bb0",
"size": "2647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foos/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4572"
}
],
"symlink_target": ""
}
|
""" Utilities
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: (c) 2010-2012 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import datetime
import os
import re
import sys
import urllib
import HTMLParser
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.storage import Storage
from gluon.dal import Row
from gluon.sqlhtml import SQLTABLE
from gluon.tools import Crud
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from s3validators import IS_UTC_OFFSET
DEBUG = False
if DEBUG:
print >> sys.stderr, "S3Utils: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
# =============================================================================
def s3_debug(message, value=None):
"""
Debug Function (same name/parameters as JavaScript one)
Provide an easy, safe, systematic way of handling Debug output
(print to stdout doesn't work with WSGI deployments)
@ToDo: Should be using python's built-in logging module?
"""
output = "S3 Debug: %s" % s3_unicode(message)
if value:
output = "%s: %s" % (output, s3_unicode(value))
try:
print >> sys.stderr, output
except:
# Unicode string
print >> sys.stderr, "Debug crashed"
# =============================================================================
def s3_dev_toolbar():
"""
Developer Toolbar - ported from gluon.Response.toolbar()
Shows useful stuff at the bottom of the page in Debug mode
"""
try:
# New web2py
from gluon.dal import THREAD_LOCAL
except:
# Old web2py
from gluon.dal import thread as THREAD_LOCAL
from gluon.utils import web2py_uuid
BUTTON = TAG.button
if hasattr(THREAD_LOCAL, "instances"):
dbstats = [TABLE(*[TR(PRE(row[0]),
"%.2fms" % (row[1]*1000)) \
for row in i.db._timings]) \
for i in THREAD_LOCAL.instances]
else:
dbstats = [] # if no db or on GAE
u = web2py_uuid()
return DIV(
BUTTON("request", _onclick="$('#request-%s').slideToggle()" % u),
DIV(BEAUTIFY(current.request), _class="dbg_hidden", _id="request-%s" % u),
BUTTON("session", _onclick="$('#session-%s').slideToggle()" % u),
DIV(BEAUTIFY(current.session), _class="dbg_hidden", _id="session-%s" % u),
# Disabled response as it breaks S3SearchLocationWidget
#BUTTON("response", _onclick="$('#response-%s').slideToggle()" % u),
#DIV(BEAUTIFY(current.response), _class="dbg_hidden", _id="response-%s" % u),
BUTTON("db stats", _onclick="$('#db-stats-%s').slideToggle()" % u),
DIV(BEAUTIFY(dbstats), _class="dbg_hidden", _id="db-stats-%s" % u),
SCRIPT("$('.dbg_hidden').hide()")
)
# =============================================================================
def s3_mark_required(fields,
mark_required=[],
label_html=(lambda field_label:
DIV("%s:" % field_label,
SPAN(" *", _class="req")))):
"""
Add asterisk to field label if a field is required
@param fields: list of fields (or a table)
@param mark_required: list of field names which are always required
        @returns: a tuple (labels dict, flag indicating whether any field is
                  required), or None if there are no labels
@todo: complete parameter description?
"""
labels = dict()
# Do we have any required fields?
_required = False
for field in fields:
if field.writable:
validators = field.requires
if isinstance(validators, IS_EMPTY_OR) and field.name not in mark_required:
# Allow notnull fields to be marked as not required
# if we populate them onvalidation
labels[field.name] = "%s:" % field.label
continue
else:
required = field.required or field.notnull or \
field.name in mark_required
if not validators and not required:
labels[field.name] = "%s:" % field.label
continue
if not required:
if not isinstance(validators, (list, tuple)):
validators = [validators]
for v in validators:
if hasattr(v, "options"):
if hasattr(v, "zero") and v.zero is None:
continue
if hasattr(v, "mark_required"):
if v.mark_required:
required = True
break
else:
continue
try:
val, error = v("")
except TypeError:
# default validator takes no args
pass
else:
if error:
required = True
break
if required:
_required = True
labels[field.name] = label_html(field.label)
else:
labels[field.name] = "%s:" % field.label
else:
labels[field.name] = "%s:" % field.label
if labels:
return (labels, _required)
else:
return None
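# Illustrative usage sketch (the table name is hypothetical):
#   result = s3_mark_required(db.my_table, mark_required=["name"])
#   if result:
#       labels, required = result
#   labels maps each field name to either "Label:" or a DIV whose label is
#   followed by a SPAN(" *") when the field is required; required is True if
#   any field in the table is required.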
# =============================================================================
def s3_truncate(text, length=48, nice=True):
"""
Nice truncating of text
@param text: the text
@param length: the maximum length
@param nice: do not truncate words
"""
if len(text) > length:
if nice:
return "%s..." % text[:length].rsplit(" ", 1)[0][:45]
else:
return "%s..." % text[:45]
else:
return text
# =============================================================================
def s3_split_multi_value(value):
"""
        Converts a series of values delimited by |, or a string
        representation of a list, into a Python list; returns [] if value is None
@todo: parameter description
@todo: is this still used?
"""
if not value:
return []
    elif isinstance(value, str):
if "[" in value:
#Remove internal lists
value = value.replace("[", "")
value = value.replace("]", "")
value = value.replace("'", "")
value = value.replace('"', "")
return eval("[" + value + "]")
else:
return re.compile('[\w\-:]+').findall(str(value))
else:
return [str(value)]
# =============================================================================
def s3_get_db_field_value(tablename=None,
fieldname=None,
look_up_value=None,
look_up_field="id",
match_case=True):
"""
Returns the value of <field> from the first record in <table_name>
with <look_up_field> = <look_up>
@param table: The name of the table
@param field: the field to find the value from
@param look_up: the value to find
@param look_up_field: the field to find <look_up> in
@type match_case: boolean
@returns:
- Field Value if there is a record
- None - if there is no matching record
Example::
s3_get_db_field_value("or_organisation", "id",
look_up = "UNDP",
look_up_field = "name" )
@todo: update parameter description
"""
db = current.db
lt = db[tablename]
lf = lt[look_up_field]
if match_case or str(lf.type) != "string":
query = (lf == look_up_value)
else:
query = (lf.lower() == str.lower(look_up_value))
if "deleted" in lt:
query = (lt.deleted == False) & query
row = db(query).select(lt[fieldname], limitby=(0, 1)).first()
return row and row[fieldname] or None
# =============================================================================
def s3_filter_staff(r):
"""
        Filter out people who are already staff for this facility
@todo: make the Person-AC pick up the filter options from
the person_id field (currently not implemented)
"""
db = current.db
try:
hrtable = db.hrm_human_resource
site_id = r.record.site_id
person_id_field = r.target()[2].person_id
except:
return
query = (hrtable.site_id == site_id) & \
(hrtable.deleted == False)
staff = db(query).select(hrtable.person_id)
person_ids = [row.person_id for row in staff]
try:
person_id_field.requires.set_filter(not_filterby = "id",
not_filter_opts = person_ids)
except:
pass
# =============================================================================
def s3_format_fullname(fname=None, mname=None, lname=None, truncate=True):
"""
Formats the full name of a person
@param fname: the person's pr_person.first_name value
@param mname: the person's pr_person.middle_name value
@param lname: the person's pr_person.last_name value
@param truncate: truncate the name to max 24 characters
"""
name = ""
if fname or mname or lname:
if not fname:
fname = ""
if not mname:
mname = ""
if not lname:
lname = ""
if truncate:
fname = "%s" % s3_truncate(fname, 24)
mname = "%s" % s3_truncate(mname, 24)
lname = "%s" % s3_truncate(lname, 24, nice = False)
if not mname or mname.isspace():
name = ("%s %s" % (fname, lname)).rstrip()
else:
name = ("%s %s %s" % (fname, mname, lname)).rstrip()
if truncate:
name = s3_truncate(name, 24, nice = False)
return name
# =============================================================================
def s3_fullname(person=None, pe_id=None, truncate=True):
"""
Returns the full name of a person
@param person: the pr_person record or record_id or a list of record_ids
(last used by gis.get_representation())
@param pe_id: alternatively, the person entity ID
@param truncate: truncate the name to max 24 characters
"""
DEFAULT = ""
db = current.db
ptable = db.pr_person
record = None
query = None
rows = None
if isinstance(person, (int, long)) or str(person).isdigit():
query = (ptable.id == person) & (ptable.deleted != True)
elif isinstance(person, list):
query = (ptable.id.belongs(person)) & (ptable.deleted != True)
rows = db(query).select(ptable.id,
ptable.first_name,
ptable.middle_name,
ptable.last_name)
elif person is not None:
record = person
elif pe_id is not None:
query = (ptable.pe_id == pe_id) & (ptable.deleted != True)
if not record and not rows and query is not None:
record = db(query).select(ptable.first_name,
ptable.middle_name,
ptable.last_name,
limitby=(0, 1)).first()
if record:
fname, mname, lname = "", "", ""
if "pr_person" in record:
record = record["pr_person"]
if record.first_name:
fname = record.first_name.strip()
if record.middle_name:
mname = record.middle_name.strip()
if record.last_name:
lname = record.last_name.strip()
return s3_format_fullname(fname, mname, lname, truncate)
elif rows:
represents = {}
for row in rows:
fname, mname, lname = "", "", ""
if "pr_person" in row:
record = row["pr_person"]
else:
record = row
if record.first_name:
fname = record.first_name.strip()
if record.middle_name:
mname = record.middle_name.strip()
if record.last_name:
lname = record.last_name.strip()
represent = s3_format_fullname(fname, mname, lname, truncate)
represents[record.id] = represent
return represents
else:
return DEFAULT
# =============================================================================
def s3_represent_facilities(db, site_ids, link=True):
"""
Represent Facilities
"""
table = db.org_site
sites = db(table._id.belongs(site_ids)).select(table._id,
table.instance_type)
if not sites:
return []
instance_ids = Storage()
instance_types = []
for site in sites:
site_id = site[table._id.name]
instance_type = site.instance_type
if instance_type not in instance_types:
instance_types.append(instance_type)
instance_ids[instance_type] = [site_id]
else:
instance_ids[instance_type].append(site_id)
results = []
for instance_type in instance_types:
represent = db.org_site.instance_type.represent
instance_type_nice = represent(instance_type)
c, f = instance_type.split("_")
site_ids = instance_ids[instance_type]
table = db[instance_type]
query = table.site_id.belongs(site_ids)
records = db(query).select(table.id,
table.site_id,
table.name)
for record in records:
site_str = "%s (%s)" % (record.name, instance_type_nice)
if link:
site_str = A(site_str, _href=URL(c=c,
f=f,
args=[record.id],
extension=""))
results.append((record.site_id, site_str))
return results
# =============================================================================
def s3_comments_represent(text, show_link=True):
"""
Represent Comments Fields
"""
if len(text) < 80:
return text
elif not show_link:
return "%s..." % text[:76]
else:
import uuid
unique = uuid.uuid4()
represent = DIV(
DIV(text,
_id=unique,
_class="hide popup",
_onmouseout="$('#%s').hide();" % unique
),
A("%s..." % text[:76],
_onmouseover="$('#%s').removeClass('hide').show();" % unique,
),
)
return represent
# =============================================================================
def s3_url_represent(url):
"""
Make URLs clickable
"""
if not url:
return ""
    return A(url, _href=url, _target="_blank")
# =============================================================================
def s3_avatar_represent(id, tablename="auth_user", _class="avatar"):
"""
Represent a User as their profile picture or Gravatar
@todo: parameter description?
"""
db = current.db
s3db = current.s3db
cache = s3db.cache
table = s3db[tablename]
email = None
image = None
if tablename == "auth_user":
user = db(table.id == id).select(table.email,
limitby=(0, 1),
cache=cache).first()
if user:
email = user.email.strip().lower()
ltable = s3db.pr_person_user
itable = s3db.pr_image
query = (ltable.user_id == id) & \
(ltable.pe_id == itable.pe_id) & \
(itable.profile == True)
image = db(query).select(itable.image,
limitby=(0, 1)).first()
if image:
image = image.image
elif tablename == "pr_person":
user = db(table.id == id).select(table.pe_id,
limitby=(0, 1),
cache=cache).first()
if user:
ctable = s3db.pr_contact
query = (ctable.pe_id == user.pe_id) & \
(ctable.contact_method == "EMAIL")
email = db(query).select(ctable.value,
limitby=(0, 1),
cache=cache).first()
if email:
email = email.value
itable = s3db.pr_image
query = (itable.pe_id == user.pe_id) & \
(itable.profile == True)
image = db(query).select(itable.image,
limitby=(0, 1)).first()
if image:
image = image.image
size = (50, 50)
if image:
image = s3db.pr_image_represent(image, size=size)
size = s3db.pr_image_size(image, size)
url = URL(c="default", f="download",
args=image)
elif email:
# If no Image uploaded, try Gravatar, which also provides a nice fallback identicon
import hashlib
hash = hashlib.md5(email).hexdigest()
url = "http://www.gravatar.com/avatar/%s?s=50&d=identicon" % hash
else:
url = "http://www.gravatar.com/avatar/00000000000000000000000000000000?d=mm"
return IMG(_src=url,
_class=_class,
_width=size[0],
_height=size[1],
)
# =============================================================================
def s3_auth_user_represent(id):
"""
Represent a user as their email address
"""
if not id:
return current.messages.NONE
db = current.db
table = db.auth_user
user = db(table.id == id).select(table.email,
limitby=(0, 1),
cache=current.s3db.cache).first()
try:
return user.email
except:
return current.messages.UNKNOWN_OPT
# =============================================================================
def s3_auth_group_represent(opt):
"""
Represent user groups by their role names
"""
if not opt:
return current.messages.NONE
auth = current.auth
s3db = current.s3db
table = auth.settings.table_group
groups = current.db(table.id > 0).select(table.id,
table.role,
cache=s3db.cache).as_dict()
if not isinstance(opt, (list, tuple)):
opt = [opt]
roles = []
for o in opt:
try:
key = int(o)
except ValueError:
continue
if key in groups:
roles.append(groups[key]["role"])
if not roles:
return current.messages.NONE
return ", ".join(roles)
# =============================================================================
def s3_represent_name(table):
"""
Returns a represent function for the common case where we return
the name of the record.
"""
def represent(id, row=None):
if row:
return row.name
elif not id:
return current.messages.NONE
r = current.db(table._id == id).select(table.name,
limitby=(0, 1)
).first()
try:
return r.name
except:
return current.messages.UNKNOWN_OPT
return represent
# =============================================================================
def s3_represent_name_translate(table):
"""
Returns a represent function for the common case where we return
a translated version of the name of the record.
"""
def represent(id, row=None):
if row:
return current.T(row.name)
elif not id:
return current.messages.NONE
r = current.db(table._id == id).select(table.name,
limitby=(0, 1)
).first()
try:
return current.T(r.name)
except:
return current.messages.UNKNOWN_OPT
return represent
# =============================================================================
def s3_include_debug_css():
"""
Generates html to include the css listed in
/private/templates/<template>/css.cfg
"""
request = current.request
folder = request.folder
appname = request.application
theme = current.deployment_settings.get_theme()
css_cfg = "%s/private/templates/%s/css.cfg" % (folder, theme)
try:
f = open(css_cfg, "r")
except:
raise HTTP(500, "Theme configuration file missing: private/templates/%s/css.cfg" % theme)
files = f.readlines()
files = files[:-1]
include = ""
for file in files:
include = '%s\n<link href="/%s/static/styles/%s" rel="stylesheet" type="text/css" />' \
% (include, appname, file[:-1])
f.close()
return XML(include)
# =============================================================================
def s3_include_debug_js():
"""
Generates html to include the js scripts listed in
/static/scripts/tools/sahana.js.cfg
"""
request = current.request
folder = request.folder
appname = request.application
theme = current.deployment_settings.get_theme()
scripts_dir = os.path.join(folder, "static", "scripts")
sys.path.append(os.path.join(scripts_dir, "tools"))
import mergejsmf
configDictCore = {
".": scripts_dir,
"web2py": scripts_dir,
"S3": scripts_dir
}
configFilename = "%s/tools/sahana.js.cfg" % scripts_dir
(fs, files) = mergejsmf.getFiles(configDictCore, configFilename)
include = ""
for file in files:
include = '%s\n<script src="/%s/static/scripts/%s" type="text/javascript"></script>' \
% (include, appname, file)
return XML(include)
# =============================================================================
def s3_is_mobile_client(request):
"""
Simple UA Test whether client is a mobile device
"""
env = request.env
if env.http_x_wap_profile or env.http_profile:
return True
if env.http_accept and \
env.http_accept.find("text/vnd.wap.wml") > 0:
return True
keys = ["iphone", "ipod", "android", "opera mini", "blackberry", "palm",
"windows ce", "iemobile", "smartphone", "medi", "sk-0", "vk-v",
"aptu", "xda-", "mtv ", "v750", "p800", "opwv", "send", "xda2",
"sage", "t618", "qwap", "veri", "t610", "tcl-", "vx60", "vx61",
"lg-k", "lg-l", "lg-m", "lg-o", "lg-a", "lg-b", "lg-c", "xdag",
"lg-f", "lg-g", "sl45", "emul", "lg-p", "lg-s", "lg-t", "lg-u",
"lg-w", "6590", "t250", "qc21", "ig01", "port", "m1-w", "770s",
"n710", "ez60", "mt50", "g1 u", "vk40", "bird", "tagt", "pose",
"jemu", "beck", "go.w", "jata", "gene", "smar", "g-mo", "o2-x",
"htc_", "hei-", "fake", "qc-7", "smal", "htcp", "htcs", "craw",
"htct", "aste", "htca", "htcg", "teli", "telm", "kgt", "mwbp",
"kwc-", "owg1", "htc ", "kgt/", "htc-", "benq", "slid", "qc60",
"dmob", "blac", "smt5", "nec-", "sec-", "sec1", "sec0", "fetc",
"spv ", "mcca", "nem-", "spv-", "o2im", "m50/", "ts70", "arch",
"qtek", "opti", "devi", "winw", "rove", "winc", "talk", "pant",
"netf", "pana", "esl8", "pand", "vite", "v400", "whit", "scoo",
"good", "nzph", "mtp1", "doco", "raks", "wonu", "cmd-", "cell",
"mode", "im1k", "modo", "lg-d", "idea", "jigs", "bumb", "sany",
"vulc", "vx70", "psio", "fly_", "mate", "pock", "cdm-", "fly-",
"i230", "lge-", "lge/", "argo", "qc32", "n701", "n700", "mc21",
"n500", "midp", "t-mo", "airn", "bw-u", "iac", "bw-n", "lg g",
"erk0", "sony", "alav", "503i", "pt-g", "au-m", "treo", "ipaq",
"dang", "seri", "mywa", "eml2", "smb3", "brvw", "sgh-", "maxo",
"pg-c", "qci-", "vx85", "vx83", "vx80", "vx81", "pg-8", "pg-6",
"phil", "pg-1", "pg-2", "pg-3", "ds12", "scp-", "dc-s", "brew",
"hipt", "kddi", "qc07", "elai", "802s", "506i", "dica", "mo01",
"mo02", "avan", "kyoc", "ikom", "siem", "kyok", "dopo", "g560",
"i-ma", "6310", "sie-", "grad", "ibro", "sy01", "nok6", "el49",
"rim9", "upsi", "inno", "wap-", "sc01", "ds-d", "aur ", "comp",
"wapp", "wapr", "waps", "wapt", "wapu", "wapv", "wapy", "newg",
"wapa", "wapi", "wapj", "wapm", "hutc", "lg/u", "yas-", "hita",
"lg/l", "lg/k", "i-go", "4thp", "bell", "502i", "zeto", "ez40",
"java", "n300", "n302", "mmef", "pn-2", "newt", "1207", "sdk/",
"gf-5", "bilb", "zte-", "maui", "qc-3", "qc-2", "blaz", "r600",
"hp i", "qc-5", "moto", "cond", "motv", "virg", "ccwa", "audi",
"shar", "i-20", "samm", "sama", "sams", "sch-", "mot ", "http",
"505i", "mot-", "n502", "topl", "n505", "mobi", "3gso", "wmlb",
"ezwa", "qc12", "abac", "tdg-", "neon", "mio8", "sp01", "rozo",
"vx98", "dait", "t600", "anyw", "tx-9", "sava", "m-cr", "tsm-",
"mioa", "tsm5", "klon", "capi", "tsm3", "hcit", "libw", "lg50",
"mc01", "amoi", "lg54", "ez70", "se47", "n203", "vk52", "vk53",
"vk50", "webc", "haie", "semc", "grun", "play", "palm", "a wa",
"anny", "prox", "o2 x", "ezze", "symb", "hs-c", "pg13", "mits",
"kpt ", "qa-a", "501i", "pdxg", "iris", "pluc", "acoo", "soft",
"hpip", "iac/", "iac-", "aus ", "s55/", "vx53", "vx52", "chtm",
"meri", "merc", "your", "huaw", "cldc", "voda", "smit", "x700",
"mozz", "lexi", "up.b", "sph-", "keji", "jbro", "wig ", "attw",
"pire", "r380", "lynx", "anex", "vm40", "hd-m", "504i", "w3c ",
"c55/", "w3c-", "upg1", "t218", "tosh", "acer", "hd-t", "eric",
"hd-p", "noki", "acs-", "dbte", "n202", "tim-", "alco", "ezos",
"dall", "leno", "alca", "asus", "m3ga", "utst", "aiko", "n102",
"n101", "n100", "oran"]
ua = (env.http_user_agent or "").lower()
if [key for key in keys if key in ua]:
return True
return False
# =============================================================================
def s3_populate_browser_compatibility(request):
"""
Use WURFL for browser compatibility detection
@ToDo: define a list of features to store
"""
features = Storage(
#category = ["list","of","features","to","store"]
)
try:
from pywurfl.algorithms import TwoStepAnalysis
except ImportError:
s3_debug("pywurfl python module has not been installed, browser compatibility listing will not be populated. Download pywurfl from http://pypi.python.org/pypi/pywurfl/")
return False
import wurfl
device = wurfl.devices.select_ua(unicode(request.env.http_user_agent),
search=TwoStepAnalysis(wurfl.devices))
browser = Storage()
#for feature in device:
#if feature[0] not in category_list:
#category_list.append(feature[0])
#for category in features:
#if category in
#browser[category] = Storage()
for feature in device:
if feature[0] in features and \
feature[1] in features[feature[0]]:
browser[feature[0]][feature[1]] = feature[2]
return browser
# =============================================================================
def s3_register_validation():
"""
JavaScript client-side validation for Register form
- needed to check for passwords being same
@ToDo: Move this to JS. Internationalisation (T()) can be provided in
views/l10n.js
"""
T = current.T
request = current.request
settings = current.deployment_settings
s3 = current.response.s3
appname = current.request.application
auth = current.auth
if s3.debug:
s3.scripts.append("/%s/static/scripts/jquery.validate.js" % appname)
s3.scripts.append("/%s/static/scripts/jquery.pstrength.2.1.0.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/jquery.validate.min.js" % appname)
s3.scripts.append("/%s/static/scripts/jquery.pstrength.2.1.0.min.js" % appname)
if request.cookies.has_key("registered"):
password_position = '''last'''
else:
password_position = '''first'''
if settings.get_auth_registration_mobile_phone_mandatory():
mobile = '''
mobile:{
required:true
},'''
else:
mobile = ""
if settings.get_auth_registration_organisation_required():
org1 = '''
organisation_id:{
required: true
},'''
org2 = "".join((''',
organisation_id:"''', str(T("Enter your organization")), '''"'''))
else:
org1 = ""
org2 = ""
domains = ""
if settings.get_auth_registration_organisation_hidden() and \
request.controller != "admin":
table = current.auth.settings.table_user
table.organisation_id
table = current.s3db.auth_organisation
query = (table.organisation_id != None) & \
(table.domain != None)
        whitelists = current.db(query).select(table.organisation_id,
                                              table.domain)
if whitelists:
domains = '''$('#auth_user_organisation_id__row').hide()
S3.whitelists={
'''
count = 0
for whitelist in whitelists:
count += 1
domains += "'%s':%s" % (whitelist.domain,
whitelist.organisation_id)
if count < len(whitelists):
domains += ",\n"
else:
domains += "\n"
domains += '''}
$('#regform #auth_user_email').blur(function(){
var email=$('#regform #auth_user_email').val()
var domain=email.split('@')[1]
if(undefined!=S3.whitelists[domain]){
$('#auth_user_organisation_id').val(S3.whitelists[domain])
}else{
$('#auth_user_organisation_id__row').show()
}
})'''
# validate signup form on keyup and submit
# @ToDo: //remote:'emailsurl'
script = "".join(( domains, '''
$('#regform').validate({
errorClass:'req',
rules:{
first_name:{
required:true
},''', mobile, '''
email:{
required:true,
email:true
},''', org1, '''
password:{
required:true
},
password_two:{
required:true,
equalTo:".password:''', password_position, '''"
}
},
messages:{
first_name:"''', str(T("Enter your first name")), '''",
password:{
required:"''', str(T("Provide a password")), '''"
},
password_two:{
required:"''', str(T("Repeat your password")), '''",
equalTo:"''', str(T("Enter the same password as above")), '''"
},
email:{
required:"''', str(T("Please enter a valid email address")), '''",
email:"''', str(T("Please enter a valid email address")), '''"
}''', org2, '''
},
errorPlacement:function(error,element){
error.appendTo(element.parent().next())
},
submitHandler:function(form){
form.submit()
}
})
var MinPasswordChar = ''', str(auth.settings.password_min_length), ''';
$('.password:''', password_position, '''').pstrength({ minchar: MinPasswordChar, minchar_label: null } );
''' ))
s3.jquery_ready.append(script)
# =============================================================================
def s3_filename(filename):
"""
Convert a string into a valid filename on all OS
http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename-in-python/698714#698714
- currently unused
"""
import string
import unicodedata
validFilenameChars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filename = unicode(filename)
cleanedFilename = unicodedata.normalize("NFKD",
filename).encode("ASCII", "ignore")
return "".join(c for c in cleanedFilename if c in validFilenameChars)
# =============================================================================
def s3_has_foreign_key(field, m2m=True):
"""
Check whether a field contains a foreign key constraint
@param field: the field (Field instance)
@param m2m: also detect many-to-many links
@note: many-to-many references (list:reference) are no DB constraints,
but pseudo-references implemented by the DAL. If you only want
to find real foreign key constraints, then set m2m=False.
"""
try:
ftype = str(field.type)
except:
# Virtual Field
return False
if ftype[:9] == "reference":
return True
if m2m and ftype[:14] == "list:reference":
return True
return False
# =============================================================================
def s3_get_foreign_key(field, m2m=True):
"""
Resolve a field type into the name of the referenced table,
the referenced key and the reference type (M:1 or M:N)
@param field: the field (Field instance)
@param m2m: also detect many-to-many references
@returns: tuple (tablename, key, multiple), where tablename is
the name of the referenced table (or None if this field
has no foreign key constraint), key is the field name of
the referenced key, and multiple indicates whether this is
a many-to-many reference (list:reference) or not.
@note: many-to-many references (list:reference) are no DB constraints,
but pseudo-references implemented by the DAL. If you only want
to find real foreign key constraints, then set m2m=False.
"""
ftype = str(field.type)
if ftype[:9] == "reference":
key = ftype[10:]
multiple = False
elif m2m and ftype[:14] == "list:reference":
key = ftype[15:]
multiple = True
else:
return (None, None, None)
if "." in key:
rtablename, key = key.split(".")
else:
rtablename = key
rtable = current.s3db.table(rtablename)
if rtable:
key = rtable._id.name
else:
key = None
return (rtablename, key, multiple)
# =============================================================================
def s3_unicode(s, encoding="utf-8"):
"""
Convert an object into an unicode instance, to be used instead of
unicode(s) (Note: user data should never be converted into str).
@param s: the object
@param encoding: the character encoding
"""
if isinstance(s, unicode):
return s
try:
if not isinstance(s, basestring):
if hasattr(s, "__unicode__"):
s = unicode(s)
else:
try:
s = unicode(str(s), encoding, "strict")
except UnicodeEncodeError:
if not isinstance(s, Exception):
raise
s = " ".join([s3_unicode(arg, encoding) for arg in s])
else:
s = s.decode(encoding)
except UnicodeDecodeError:
if not isinstance(s, Exception):
raise
else:
s = " ".join([s3_unicode(arg, encoding) for arg in s])
return s
# =============================================================================
def search_vars_represent(search_vars):
"""
Unpickle and convert saved search form variables into
        human-readable HTML
@param search_vars: the (c)pickled search form variables
@returns: HTML as string
"""
import cPickle
s = ""
    search_vars = search_vars.replace("&apos;", "'")
try:
search_vars = cPickle.loads(str(search_vars))
except:
raise HTTP(500,"ERROR RETRIEVING THE SEARCH CRITERIA")
else:
s = "<p>"
pat = '_'
for var in search_vars.iterkeys():
if var == "criteria" :
c_dict = search_vars[var]
#s = s + crud_string("pr_save_search", "Search Criteria")
for j in c_dict.iterkeys():
st = str(j)
if st[0] == '_':
continue
else:
st = st.replace("_search_", " ")
st = st.replace("_advanced", "")
st = st.replace("_simple", "")
st = st.replace("text", "text matching")
"""st = st.replace(search_vars["function"], "")
st = st.replace(search_vars["prefix"], "")"""
st = st.replace("_", " ")
s = "%s <b> %s </b>: %s <br />" % \
(s, st.capitalize(), str(c_dict[j]))
elif var == "simple" or var == "advanced":
continue
            else:
                if var == "function":
                    v1 = "Resource Name"
                elif var == "prefix":
                    v1 = "Module"
                else:
                    # skip any other keys (otherwise v1 would be undefined)
                    continue
                s = "%s<b>%s</b>: %s<br />" % (s, v1, str(search_vars[var]))
s = s + "</p>"
return XML(s)
# =============================================================================
def s3_jaro_winkler(str1, str2):
"""
        Return the Jaro-Winkler distance of two strings (between 0.0 and 1.0)
Used as a measure of similarity between two strings
@see http://en.wikipedia.org/wiki/Jaro-Winkler_distance
@param str1: the first string
@param str2: the second string
"""
jaro_winkler_marker_char = chr(1)
if (str1 == str2):
return 1.0
if str1 == None:
return 0
if str2 == None:
return 0
len1 = len(str1)
len2 = len(str2)
halflen = max(len1, len2) / 2 - 1
ass1 = "" # Characters assigned in str1
ass2 = "" # Characters assigned in str2
workstr1 = str1
workstr2 = str2
common1 = 0 # Number of common characters
common2 = 0
# If the type is list then check for each item in
# the list and find out final common value
if isinstance(workstr2, list):
for item1 in workstr1:
for item2 in workstr2:
for i in range(len1):
start = max(0, i - halflen)
end = min(i + halflen + 1, len2)
index = item2.find(item1[i], start, end)
if (index > -1):
# Found common character
common1 += 1
ass1 = ass1 + item1[i]
item2 = item2[:index] + \
jaro_winkler_marker_char + \
item2[index + 1:]
else:
for i in range(len1):
start = max(0, i - halflen)
end = min(i + halflen + 1, len2)
index = workstr2.find(str1[i], start, end)
if (index > -1):
# Found common character
common1 += 1
ass1 = ass1 + str1[i]
workstr2 = workstr2[:index] + \
jaro_winkler_marker_char + \
workstr2[index + 1:]
# If the type is list
if isinstance(workstr1, list):
for item1 in workstr2:
for item2 in workstr1:
for i in range(len2):
start = max(0, i - halflen)
end = min(i + halflen + 1, len1)
index = item2.find(item1[i], start, end)
if (index > -1):
# Found common character
common2 += 1
ass2 = ass2 + item1[i]
item1 = item1[:index] + \
jaro_winkler_marker_char + \
item1[index + 1:]
else:
for i in range(len2):
start = max(0, i - halflen)
end = min(i + halflen + 1, len1)
index = workstr1.find(str2[i], start, end)
if (index > -1):
# Found common character
common2 += 1
ass2 = ass2 + str2[i]
workstr1 = workstr1[:index] + \
jaro_winkler_marker_char + \
workstr1[index + 1:]
if (common1 != common2):
common1 = float(common1 + common2) / 2.0
if (common1 == 0):
return 0.0
# Compute number of transpositions
if (len1 == len2):
transposition = 0
for i in range(len(ass1)):
if (ass1[i] != ass2[i]):
transposition += 1
transposition = transposition / 2.0
elif (len1 > len2):
transposition = 0
for i in range(len(ass2)): #smaller length one
if (ass1[i] != ass2[i]):
transposition += 1
while (i < len1):
transposition += 1
i += 1
transposition = transposition / 2.0
elif (len1 < len2):
transposition = 0
for i in range(len(ass1)): #smaller length one
if (ass1[i] != ass2[i]):
transposition += 1
while (i < len2):
transposition += 1
i += 1
transposition = transposition / 2.0
# Compute number of characters common to beginning of both strings,
# for Jaro-Winkler distance
minlen = min(len1, len2)
for same in range(minlen + 1):
if (str1[:same] != str2[:same]):
break
same -= 1
if (same > 4):
same = 4
common1 = float(common1)
w = 1. / 3. * (common1 / float(len1) + \
common1 / float(len2) + \
(common1 - transposition) / common1)
wn = w + same * 0.1 * (1.0 - w)
if (wn < 0.0):
wn = 0.0
elif (wn > 1.0):
wn = 1.0
return wn
# =============================================================================
def s3_jaro_winkler_distance_row(row1, row2):
"""
Calculate the percentage match for two db records
@todo: parameter description?
"""
dw = 0
num_similar = 0
if len(row1) != len(row2):
#print "The records columns does not match."
return
for x in range(0, len(row1)):
str1 = row1[x] # get row fields
str2 = row2[x] # get row fields
dw += s3_jaro_winkler(str1, str2) #Calculate match value for two column values
dw = dw / len(row1) # Average of all column match value.
dw = dw * 100 # Convert to percentage
return dw
# =============================================================================
def soundex(name, len=4):
"""
Code referenced from http://code.activestate.com/recipes/52213-soundex-algorithm/
@todo: parameter description?
"""
# digits holds the soundex values for the alphabet
digits = "01230120022455012623010202"
sndx = ""
fc = ""
# Translate alpha chars in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc:
# remember first letter
fc = c
d = digits[ord(c)-ord("A")]
# duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# replace first digit with first alpha character
sndx = fc + sndx[1:]
# remove all 0s from the soundex code
sndx = sndx.replace("0", "")
# return soundex code padded to len characters
return (sndx + (len * "0"))[:len]
# =============================================================================
def sort_dict_by_values(adict):
"""
Sort a dict by value and return an OrderedDict
- used by modules/eden/irs.py
"""
return OrderedDict(sorted(adict.items(), key = lambda item: item[1]))
# =============================================================================
class CrudS3(Crud):
"""
S3 extension of the gluon.tools.Crud class
- select() uses SQLTABLES3 (to allow different linkto construction)
@todo: is this still used anywhere?
"""
def __init__(self):
""" Initialise parent class & make any necessary modifications """
Crud.__init__(self, current.db)
def select(
self,
table,
query=None,
fields=None,
orderby=None,
limitby=None,
headers={},
**attr):
db = current.db
request = current.request
if not (isinstance(table, db.Table) or table in db.tables):
raise HTTP(404)
if not self.has_permission("select", table):
redirect(current.auth.settings.on_failed_authorization)
#if record_id and not self.has_permission("select", table):
# redirect(current.auth.settings.on_failed_authorization)
if not isinstance(table, db.Table):
table = db[table]
if not query:
query = table.id > 0
if not fields:
fields = [table.ALL]
rows = db(query).select(*fields, **dict(orderby=orderby,
limitby=limitby))
if not rows:
return None # Nicer than an empty table.
if not "linkto" in attr:
attr["linkto"] = self.url(args="read")
if not "upload" in attr:
attr["upload"] = self.url("download")
if request.extension != "html":
return rows.as_list()
return SQLTABLES3(rows, headers=headers, **attr)
# =============================================================================
class SQLTABLES3(SQLTABLE):
"""
S3 custom version of gluon.sqlhtml.SQLTABLE
Given a SQLRows object, as returned by a db().select(), generates
an html table with the rows.
- we need a different linkto construction for our CRUD controller
- we need to specify a different ID field to direct to for the M2M controller
- used by S3Resource.sqltable
Optional arguments:
@keyword linkto: URL (or lambda to generate a URL) to edit individual records
@keyword upload: URL to download uploaded files
@keyword orderby: Add an orderby link to column headers.
        @keyword headers: dictionary of header redefinitions
@keyword truncate: length at which to truncate text in table cells.
Defaults to 16 characters.
        Other optional named attributes are passed on to the <table> tag
Simple linkto example::
rows = db.select(db.sometable.ALL)
table = SQLTABLES3(rows, linkto="someurl")
This will link rows[id] to .../sometable/value_of_id
More advanced linkto example::
def mylink(field):
return URL(args=[field])
rows = db.select(db.sometable.ALL)
table = SQLTABLES3(rows, linkto=mylink)
This will link rows[id] to::
current_app/current_controller/current_function/value_of_id
"""
def __init__(self, sqlrows,
linkto=None,
upload=None,
orderby=None,
headers={},
truncate=16,
columns=None,
th_link="",
**attributes):
# reverted since it causes errors (admin/user & manual importing of req/req/import)
# super(SQLTABLES3, self).__init__(**attributes)
TABLE.__init__(self, **attributes)
self.components = []
self.attributes = attributes
self.sqlrows = sqlrows
(components, row) = (self.components, [])
if not columns:
columns = sqlrows.colnames
if headers=="fieldname:capitalize":
headers = {}
for c in columns:
headers[c] = " ".join([w.capitalize() for w in c.split(".")[-1].split("_")])
elif headers=="labels":
headers = {}
for c in columns:
(t, f) = c.split(".")
field = sqlrows.db[t][f]
headers[c] = field.label
if headers!=None:
for c in columns:
if orderby:
row.append(TH(A(headers.get(c, c),
_href=th_link+"?orderby=" + c)))
else:
row.append(TH(headers.get(c, c)))
components.append(THEAD(TR(*row)))
tbody = []
table_field = re.compile("[\w_]+\.[\w_]+")
for (rc, record) in enumerate(sqlrows):
row = []
if rc % 2 == 0:
_class = "even"
else:
_class = "odd"
for colname in columns:
if not table_field.match(colname):
if "_extra" in record and colname in record._extra:
r = record._extra[colname]
row.append(TD(r))
continue
else:
raise KeyError("Column %s not found (SQLTABLE)" % colname)
(tablename, fieldname) = colname.split(".")
try:
field = sqlrows.db[tablename][fieldname]
except (KeyError, AttributeError):
field = None
if tablename in record \
and isinstance(record, Row) \
and isinstance(record[tablename], Row):
r = record[tablename][fieldname]
elif fieldname in record:
r = record[fieldname]
else:
raise SyntaxError("something wrong in Rows object")
r_old = r
if not field:
pass
elif linkto and field.type == "id":
#try:
#href = linkto(r, "table", tablename)
#except TypeError:
#href = "%s/%s/%s" % (linkto, tablename, r_old)
#r = A(r, _href=href)
try:
href = linkto(r)
except TypeError:
href = "%s/%s" % (linkto, r)
r = A(r, _href=href)
#elif linkto and field.type.startswith("reference"):
#ref = field.type[10:]
#try:
#href = linkto(r, "reference", ref)
#except TypeError:
#href = "%s/%s/%s" % (linkto, ref, r_old)
#if ref.find(".") >= 0:
#tref,fref = ref.split(".")
#if hasattr(sqlrows.db[tref],"_primarykey"):
#href = "%s/%s?%s" % (linkto, tref, urllib.urlencode({fref:r}))
#r = A(str(r), _href=str(href))
elif linkto \
and hasattr(field._table, "_primarykey") \
and fieldname in field._table._primarykey:
# have to test this with multi-key tables
key = urllib.urlencode(dict([ \
((tablename in record \
and isinstance(record, Row) \
and isinstance(record[tablename], Row)) \
and (k, record[tablename][k])) \
or (k, record[k]) \
for k in field._table._primarykey]))
r = A(r, _href="%s/%s?%s" % (linkto, tablename, key))
elif field.type.startswith("list:"):
r = field.represent(r or [])
elif field.represent:
r = field.represent(r)
elif field.type.startswith("reference"):
pass
elif field.type == "blob" and r:
r = "DATA"
elif field.type == "upload":
if upload and r:
r = A("file", _href="%s/%s" % (upload, r))
elif r:
r = "file"
else:
r = ""
elif field.type in ["string", "text"]:
r = str(field.formatter(r))
ur = unicode(r, "utf8")
if truncate!=None and len(ur) > truncate:
r = ur[:truncate - 3].encode("utf8") + "..."
row.append(TD(r))
tbody.append(TR(_class=_class, *row))
components.append(TBODY(*tbody))
# =============================================================================
class S3BulkImporter(object):
"""
Import CSV files of data to pre-populate the database.
Suitable for use in Testing, Demos & Simulations
"""
def __init__(self):
""" Constructor """
import csv
from xml.sax.saxutils import unescape
self.csv = csv
self.unescape = unescape
self.importTasks = []
self.specialTasks = []
self.tasks = []
self.alternateTables = {"hrm_person": {"tablename":"pr_person",
"prefix":"pr",
"name":"person"},
"member_person": {"tablename":"pr_person",
"prefix":"pr",
"name":"person"},
}
self.errorList = []
self.resultList = []
# -------------------------------------------------------------------------
def load_descriptor(self, path):
""" Method that will load the descriptor file and then all the
import tasks in that file into the importTasks property.
            The descriptor file is the file called tasks.cfg in path.
The file consists of a comma separated list of:
application, resource name, csv filename, xsl filename.
"""
source = open(os.path.join(path, "tasks.cfg"), "r")
values = self.csv.reader(source)
for details in values:
if details == []:
continue
prefix = details[0][0].strip('" ')
if prefix == "#": # comment
continue
if prefix == "*": # specialist function
self.extractSpecialistLine(path, details)
else: # standard importer
self.extractImporterLine(path, details)
# -------------------------------------------------------------------------
def extractImporterLine(self, path, details):
"""
            Method that extracts the details for an import task
"""
argCnt = len(details)
if argCnt == 4 or argCnt == 5:
# remove any spaces and enclosing double quote
app = details[0].strip('" ')
res = details[1].strip('" ')
request = current.request
csvFileName = details[2].strip('" ')
if csvFileName[:7] == "http://":
csv = csvFileName
else:
(csvPath, csvFile) = os.path.split(csvFileName)
if csvPath != "":
path = os.path.join(request.folder,
"private",
"templates",
csvPath)
csv = os.path.join(path, csvFile)
xslFileName = details[3].strip('" ')
templateDir = os.path.join(request.folder,
"static",
"formats",
"s3csv",
)
# try the app directory in the templates directory first
xsl = os.path.join(templateDir, app, xslFileName)
_debug("%s %s" % (xslFileName, xsl))
            if not os.path.exists(xsl):
                # now try the templates directory
                xsl = os.path.join(templateDir, xslFileName)
                _debug("%s %s" % (xslFileName, xsl))
                if not os.path.exists(xsl):
                    # use the same directory as the csv file
                    xsl = os.path.join(path, xslFileName)
                    _debug("%s %s" % (xslFileName, xsl))
                    if not os.path.exists(xsl):
                        self.errorList.append(
                            "Failed to find a transform file %s, Giving up." % xslFileName)
                        return
vars = None
if argCnt == 5:
vars = details[4]
self.tasks.append([1, app, res, csv, xsl, vars])
self.importTasks.append([app, res, csv, xsl, vars])
else:
            self.errorList.append(
                "prepopulate error: job not of length 4 or 5. %s job ignored" % details)
# -------------------------------------------------------------------------
def extractSpecialistLine(self, path, details):
""" Method that will store a single import job into
the importTasks property.
"""
function = details[1].strip('" ')
csv = None
if len(details) == 3:
fileName = details[2].strip('" ')
(csvPath, csvFile) = os.path.split(fileName)
if csvPath != "":
path = os.path.join(current.request.folder,
"private",
"templates",
csvPath)
csv = os.path.join(path, csvFile)
extraArgs = None
if len(details) == 4:
extraArgs = details[3].strip('" ')
self.tasks.append([2, function, csv, extraArgs])
self.specialTasks.append([function, csv, extraArgs])
# -------------------------------------------------------------------------
def load_import(self, controller, csv, xsl):
""" Method that will store a single import job into
the importTasks property.
"""
self.importTasks.append([controller, csv, xsl])
# -------------------------------------------------------------------------
def execute_import_task(self, task):
""" Method that will execute each import job, in order """
start = datetime.datetime.now()
if task[0] == 1:
db = current.db
request = current.request
response = current.response
errorString = "prepopulate error: file %s missing"
# Store the view
view = response.view
_debug ("Running job %s %s (filename=%s transform=%s)" % (task[1], task[2], task[3], task[4]))
prefix = task[1]
name = task[2]
tablename = "%s_%s" % (prefix, name)
if tablename in self.alternateTables:
details = self.alternateTables[tablename]
if "tablename" in details:
tablename = details["tablename"]
current.s3db.table(tablename)
if "loader" in details:
loader = details["loader"]
if loader is not None:
loader()
if "prefix" in details:
prefix = details["prefix"]
if "name" in details:
name = details["name"]
try:
resource = current.s3db.resource(tablename)
except AttributeError:
# Table cannot be loaded
self.errorList.append("WARNING: Unable to find table %s import job skipped" % tablename)
return
# Check if the source file is accessible
filename = task[3]
if filename[:7] == "http://":
import urllib2
req = urllib2.Request(url=filename)
try:
f = urllib2.urlopen(req)
except urllib2.HTTPError, e:
self.errorList.append("Could not access %s: %s" % (filename, e.read()))
return
except:
self.errorList.append(errorString % filename)
return
else:
csv = f
else:
try:
csv = open(filename, "r")
except IOError:
self.errorList.append(errorString % filename)
return
# Check if the stylesheet is accessible
try:
open(task[4], "r")
except IOError:
self.errorList.append(errorString % task[4])
return
extra_data = None
if task[5]:
try:
extradata = self.unescape(task[5], {"'": '"'})
extradata = json.loads(extradata)
extra_data = extradata
except:
self.errorList.append("WARNING:5th parameter invalid, parameter %s ignored" % task[5])
try:
# @todo: add extra_data and file attachments
result = resource.import_xml(csv,
format="csv",
stylesheet=task[4],
extra_data=extra_data)
except SyntaxError, e:
self.errorList.append("WARNING: import error - %s" % e)
return
if not resource.error:
db.commit()
else:
# Must roll back if there was an error!
error = resource.error
self.errorList.append("%s - %s: %s" % (
task[3], resource.tablename, error))
errors = current.xml.collect_errors(resource)
if errors:
self.errorList.extend(errors)
db.rollback()
# Restore the view
response.view = view
end = datetime.datetime.now()
duration = end - start
csvName = task[3][task[3].rfind("/")+1:]
try:
# Python-2.7
duration = '{:.2f}'.format(duration.total_seconds()/60)
msg = "%s import job completed in %s mins" % (csvName, duration)
except AttributeError:
# older Python
msg = "%s import job completed in %s" % (csvName, duration)
self.resultList.append(msg)
if response.s3.debug:
s3_debug(msg)
# -------------------------------------------------------------------------
def execute_special_task(self, task):
"""
"""
start = datetime.datetime.now()
s3 = current.response.s3
if task[0] == 2:
fun = task[1]
csv = task[2]
extraArgs = task[3]
if csv is None:
if extraArgs is None:
error = s3[fun]()
else:
error = s3[fun](extraArgs)
elif extraArgs is None:
error = s3[fun](csv)
else:
error = s3[fun](csv, extraArgs)
if error:
self.errorList.append(error)
end = datetime.datetime.now()
duration = end - start
try:
# Python-2.7
duration = '{:.2f}'.format(duration.total_seconds()/60)
msg = "%s import job completed in %s mins" % (fun, duration)
except AttributeError:
# older Python
msg = "%s import job completed in %s" % (fun, duration)
self.resultList.append(msg)
if s3.debug:
s3_debug(msg)
# -------------------------------------------------------------------------
def import_role(self, filename):
""" Import Roles from CSV """
# Check if the source file is accessible
try:
openFile = open(filename, "r")
except IOError:
return "Unable to open file %s" % filename
auth = current.auth
acl = auth.permission
create_role = auth.s3_create_role
def parseACL(_acl):
permissions = _acl.split("|")
aclValue = 0
for permission in permissions:
if permission == "READ":
aclValue = aclValue | acl.READ
if permission == "CREATE":
aclValue = aclValue | acl.CREATE
if permission == "UPDATE":
aclValue = aclValue | acl.UPDATE
if permission == "DELETE":
aclValue = aclValue | acl.DELETE
if permission == "ALL":
aclValue = aclValue | acl.ALL
return aclValue
reader = self.csv.DictReader(openFile)
roles = {}
acls = {}
args = {}
for row in reader:
if row != None:
role = row["role"]
if "description" in row:
desc = row["description"]
else:
desc = ""
rules = {}
extra_param = {}
if "controller" in row and row["controller"]:
rules["c"] = row["controller"]
if "function" in row and row["function"]:
rules["f"] = row["function"]
if "table" in row and row["table"]:
rules["t"] = row["table"]
if row["oacl"]:
rules["oacl"] = parseACL(row["oacl"])
if row["uacl"]:
rules["uacl"] = parseACL(row["uacl"])
#if "org" in row and row["org"]:
#rules["organisation"] = row["org"]
#if "facility" in row and row["facility"]:
#rules["facility"] = row["facility"]
if "entity" in row and row["entity"]:
rules["entity"] = row["entity"]
if "hidden" in row and row["hidden"]:
extra_param["hidden"] = row["hidden"]
if "system" in row and row["system"]:
extra_param["system"] = row["system"]
if "protected" in row and row["protected"]:
extra_param["protected"] = row["protected"]
if "uid" in row and row["uid"]:
extra_param["uid"] = row["uid"]
if role in roles:
acls[role].append(rules)
else:
roles[role] = [role,desc]
acls[role] = [rules]
if len(extra_param) > 0 and role not in args:
args[role] = extra_param
for rulelist in roles.values():
if rulelist[0] in args:
create_role(rulelist[0],
rulelist[1],
*acls[rulelist[0]],
**args[rulelist[0]])
else:
create_role(rulelist[0],
rulelist[1],
*acls[rulelist[0]])
# -------------------------------------------------------------------------
def clear_tasks(self):
""" Clear the importTask list """
self.tasks = []
# -------------------------------------------------------------------------
def perform_tasks(self, path):
""" convenience method that will load and then execute the import jobs
that are listed in the descriptor file
"""
self.load_descriptor(path)
for task in self.tasks:
if task[0] == 1:
self.execute_import_task(task)
elif task[0] == 2:
self.execute_special_task(task)
# =============================================================================
class S3DateTime(object):
"""
Toolkit for date+time parsing/representation
"""
# -------------------------------------------------------------------------
@staticmethod
def date_represent(date, utc=False):
"""
Represent the date according to deployment settings &/or T()
@param date: the date
@param utc: the date is given in UTC
"""
session = current.session
settings = current.deployment_settings
format = settings.get_L10n_date_format()
if date and isinstance(date, datetime.datetime) and utc:
offset = IS_UTC_OFFSET.get_offset_value(session.s3.utc_offset)
if offset:
date = date + datetime.timedelta(seconds=offset)
if date:
return date.strftime(str(format))
else:
return current.messages.NONE
# -----------------------------------------------------------------------------
@staticmethod
def time_represent(time, utc=False):
"""
            Represent the time according to deployment settings &/or T()
@param time: the time
@param utc: the time is given in UTC
"""
session = current.session
settings = current.deployment_settings
format = settings.get_L10n_time_format()
if time and utc:
offset = IS_UTC_OFFSET.get_offset_value(session.s3.utc_offset)
if offset:
time = time + datetime.timedelta(seconds=offset)
if time:
return time.strftime(str(format))
else:
return current.messages.NONE
# -----------------------------------------------------------------------------
@staticmethod
def datetime_represent(dt, utc=False):
"""
Represent the datetime according to deployment settings &/or T()
@param dt: the datetime
@param utc: the datetime is given in UTC
"""
session = current.session
xml = current.xml
if dt and utc:
offset = IS_UTC_OFFSET.get_offset_value(session.s3.utc_offset)
if offset:
dt = dt + datetime.timedelta(seconds=offset)
if dt:
return xml.encode_local_datetime(dt)
else:
return current.messages.NONE
# =============================================================================
class S3MultiPath:
"""
Simplified path toolkit for managing multi-ancestor-hypergraphs
in a relational database.
MultiPaths allow single-query searches for all ancestors and
descendants of a node, as well as single-query affiliation
testing - whereas they require multiple writes on update (one
per each descendant node), so they should only be used for
hypergraphs which rarely change.
Every node of the hypergraph contains a path attribute, with the
following MultiPath-syntax:
MultiPath: <SimplePath>,<SimplePath>,...
SimplePath: [|<Node>|<Node>|...|]
Node: ID of the ancestor node
SimplePaths contain only ancestors, not the node itself.
SimplePaths contain the ancestors in reverse order, i.e. the nearest
ancestor first (this is important because removing a vertex from the
path will cut off the tail, not the head)
A path like A<-B<-C can be constructed like:
path = S3MultiPath([["C", "B", "A"]])
[|C|B|A|]
Extending this path by a vertex E<-B will result in a multipath like:
path.extend("B", "E")
[|C|B|A|],[|C|B|E|]
Cutting the vertex A<-B reduces the multipath to:
path.cut("B", "A")
[|C|B|E|]
Note the reverse notation (nearest ancestor first)!
MultiPaths will be normalized automatically, i.e.:
path = S3MultiPath([["C", "B", "A", "D", "F", "B", "E", "G"]])
[|C|B|A|D|F|],[|C|B|E|G|]
"""
# -------------------------------------------------------------------------
# Construction
#
def __init__(self, paths=None):
""" Constructor """
self.paths = []
if isinstance(paths, S3MultiPath):
self.paths = list(paths.paths)
else:
if paths is None:
paths = []
elif type(paths) is str:
paths = self.__parse(paths)
elif not isinstance(paths, (list, tuple)):
paths = [paths]
append = self.append
for p in paths:
append(p)
# -------------------------------------------------------------------------
def append(self, path):
"""
Append a new ancestor path to this multi-path
@param path: the ancestor path
"""
Path = self.Path
if isinstance(path, Path):
path = path.nodes
else:
path = Path(path).nodes
multipath = None
# Normalize any recurrent paths
paths = self.__normalize(path)
append = self.paths.append
for p in paths:
p = Path(p)
if not self & p:
append(p)
multipath = self
return multipath
# -------------------------------------------------------------------------
def extend(self, head, ancestors=None, cut=None):
"""
Extend this multi-path with a new vertex ancestors<-head
@param head: the head node
@param ancestors: the ancestor (multi-)path of the head node
"""
# If ancestors is a multi-path, extend recursively with all paths
if isinstance(ancestors, S3MultiPath):
extend = self.extend
for p in ancestors.paths:
extend(head, p, cut=cut)
return self
# Split-extend all paths which contain the head node
extensions = []
Path = self.Path
append = extensions.append
for p in self.paths:
if cut:
pos = p.find(cut)
if pos > 0:
p.nodes = p.nodes[:pos-1]
i = p.find(head)
if i > 0:
path = Path(p.nodes[:i]).extend(head, ancestors)
detour = None
for tail in self.paths:
j = tail.find(path.last())
if j > 0:
# append original tail
detour = Path(path)
detour.extend(path.last(), tail[j:])
append(detour)
if not detour:
append(path)
self.paths.extend(extensions)
# Finally, cleanup for duplicate and empty paths
return self.clean()
# -------------------------------------------------------------------------
def cut(self, head, ancestor=None):
"""
Cut off the vertex ancestor<-head in this multi-path
@param head: the head node
@param ancestor: the ancestor node to cut off
"""
for p in self.paths:
p.cut(head, ancestor)
# Must cleanup for duplicates
return self.clean()
# -------------------------------------------------------------------------
def clean(self):
"""
Remove any duplicate and empty paths from this multi-path
"""
mp = S3MultiPath(self)
pop = mp.paths.pop
self.paths = []
append = self.paths.append
while len(mp):
item = pop(0)
if len(item) and not mp & item and not self & item:
append(item)
return self
# -------------------------------------------------------------------------
# Serialization/Deserialization
#
def __parse(self, value):
""" Parse a multi-path-string into nodes """
return value.split(",")
def __repr__(self):
""" Serialize this multi-path as string """
return ",".join([str(p) for p in self.paths])
def as_list(self):
""" Return this multi-path as list of node lists """
return [p.as_list() for p in self.paths if len(p)]
# -------------------------------------------------------------------------
# Introspection
#
def __len__(self):
""" The number of paths in this multi-path """
return len(self.paths)
# -------------------------------------------------------------------------
def __and__(self, sequence):
"""
Check whether sequence is the start sequence of any of
the paths in this multi-path (for de-duplication)
@param sequence: sequence of node IDs (or path)
"""
for p in self.paths:
if p.startswith(sequence):
return 1
return 0
# -------------------------------------------------------------------------
def __contains__(self, sequence):
"""
Check whether sequence is contained in any of the paths (can
also be used to check whether this multi-path contains a path
to a particular node)
@param sequence: the sequence (or node ID)
"""
for p in self.paths:
if sequence in p:
return 1
return 0
# -------------------------------------------------------------------------
def nodes(self):
""" Get all nodes from this path """
nodes = []
for p in self.paths:
n = [i for i in p.nodes if i not in nodes]
nodes.extend(n)
return nodes
# -------------------------------------------------------------------------
@staticmethod
def all_nodes(paths):
"""
Get all nodes from all paths
@param paths: list of multi-paths
"""
nodes = []
for p in paths:
n = [i for i in p.nodes() if i not in nodes]
nodes.extend(n)
return nodes
# -------------------------------------------------------------------------
# Normalization
#
@staticmethod
def __normalize(path):
"""
Normalize a path into a sequence of non-recurrent paths
@param path: the path as a list of node IDs
"""
seq = map(str, path)
l = zip(seq, seq[1:])
if not l:
return [path]
seq = S3MultiPath.__resolve(seq)
pop = seq.pop
paths = []
append = paths.append
while len(seq):
p = pop(0)
s = paths + seq
contained = False
lp = len(p)
for i in s:
if i[:lp] == p:
contained = True
break
if not contained:
append(p)
return paths
# -------------------------------------------------------------------------
@staticmethod
def __resolve(seq):
"""
Resolve a sequence of vertices (=pairs of node IDs) into a
sequence of non-recurrent paths
@param seq: the vertex sequence
"""
resolve = S3MultiPath.__resolve
if seq:
head = seq[0]
tail = seq[1:]
tails = []
index = tail.index
append = tails.append
while head in tail:
pos = index(head)
append(tail[:pos])
tail = tail[pos + 1:]
append(tail)
r = []
append = r.append
for tail in tails:
nt = resolve(tail)
for t in nt:
append([head] + t)
return r
else:
return [seq]
# -------------------------------------------------------------------------
# Helper class for simple ancestor paths
#
class Path:
# ---------------------------------------------------------------------
# Construction methods
#
def __init__(self, nodes=None):
""" Constructor """
self.nodes = []
if isinstance(nodes, S3MultiPath.Path):
self.nodes = list(nodes.nodes)
else:
if nodes is None:
nodes = []
elif type(nodes) is str:
nodes = self.__parse(nodes)
elif not isinstance(nodes, (list, tuple)):
nodes = [nodes]
append = self.append
for n in nodes:
if not append(n):
break
# ---------------------------------------------------------------------
def append(self, node=None):
"""
Append a node to this path
@param node: the node
"""
if node is None:
return True
n = str(node)
if not n:
return True
if n not in self.nodes:
self.nodes.append(n)
return True
return False
# ---------------------------------------------------------------------
def extend(self, head, ancestors=None):
"""
Extend this path with a new vertex ancestors<-head, if this
path ends at the head node
@param head: the head node
@param ancestors: the ancestor sequence
"""
if ancestors is None:
# If no head node is specified, use the first ancestor node
path = S3MultiPath.Path(head)
head = path.first()
ancestors = path.nodes[1:]
last = self.last()
if last is None or last == str(head):
append = self.append
path = S3MultiPath.Path(ancestors)
for i in path.nodes:
if not append(i):
break
return self
else:
return None
# ---------------------------------------------------------------------
def cut(self, head, ancestor=None):
"""
Cut off the ancestor<-head vertex from this path, retaining
the head node
@param head: the head node
@param ancestor: the ancestor node
"""
if ancestor is not None:
sequence = [str(head), str(ancestor)]
pos = self.find(sequence)
if pos > 0:
self.nodes = self.nodes[:pos]
else:
# if ancestor is None and the path starts with head,
# then remove the entire path
if str(head) == self.first():
self.nodes = []
return self
# ---------------------------------------------------------------------
# Serialize/Deserialize
#
def __repr__(self):
""" Represent this path as a string """
return "[|%s|]" % "|".join(self.nodes)
def __parse(self, value):
""" Parse a string into nodes """
return value.strip().strip("[").strip("]").strip("|").split("|")
def as_list(self):
""" Return the list of nodes """
return list(self.nodes)
# ---------------------------------------------------------------------
# Item access
#
def __getitem__(self, i):
""" Get the node at position i """
try:
return self.nodes.__getitem__(i)
except IndexError:
return None
# ---------------------------------------------------------------------
def first(self):
""" Get the first node in this path (the nearest ancestor) """
return self[0]
# ---------------------------------------------------------------------
def last(self):
""" Get the last node in this path (the most distant ancestor) """
return self[-1]
# ---------------------------------------------------------------------
# Tests
#
def __contains__(self, sequence):
"""
Check whether this path contains sequence
@param sequence: sequence of node IDs
"""
if self.find(sequence) != -1:
return 1
else:
return 0
# ---------------------------------------------------------------------
def __len__(self):
"""
Get the number of nodes in this path
"""
return len(self.nodes)
# ---------------------------------------------------------------------
def find(self, sequence):
"""
Find a sequence of node IDs in this path
@param sequence: sequence of node IDs (or path)
@returns: position of the sequence (index+1), 0 if the path
is empty, -1 if the sequence wasn't found
"""
path = S3MultiPath.Path(sequence)
sequence = path.nodes
nodes = self.nodes
if not sequence:
return -1
if not nodes:
return 0
head, tail = sequence[0], sequence[1:]
pos = 0
l = len(tail)
index = nodes.index
while head in nodes[pos:]:
pos = index(head, pos) + 1
if not tail or nodes[pos:pos+l] == tail:
return pos
return -1
# ---------------------------------------------------------------------
def startswith(self, sequence):
"""
Check whether this path starts with sequence
@param sequence: sequence of node IDs (or path)
"""
sequence = S3MultiPath.Path(sequence).nodes
if self.nodes[0:len(sequence)] == sequence:
return True
else:
return False
# =============================================================================
class Traceback(object):
""" Generate the traceback for viewing error Tickets """
def __init__(self, text):
""" Traceback constructor """
self.text = text
def xml(self):
""" Returns the xml """
output = self.make_links(CODE(self.text).xml())
return output
def make_link(self, path):
""" Create a link from a path """
tryFile = path.replace("\\", "/")
if os.path.isabs(tryFile) and os.path.isfile(tryFile):
(folder, filename) = os.path.split(tryFile)
(base, ext) = os.path.splitext(filename)
app = current.request.args[0]
editable = {"controllers": ".py", "models": ".py", "views": ".html"}
for key in editable.keys():
check_extension = folder.endswith("%s/%s" % (app, key))
if ext.lower() == editable[key] and check_extension:
return A('"' + tryFile + '"',
_href=URL("edit/%s/%s/%s" % \
(app, key, filename))).xml()
return ""
def make_links(self, traceback):
""" Make links using the given traceback """
lwords = traceback.split('"')
# Make the short circuit compatible with <= python2.4
result = (len(lwords) != 0) and lwords[0] or ""
i = 1
while i < len(lwords):
link = self.make_link(lwords[i])
if link == "":
result += '"' + lwords[i]
else:
result += link
if i + 1 < len(lwords):
result += lwords[i + 1]
i = i + 1
i = i + 1
return result
# =============================================================================
def URL2(a=None, c=None, r=None):
"""
Modified version of URL from gluon/html.py
- used by views/layout_iframe.html for our jquery function
@example:
>>> URL(a="a",c="c")
"/a/c"
generates a url "/a/c" corresponding to application a & controller c
If r=request is passed, a & c are set, respectively,
to r.application, r.controller
The more typical usage is:
URL(r=request) that generates a base url with the present
application and controller.
The function (& optionally args/vars) are expected to be added
via jquery based on attributes of the item.
"""
application = controller = None
if r:
application = r.application
controller = r.controller
if a:
application = a
if c:
controller = c
if not (application and controller):
        raise SyntaxError("not enough information to build the url")
#other = ""
url = "/%s/%s" % (application, controller)
return url
# =============================================================================
class S3MarkupStripper(HTMLParser.HTMLParser):
""" Simple markup stripper """
def __init__(self):
self.reset()
self.result = []
def handle_data(self, d):
self.result.append(d)
def stripped(self):
return "".join(self.result)
# =============================================================================
class S3DataTable(object):
"""
Generate a datatable from a list of Storages and a list of fields
"""
# The dataTable id if no explicit value has been provided
id_counter = 1
def __init__(self,
rfields,
data,
start=0,
limit=None,
filterString=None,
orderby=None,
):
"""
S3DataTable constructor
@param rfields: A list of S3Resourcefield
            @param data: A list of Storages; the key is of the form table.field
The value is the data to be displayed in the dataTable
@param start: the first row to return from the data
@param limit: the (maximum) number of records to return
@param filterString: The string that was used in filtering the records
@param orderby: the DAL orderby construct
"""
from gluon.dal import Expression
self.data = data
self.rfields = rfields
lfields = []
append = lfields.append
heading = {}
for field in rfields:
selector = "%s.%s" % (field.tname, field.fname)
append(selector)
heading[selector] = (field.label)
self.lfields = lfields
self.heading = heading
        numrows = len(data)
        if start < 0:
            start = 0
        if start > numrows:
            start = numrows
        if limit is None:
            end = numrows
        else:
            end = start + limit
        if end > numrows:
            end = numrows
self.start = start
self.end = end
self.filterString = filterString
def selectAction(orderby):
if isinstance(orderby, tuple):
for el in orderby:
selectAction(el)
if isinstance(orderby, Field):
extractField(orderby)
elif isinstance(orderby, Expression):
extractExpression(orderby)
else:
self.orderby.append([1, "asc"])
def extractField(field):
cnt = 0
for rfield in rfields:
if str(field) == rfield.colname:
self.orderby.append([cnt, "asc"])
break
cnt += 1
def extractExpression(exp):
cnt = 0
if isinstance(exp.first, Field):
for rfield in rfields:
if str(exp.first) == rfield.colname:
if exp.op == exp.db._adapter.INVERT:
self.orderby.append([cnt, "desc"])
else:
self.orderby.append([cnt, "asc"])
break
cnt += 1
else:
extractExpression(exp.first)
if exp.second:
selectAction(exp.second)
self.orderby = []
selectAction(orderby)
# -------------------------------------------------------------------------
@staticmethod
def getConfigData():
"""
Method to extract the configuration data from S3 globals and
store them as an attr variable
            @return: dictionary of attributes which can be passed into html();
                     the following keys may be set:
dt_displayLength : The default number of records that will be shown
dt_pagination: Enable pagination
dt_pagination_type: type of pagination, either:
(default) full_numbers
OR two_button
dt_bFilter: Enable or disable filtering of data.
                   dt_group: The column that is used to group the data
dt_ajax_url: The URL to be used for the Ajax call
dt_action_col: The column where the action buttons will be placed
dt_bulk_actions: list of labels for the bulk actions.
dt_bulk_col: The column in which the checkboxes will appear,
by default it will be the column immediately
before the first data item
dt_bulk_selected: A list of selected items
dt_actions: dictionary of actions
dt_styles: dictionary of styles to be applied to a list of ids
for example:
{"warning" : [1,3,6,7,9],
"alert" : [2,10,13]}
"""
request = current.request
s3 = current.response.s3
attr = Storage()
if s3.datatable_ajax_source:
attr.dt_ajax_url = s3.datatable_ajax_source
if s3.actions:
attr.dt_actions = s3.actions
if s3.dataTableBulkActions:
attr.dt_bulk_actions = s3.dataTableBulkActions
if s3.dataTable_iDisplayLength:
attr.dt_displayLength = s3.dataTable_iDisplayLength
attr.dt_pagination = "false" if s3.no_sspag else "true"
if s3.dataTable_sPaginationType:
attr.dt_pagination_type = s3.dataTable_sPaginationType
if s3.dataTable_group:
attr.dt_group = s3.dataTable_group
if s3.dataTable_NobFilter:
attr.dt_bFilter = not s3.dataTable_NobFilter
if s3.dataTable_sDom:
attr.dt_sDom = s3.dataTable_sDom
if s3.dataTableDisplay:
attr.dt_display = s3.dataTableDisplay
if s3.dataTableStyleDisabled or s3.dataTableStyleWarning or s3.dataTableStyleAlert:
attr.dt_styles = {}
if s3.dataTableStyleDisabled:
attr.dt_styles["dtdisable"] = s3.dataTableStyleDisabled
if s3.dataTableStyleWarning:
attr.dt_styles["dtwarning"] = s3.dataTableStyleWarning
if s3.dataTableStyleAlert:
attr.dt_styles["dtalert"] = s3.dataTableStyleAlert
return attr
# -------------------------------------------------------------------------
@staticmethod
def getControlData(rfields, vars):
"""
Method that will return the orderby and filter from the vars
returned by the browser, from an ajax call.
@param rfields: A list of S3Resourcefield
            @param vars: The request variables (Storage) sent from the dataTable
"""
# @todo: does not sort properly in option fields nor FK references
if not vars.iSortingCols:
return (False, "")
sort_cols = int(vars.iSortingCols)
orderby = False
for x in range(sort_cols):
index = int(vars["iSortCol_%s" % x])
f = rfields[index].field
if vars["sSortDir_%s" % x] == "desc":
f = ~f
if not orderby:
orderby = f
else:
orderby |= f
# @todo: does not search properly in option fields nor FK references
words = vars.sSearch
if not words:
return (orderby, "")
words = words.split()
query = None
for rf in rfields:
            if rf.ftype in ("string", "text"):
if not query:
query = rf.field.contains(words)
else:
query |= (rf.field.contains(words))
return (orderby, query)
# ---------------------------------------------------------------------
@staticmethod
def listFormats(id, rfields=None, permalink=None):
"""
Calculate the export formats that can be added to the table
            @param id: The unique dataTable ID
@param rfields: optional list of rfields
"""
T = current.T
s3 = current.response.s3
application = current.request.application
# @todo: this needs rework
# - s3FormatRequest must retain any URL filters
# - s3FormatRequest must remove the "search" method
# - other data formats could have other list_fields,
# hence applying the datatable sorting/filters is
# not transparent
if s3.datatable_ajax_source:
end = s3.datatable_ajax_source.find(".aadata")
default_url = s3.datatable_ajax_source[:end] # strip '.aadata' extension
else:
default_url = current.request.url
iconList = []
url = s3.formats.pdf if s3.formats.pdf else default_url
iconList.append(IMG(_src="/%s/static/img/pdficon_small.gif" % application,
_onclick="s3FormatRequest('pdf','%s','%s');" % (id, url),
_alt=T("Export in PDF format"),
))
url = s3.formats.xls if s3.formats.xls else default_url
iconList.append(IMG(_src="/%s/static/img/icon-xls.png" % application,
_onclick="s3FormatRequest('xls','%s','%s');" % (id, url),
_alt=T("Export in XLS format"),
))
url = s3.formats.rss if s3.formats.rss else default_url
iconList.append(IMG(_src="/%s/static/img/RSS_16.png" % application,
_onclick="s3FormatRequest('rss','%s','%s');" % (id, url),
_alt=T("Export in RSS format"),
))
div = DIV(_class='list_formats')
if permalink is not None:
link = A(T("Link to this result"),
_href=permalink,
_class="permalink")
div.append(link)
div.append(" | ")
div.append(current.T("Export to:"))
if "have" in s3.formats:
iconList.append(IMG(_src="/%s/static/img/have_16.png" % application,
_onclick="s3FormatRequest('have','%s','%s');" % (id, s3.formats.have),
_alt=T("Export in HAVE format"),
))
if "kml" in s3.formats:
iconList.append(IMG(_src="/%s/static/img/kml_icon.png" % application,
_onclick="s3FormatRequest('kml','%s','%s');" % (id, s3.formats.kml),
_alt=T("Export in KML format"),
))
elif rfields:
kml_list = ["location_id",
"site_id",
]
for r in rfields:
if r.fname in kml_list:
iconList.append(IMG(_src="/%s/static/img/kml_icon.png" % application,
_onclick="s3FormatRequest('kml','%s','%s');" % (id, default_url),
_alt=T("Export in KML format"),
))
if "map" in s3.formats:
iconList.append(IMG(_src="/%s/static/img/map_icon.png" % application,
_onclick="s3FormatRequest('map','%s','%s');" % (id, s3.formats.map),
_alt=T("Show on map"),
))
for icon in iconList:
div.append(icon)
return div
# ---------------------------------------------------------------------
@staticmethod
def defaultActionButtons(resource,
custom_actions=None,
r=None
):
"""
Configure default action buttons
@param resource: the resource
@param r: the request, if specified, all action buttons will
be linked to the controller/function of this request
rather than to prefix/name of the resource
@param custom_actions: custom actions as list of dicts like
{"label":label, "url":url, "_class":class},
will be appended to the default actions
"""
from s3crud import S3CRUD
auth = current.auth
s3 = current.response.s3
table = resource.table
s3.actions = None
has_permission = auth.s3_has_permission
ownership_required = auth.permission.ownership_required
labels = current.manager.LABEL
args = ["[id]"]
# Choose controller/function to link to
if r is not None:
c = r.controller
f = r.function
else:
c = resource.prefix
f = resource.name
# "Open" button
if has_permission("update", table) and \
not ownership_required("update", table):
update_url = URL(c=c, f=f, args=args + ["update"])
S3CRUD.action_button(labels.UPDATE, update_url)
else:
read_url = URL(c=c, f=f, args=args)
S3CRUD.action_button(labels.READ, read_url)
# Delete action
# @todo: does not apply selective action (renders DELETE for
# all items even if the user is only permitted to delete
# some of them) => should implement "restrict", see
# S3CRUD.action_buttons
deletable = current.s3db.get_config(resource.tablename, "deletable",
True)
if deletable and \
has_permission("delete", table) and \
not ownership_required("delete", table):
delete_url = URL(c=c, f=f, args = args + ["delete"])
S3CRUD.action_button(labels.DELETE, delete_url)
# Append custom actions
if custom_actions:
s3.actions = s3.actions + custom_actions
# ---------------------------------------------------------------------
@staticmethod
def htmlConfig(html,
id,
orderby,
rfields = None,
cache = None,
filteredrows = None,
**attr
):
"""
            Method to wrap the html for a dataTable in a form, add the list of
            formats used for data export, and add the config details required
            by dataTables.
@param html: The html table
@param id: The id of the table
@param orderby: the sort details see aaSort at http://datatables.net/ref
@param rfields: The list of resource fields
@param attr: dictionary of attributes which can be passed in
dt_displayLength : The default number of records that will be shown
dt_sDom : The Datatable DOM initialisation variable, describing
the order in which elements are displayed.
See http://datatables.net/ref for more details.
                   dt_pagination : Is pagination enabled, default 'true'
dt_pagination_type : How the pagination buttons are displayed
dt_bFilter: Enable or disable filtering of data.
dt_ajax_url: The URL to be used for the Ajax call
dt_action_col: The column where the action buttons will be placed
dt_bulk_actions: list of labels for the bulk actions.
dt_bulk_col: The column in which the checkboxes will appear,
by default it will be the column immediately
before the first data item
dt_group: The column(s) that is(are) used to group the data
                   dt_group_totals: The number of records in each group.
                                    This will be displayed in parentheses
after the group title.
dt_group_titles: The titles to be used for each group.
These are a list of lists with the inner list
consisting of two values, the repr from the
db and the label to display. This can be more than
the actual number of groups (giving an empty group).
dt_group_space: Insert a space between the group heading and the next group
dt_bulk_selected: A list of selected items
dt_actions: dictionary of actions
dt_styles: dictionary of styles to be applied to a list of ids
for example:
{"warning" : [1,3,6,7,9],
"alert" : [2,10,13]}
dt_text_maximum_len: The maximum length of text before it is condensed
dt_text_condense_len: The length displayed text is condensed down to
dt_shrink_groups: If set then the rows within a group will be hidden
                                     two types are supported, 'individual' and 'accordion'
dt_group_types: The type of indicator for groups that can be 'shrunk'
                                   Permitted values are: 'icon' (the default), 'text' and 'none'
@global current.response.s3.actions used to get the RowActions
"""
from gluon.serializers import json
from gluon.storage import Storage
request = current.request
s3 = current.response.s3
if not s3.dataTableID or not isinstance(s3.dataTableID, list):
s3.dataTableID = [id]
elif id not in s3.dataTableID:
s3.dataTableID.append(id)
# The configuration parameter from the server to the client will be
        # sent in a json object stored in a hidden input field. This object
# will then be parsed by s3.dataTable.js and the values used.
config = Storage()
config.id = id
displayLength = attr.get("dt_displayLength", current.manager.ROWSPERPAGE)
# Make sure that the displayed length is not greater than the number of filtered records
if filteredrows and displayLength > filteredrows:
displayLength = filteredrows
config.displayLength = displayLength
config.sDom = attr.get("dt_sDom", 'fril<"dataTable_table"t>pi')
config.pagination = attr.get("dt_pagination", "true")
config.paginationType = attr.get("dt_pagination_type", "full_numbers")
config.bFilter = attr.get("dt_bFilter", "true")
config.ajaxUrl = attr.get("dt_ajax_url", URL(c=request.controller,
f=request.function,
extension="aadata",
args=request.args,
vars=request.get_vars,
))
config.rowStyles = attr.get("dt_styles", [])
rowActions = s3.actions
if rowActions:
config.rowActions = rowActions
else:
config.rowActions = []
bulkActions = attr.get("dt_bulk_actions", None)
if bulkActions and not isinstance(bulkActions, list):
bulkActions = [bulkActions]
config.bulkActions = bulkActions
config.bulkCol = attr.get("dt_bulk_col", 0)
action_col = attr.get("dt_action_col", 0)
if bulkActions and config.bulkCol <= action_col:
action_col += 1
config.actionCol = action_col
group_list = attr.get("dt_group", [])
if not isinstance(group_list, list):
group_list = [group_list]
dt_group = []
for group in group_list:
if bulkActions and config.bulkCol <= group:
group += 1
if action_col >= group:
group -= 1
dt_group.append([group, "asc"])
config.group = dt_group
config.groupTotals = attr.get("dt_group_totals", [])
config.groupTitles = attr.get("dt_group_titles", [])
config.groupSpacing = attr.get("dt_group_space", "false")
for order in orderby:
if bulkActions:
if config.bulkCol <= order[0]:
order[0] += 1
if action_col >= order[0]:
order[0] -= 1
config.aaSort = orderby
config.textMaxLength = attr.get("dt_text_maximum_len", 80)
config.textShrinkLength = attr.get("dt_text_condense_len", 75)
config.shrinkGroupedRows = attr.get("dt_shrink_groups", "false")
config.groupIcon = attr.get("dt_group_types", [])
# Wrap the table in a form and add some data in hidden fields
form = FORM()
if not s3.no_formats and len(html) > 0:
permalink = attr.get("dt_permalink", None)
form.append(S3DataTable.listFormats(id, rfields,
permalink=permalink))
form.append(html)
# Add the configuration details for this dataTable
form.append(INPUT(_type="hidden",
_id="%s_configurations" % id,
_name="config",
_value=json(config)))
# If we have a cache set up then pass it in
if cache:
form.append(INPUT(_type="hidden",
_id="%s_dataTable_cache" %id,
_name="cache",
_value=json(cache)))
# If we have bulk actions then add the hidden fields
if config.bulkActions:
form.append(INPUT(_type="hidden",
_id="%s_dataTable_bulkMode" % id,
_name="mode",
_value="Inclusive"))
bulk_selected = attr.get("dt_bulk_selected", "")
if isinstance(bulk_selected, list):
bulk_selected = ",".join(bulk_selected)
form.append(INPUT(_type="hidden",
_id="%s_dataTable_bulkSelection" % id,
_name="selected",
_value="[%s]" % bulk_selected))
return form
# ---------------------------------------------------------------------
def table(self, id, flist=None, action_col=0):
"""
Method to render the data as an html table. This is of use if
            an html table is required without the dataTable goodness. However,
if you want html for a dataTable then use the html() method
@param id: The id of the table
@param flist: The list of fields
            @param action_col: The column where the action buttons will be displayed
(this is required by dataTables)
"""
data = self.data
heading = self.heading
start = self.start
end = self.end
if not flist:
flist = self.lfields
# Build the header row
header = THEAD()
tr = TR()
for field in flist:
if field == "BULK":
tr.append(TH(""))
else:
tr.append(TH(heading[field]))
header.append(tr)
body = TBODY()
if data:
# Build the body rows (the actual data)
rc = 0
for i in xrange(start, end):
row = data[i]
if rc % 2 == 0:
_class = "even"
else:
_class = "odd"
rc += 1
tr = TR(_class=_class)
for field in flist:
# Insert a checkbox for bulk select
if field == "BULK":
tr.append(TD(INPUT(_id="select%s" % row[flist[action_col]],
_type="checkbox",
_class="bulkcheckbox",
)))
else:
tr.append(TD(row[field]))
body.append(tr)
table = TABLE([header, body], _id=id, _class="dataTable display")
return table
# ---------------------------------------------------------------------
def html(self,
totalrows,
filteredrows,
id = None,
sEcho = 1,
**attr
):
"""
Method to render the data into html
@param totalrows: The total rows in the unfiltered query.
@param filteredrows: The total rows in the filtered query.
            @param id: The id of the table; these need to be unique if more
than one dataTable is to be rendered on the same page.
If this is not passed in then a unique id will be
generated. Regardless the id is stored in self.id
so it can be easily accessed after rendering.
@param sEcho: An unaltered copy of sEcho sent from the client used
by dataTables as a draw count.
@param attr: dictionary of attributes which can be passed in
"""
flist = self.lfields
if not id:
id = "list_%s" % self.id_counter
self.id_counter += 1
self.id = id
bulkActions = attr.get("dt_bulk_actions", None)
bulkCol = attr.get("dt_bulk_col", 0)
if bulkCol > len(flist):
bulkCol = len(flist)
action_col = attr.get("dt_action_col", 0)
if action_col != 0:
if action_col == -1 or action_col >= len(flist):
action_col = len(flist) -1
attr["dt_action_col"] = action_col
flist = flist[1:action_col+1] + [flist[0]] + flist[action_col+1:]
# Get the details for any bulk actions. If we have at least one bulk
# action then a column will be added, either at the start or in the
# column identified by dt_bulk_col
if bulkActions:
flist.insert(bulkCol, "BULK")
if bulkCol <= action_col:
action_col += 1
pagination = attr.get("dt_pagination", "true") == "true"
if pagination:
real_end = self.end
self.end = self.start + 1
table = self.table(id, flist, action_col)
cache = None
if pagination:
s3 = current.response.s3
self.end = real_end
aadata = self.aadata(totalrows, filteredrows, id, sEcho,
flist, stringify=False, **attr)
cache = {"iCacheLower": self.start,
"iCacheUpper": self.end if filteredrows > self.end else filteredrows,
"lastJson": aadata}
html = self.htmlConfig(table,
id,
self.orderby,
self.rfields,
cache,
filteredrows,
**attr
)
return html
# ---------------------------------------------------------------------
def aadata(self,
totalrows,
displayrows,
id,
sEcho,
flist,
stringify=True,
**attr
):
"""
Method to render the data into a json object
@param totalrows: The total rows in the unfiltered query.
@param displayrows: The total rows in the filtered query.
            @param id: The id of the table to which this ajax call will
                       respond.
@param sEcho: An unaltered copy of sEcho sent from the client used
by dataTables as a draw count.
@param flist: The list of fields
@param attr: dictionary of attributes which can be passed in
dt_action_col: The column where the action buttons will be placed
dt_bulk_actions: list of labels for the bulk actions.
dt_bulk_col: The column in which the checkboxes will appear,
by default it will be the column immediately
before the first data item
                   dt_group_totals: The number of records in each group.
                                    This will be displayed in parentheses
after the group title.
"""
from gluon.serializers import json
data = self.data
if not flist:
flist = self.lfields
start = self.start
end = self.end
action_col = attr.get("dt_action_col", 0)
structure = {}
aadata = []
for i in xrange(start, end):
row = data[i]
details = []
for field in flist:
if field == "BULK":
details.append("<INPUT id='select%s' type='checkbox' class='bulkcheckbox'>" % \
row[flist[action_col]])
else:
details.append(s3_unicode(row[field]))
aadata.append(details)
structure["dataTable_id"] = id
structure["dataTable_filter"] = self.filterString
structure["dataTable_groupTotals"] = attr.get("dt_group_totals", [])
structure["dataTable_sort"] = self.orderby
structure["aaData"] = aadata
structure["iTotalRecords"] = totalrows
structure["iTotalDisplayRecords"] = displayrows
structure["sEcho"] = sEcho
if stringify:
return json(structure)
else:
return structure
# ---------------------------------------------------------------------
def json(self,
totalrows,
displayrows,
id,
sEcho,
stringify=True,
**attr
):
"""
Method to render the data into a json object
@param totalrows: The total rows in the unfiltered query.
@param displayrows: The total rows in the filtered query.
            @param id: The id of the table to which this ajax call will
                       respond.
@param sEcho: An unaltered copy of sEcho sent from the client used
by dataTables as a draw count.
@param attr: dictionary of attributes which can be passed in
dt_action_col: The column where the action buttons will be placed
dt_bulk_actions: list of labels for the bulk actions.
dt_bulk_col: The column in which the checkboxes will appear,
by default it will be the column immediately
before the first data item
                   dt_group_totals: The number of records in each group.
                                    This will be displayed in parentheses
after the group title.
"""
flist = self.lfields
action_col = attr.get("dt_action_col", 0)
if action_col != 0:
if action_col == -1 or action_col >= len(flist):
action_col = len(flist) - 1
flist = flist[1:action_col+1] + [flist[0]] + flist[action_col+1:]
# Get the details for any bulk actions. If we have at least one bulk
# action then a column will be added, either at the start or in the
# column identified by dt_bulk_col
bulkActions = attr.get("dt_bulk_actions", None)
bulkCol = attr.get("dt_bulk_col", 0)
if bulkActions:
if bulkCol > len(flist):
bulkCol = len(flist)
flist.insert(bulkCol, "BULK")
if bulkCol <= action_col:
action_col += 1
return self.aadata(totalrows, displayrows, id, sEcho, flist,
stringify, **attr)
# END =========================================================================
|
{
"content_hash": "ee5f9000c21fcc100785d57eaca7bdf3",
"timestamp": "",
"source": "github",
"line_count": 3347,
"max_line_length": 177,
"avg_line_length": 37.12100388407529,
"alnum_prop": 0.4704693989246966,
"repo_name": "madhurauti/Map-Polygon",
"id": "54e8112e16e3e774f5d91fe4411c94995cf6dd6e",
"size": "124269",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/s3/s3utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "15527353"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "2202"
},
{
"name": "Python",
"bytes": "23300695"
},
{
"name": "Racket",
"bytes": "166"
}
],
"symlink_target": ""
}
|
'''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
__ALL__ = ['addon', 'net']
|
{
"content_hash": "fd176bda2484bb144cb94c5372aa0f8f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 73,
"avg_line_length": 39,
"alnum_prop": 0.717948717948718,
"repo_name": "rysson/filmkodi",
"id": "72e020642e9fbb7274e8637fad6cc9f12a9e0033",
"size": "741",
"binary": false,
"copies": "60",
"ref": "refs/heads/master",
"path": "plugin.video.xbmcfilm/resources/lib/utils/beta/t0mm0/common/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7510"
},
{
"name": "Python",
"bytes": "8058464"
},
{
"name": "Shell",
"bytes": "18531"
}
],
"symlink_target": ""
}
|
"""Runs all unit tests."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import sys
import unittest
import appengine_config
from common import caching
from common import xcontent
from controllers import sites
from models import config
from models import content
from models import courses
from models import vfs
from modules.review import domain
from tests import suite
from tools import verify
from tools.etl import etl
class ShouldHaveFailedByNow(Exception):
"""Special exception raised when a prior method did not raise."""
pass
def assert_fails(function):
"""Checks that function invocation raises an exception."""
try:
function()
raise ShouldHaveFailedByNow(
'Expected to fail: %s().' % function.__name__)
except ShouldHaveFailedByNow as e:
raise e
except Exception:
pass
class DeepDictionaryMergeTest(suite.TestBase):
def test_both_empty_merge(self):
tgt = {}
src = {}
r = courses.deep_dict_merge(tgt, src)
self.assertEqual({}, r)
def test_src_empty_merge(self):
tgt = {'a': {'b': 2}, 'c': None}
src = {}
r = courses.deep_dict_merge(tgt, src)
self.assertEqual(tgt, r)
def test_tgt_empty_merge(self):
tgt = {}
src = {'a': 1}
r = courses.deep_dict_merge(tgt, src)
self.assertEqual(src, r)
def test_non_recursive_merge(self):
tgt = {'a': 1, 'b': 2, 'd': 4}
src = {'a': 1, 'b': 22, 'c': 3}
r = courses.deep_dict_merge(tgt, src)
e = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
self.assertEqual(e, r)
def test_recursive_merge(self):
tgt = {'a': {'b': 1}}
src = {'a': 1, 'c': {'d': 4}}
r = courses.deep_dict_merge(tgt, src)
e = {'a': {'b': 1}, 'c': {'d': 4}}
self.assertEqual(e, r)
class EtlRetryTest(suite.TestBase):
def setUp(self):
super(EtlRetryTest, self).setUp()
self.ceiling = 2
self.retries = 0
def test_delegates_args_and_returns(self):
@etl._retry()
def fn(unused_arg, unused_kwarg=None):
return 'value'
self.assertEqual('value', fn('arg', unused_kwarg='unused_kwarg'))
def test_retries_and_then_succeeds_before_hitting_retry_limit(self):
@etl._retry()
def fail_then_succeed():
self.retries += 1
if self.retries < self.ceiling:
raise Exception
fail_then_succeed()
self.assertEqual(self.ceiling, self.retries)
def test_retries_specified_number_of_times_then_throws(self):
@etl._retry()
def fail():
self.retries += 1
raise Exception
self.assertRaises(Exception, fail)
self.assertEqual(etl._RETRIES, self.retries)
class ReviewModuleDomainTests(suite.TestBase):
def test_review_step_predicates(self):
step = domain.ReviewStep()
self.assertFalse(step.is_assigned)
step._state = domain.REVIEW_STATE_ASSIGNED
self.assertTrue(step.is_assigned)
self.assertFalse(step.is_completed)
step._state = domain.REVIEW_STATE_COMPLETED
self.assertTrue(step.is_completed)
self.assertFalse(step.is_expired)
step._state = domain.REVIEW_STATE_EXPIRED
self.assertTrue(step.is_expired)
class SwapTestObject(object):
def __init__(self):
self.member = 'member_value'
def method(self):
return 'method_value'
class SuiteTestCaseTest(suite.TestBase):
"""Sanity check of Suite.TestBase utilities."""
def setUp(self):
super(SuiteTestCaseTest, self).setUp()
self.swap_test_object = SwapTestObject()
self.old_member = self.swap_test_object.member
self.old_method = self.swap_test_object.method
def tearDown(self):
super(SuiteTestCaseTest, self).tearDown()
self.assert_unswapped()
def assert_unswapped(self):
self.assertIs(self.old_member, self.swap_test_object.member)
self.assertEqual(self.old_method(), self.swap_test_object.method())
def test_swaps_against_different_symbols_apply_and_are_unswappable(self):
self.assertEqual('member_value', self.swap_test_object.member)
self.assertEqual('method_value', self.swap_test_object.method())
self.swap(self.swap_test_object, 'member', 'new_member_value')
self.swap(self.swap_test_object, 'method', lambda: 'new_method_value')
self.assertEqual('new_member_value', self.swap_test_object.member)
self.assertEqual('new_method_value', self.swap_test_object.method())
self._unswap_all()
self.assert_unswapped()
def test_tear_down_unswapps_automatically(self):
        # Create a swap for tearDown to unswap via assert_unswapped.
self.swap(self.swap_test_object, 'member', 'new_member_value')
self.assertEqual('new_member_value', self.swap_test_object.member)
def test_unswap_restores_original_after_multiple_swaps(self):
self.assertEqual('method_value', self.swap_test_object.method())
self.swap(self.swap_test_object, 'method', lambda: 'first_swap')
self.swap(self.swap_test_object, 'method', lambda: 'second_swap')
self.assertEqual('second_swap', self.swap_test_object.method())
self._unswap_all()
self.assert_unswapped()
class InvokeExistingUnitTest(suite.TestBase):
"""Run all units tests declared elsewhere."""
def test_existing_unit_tests(self):
"""Run all units tests declared elsewhere."""
vfs.run_all_unit_tests()
sites.run_all_unit_tests()
config.run_all_unit_tests()
verify.run_all_unit_tests()
content.run_all_unit_tests()
xcontent.run_all_unit_tests()
caching.run_all_unit_tests()
def test_string_encoding(self):
"""Test our understanding of Python string encoding aspects.
We were quite naive to believe Python solves all string encoding issues
automatically. That is not completely true and we have to do a lot of
manual work to get it right. Here we capture some of the patterns.
"""
original_encoding = sys.getdefaultencoding()
# Test with 'ascii' default encoding. Note that GAE runs in 'ascii',
# and not in 'utf-8'. There is no way to override this currently.
appengine_config.gcb_force_default_encoding('ascii')
# Note that Python bravely ignores the file encoding declaration
# 'coding: utf-8' at the top of this file. The intuitive behavior would
# be to change the default encoding to 'utf-8' for all the code running
# in the scope of this file.
# Initialization.
test_1 = 'My Test Title Мой заголовок теста'
test_2 = u'My Test Title Мой заголовок теста'
# Types.
assert isinstance(test_1, str)
assert isinstance(test_2, unicode)
assert test_1 != test_2
# Conversions.
assert_fails(lambda: unicode(test_1))
assert unicode(test_1, 'utf-8')
assert isinstance(unicode(test_1, 'utf-8'), unicode)
assert unicode(test_1, 'utf-8') == test_2
# Expressions.
assert_fails(lambda: test_1 + test_2)
assert_fails(lambda: '%s %s' % (test_1, test_2))
assert_fails(lambda: u'%s %s' % (test_1, test_2)) # Why does it fail?
assert_fails(lambda: ''.join([test_1, test_2]))
assert_fails(lambda: u''.join([test_1, test_2])) # Why does it fail?
''.join([unicode(test_1, 'utf-8'), test_2])
# Test with 'utf-8' default encoding.
appengine_config.gcb_force_default_encoding('utf-8')
# Initialization.
test_1 = 'My Test Title Мой заголовок теста'
test_2 = u'My Test Title Мой заголовок теста'
# Types.
assert isinstance(test_1, str) # How can this be true?
assert isinstance(test_2, unicode)
assert test_1 == test_2 # Note '!=' above, and '==' here. Who knew!!!
# Conversions.
assert unicode(test_1) == test_2
assert unicode(test_1, 'utf-8') == test_2
# Expressions.
assert test_1 + test_2
assert '%s %s' % (test_1, test_2)
assert u'%s %s' % (test_1, test_2)
# Clean up.
appengine_config.gcb_force_default_encoding(original_encoding)
def test_dict_merge(self):
real_values = {'foo': 'bar', 'baz': {'alice': 'john'}}
real_original = dict(real_values.items())
default_values = {'foo': 'baz', 'baz': {'alice': 'ana', 'bob': 'sue'}}
default_original = dict(default_values.items())
# Check merge.
assert {'foo': 'bar', 'baz': {'bob': 'sue', 'alice': 'john'}} == (
courses.deep_dict_merge(real_values, default_values))
# Check originals dicts are intact.
assert real_original == real_values
assert default_original == default_values
# Check merge into an empty dict.
assert courses.DEFAULT_COURSE_YAML_DICT == courses.deep_dict_merge(
{}, courses.DEFAULT_COURSE_YAML_DICT)
# Check value does not merge into dictionary.
real_values = {'foo': 'bar'}
default_values = {'foo': {'bar': 'baz'}}
assert {'foo': 'bar'} == (
courses.deep_dict_merge(real_values, default_values))
# Test array element.
real_values = {'foo': [1, 2, 3]}
default_values = {'baz': [4, 5, 6]}
assert {'foo': [1, 2, 3], 'baz': [4, 5, 6]} == (
courses.deep_dict_merge(real_values, default_values))
def test_app_context_equals(self):
# pylint: disable=g-equals-none
app_context_a = sites.ApplicationContext(
'course', '/slug_a', '/', 'ns_a', None)
app_context_b = sites.ApplicationContext(
'course', '/slug_a', '/', 'ns_a', None)
app_context_c = sites.ApplicationContext(
'course', '/slug_c', '/', 'ns_c', None)
app_context_ad = sites.ApplicationContext(
'course', '/slug_d', '/', 'ns_a', None)
app_context_af = sites.ApplicationContext(
'course', '/slug_a', '/', 'ns_f', None)
self.assertFalse(app_context_a is app_context_b)
self.assertTrue(app_context_a == app_context_b)
self.assertFalse(app_context_a != app_context_b)
self.assertTrue(app_context_a != app_context_c)
self.assertFalse(app_context_a == app_context_c)
self.assertTrue(app_context_a != app_context_ad)
self.assertFalse(app_context_a == app_context_ad)
self.assertTrue(app_context_a != app_context_af)
self.assertFalse(app_context_a == app_context_af)
self.assertTrue(app_context_a != None)
self.assertFalse(app_context_a == None)
def test_app_context_affinity(self):
app_context_a = sites.ApplicationContext(
'course', '/slug_a', '/', 'ns_a', None)
app_context_b = sites.ApplicationContext(
'course', '/slug_a', '/', 'ns_a', None)
app_context_c = sites.ApplicationContext(
'course', '/slug_c', '/', 'ns_c', None)
class MySingleton(caching.RequestScopedSingleton):
def __init__(self, app_context):
self.app_context = app_context
# this tests creation of a fresh new singleton for an app_context
cache_a_1 = MySingleton.instance(app_context_a)
cache_a_2 = MySingleton.instance(app_context_a)
self.assertTrue(cache_a_1.app_context is app_context_a)
self.assertTrue(cache_a_2.app_context is app_context_a)
        # this test finds a singleton using a different instance of app_context;
# this app_context is compatible with the first one via __eq__()
cache_b_1 = MySingleton.instance(app_context_b)
cache_b_2 = MySingleton.instance(app_context_b)
self.assertTrue(cache_b_1.app_context is app_context_a)
self.assertTrue(cache_b_2.app_context is app_context_a)
# this raises because singleton is already bound to a specific
# app_context and an attempt to get the same singleton with another
# incompatible app_context must fail; we could handle this inside the
# cache, but the current decision is: this is not supported
with self.assertRaises(AssertionError):
MySingleton.instance(app_context_c)
# clear all singletons and try again; it works now
MySingleton.clear_all()
cache_c_1 = MySingleton.instance(app_context_c)
self.assertTrue(cache_c_1.app_context is app_context_c)
if __name__ == '__main__':
unittest.TextTestRunner().run(
unittest.TestLoader().loadTestsFromTestCase(InvokeExistingUnitTest))
|
{
"content_hash": "fab532f5167babc6487737274daed3e2",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 79,
"avg_line_length": 36.27272727272727,
"alnum_prop": 0.6135651629072681,
"repo_name": "wavemind/gcb17ml",
"id": "128f51d47f771dd2a9e04ff519e1b212cdab1528",
"size": "13450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_classes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "62209"
},
{
"name": "JavaScript",
"bytes": "425162"
},
{
"name": "Python",
"bytes": "3344249"
},
{
"name": "Shell",
"bytes": "23773"
}
],
"symlink_target": ""
}
|
"""Methods related to test expectations/expectation files."""
from __future__ import print_function
import collections
import datetime
import logging
import os
import re
import subprocess
import sys
from typing import Iterable, List, Optional, Set, Tuple, Union
import six
from typ import expectations_parser
from unexpected_passes_common import data_types
from unexpected_passes_common import result_output
FINDER_DISABLE_COMMENT_BASE = 'finder:disable'
FINDER_ENABLE_COMMENT_BASE = 'finder:enable'
FINDER_COMMENT_SUFFIX_GENERAL = '-general'
FINDER_COMMENT_SUFFIX_STALE = '-stale'
FINDER_COMMENT_SUFFIX_UNUSED = '-unused'
ALL_FINDER_SUFFIXES = frozenset([
FINDER_COMMENT_SUFFIX_GENERAL,
FINDER_COMMENT_SUFFIX_STALE,
FINDER_COMMENT_SUFFIX_UNUSED,
])
FINDER_DISABLE_COMMENT_GENERAL = (FINDER_DISABLE_COMMENT_BASE +
FINDER_COMMENT_SUFFIX_GENERAL)
FINDER_DISABLE_COMMENT_STALE = (FINDER_DISABLE_COMMENT_BASE +
FINDER_COMMENT_SUFFIX_STALE)
FINDER_DISABLE_COMMENT_UNUSED = (FINDER_DISABLE_COMMENT_BASE +
FINDER_COMMENT_SUFFIX_UNUSED)
FINDER_ENABLE_COMMENT_GENERAL = (FINDER_ENABLE_COMMENT_BASE +
FINDER_COMMENT_SUFFIX_GENERAL)
FINDER_ENABLE_COMMENT_STALE = (FINDER_ENABLE_COMMENT_BASE +
FINDER_COMMENT_SUFFIX_STALE)
FINDER_ENABLE_COMMENT_UNUSED = (FINDER_ENABLE_COMMENT_BASE +
FINDER_COMMENT_SUFFIX_UNUSED)
FINDER_DISABLE_COMMENTS = frozenset([
FINDER_DISABLE_COMMENT_GENERAL, FINDER_DISABLE_COMMENT_STALE,
FINDER_DISABLE_COMMENT_UNUSED
])
FINDER_ENABLE_COMMENTS = frozenset([
FINDER_ENABLE_COMMENT_GENERAL,
FINDER_ENABLE_COMMENT_STALE,
FINDER_ENABLE_COMMENT_UNUSED,
])
ALL_FINDER_COMMENTS = frozenset(FINDER_DISABLE_COMMENTS
| FINDER_ENABLE_COMMENTS)
GIT_BLAME_REGEX = re.compile(
r'^[\w\s]+\(.+(?P<date>\d\d\d\d-\d\d-\d\d)[^\)]+\)(?P<content>.*)$',
re.DOTALL)
EXPECTATION_LINE_REGEX = re.compile(r'^.*\[ .* \] .* \[ \w* \].*$', re.DOTALL)
# pylint: disable=useless-object-inheritance
class RemovalType(object):
STALE = FINDER_COMMENT_SUFFIX_STALE
UNUSED = FINDER_COMMENT_SUFFIX_UNUSED
class Expectations(object):
def CreateTestExpectationMap(
self, expectation_files: Optional[Union[str, List[str]]],
tests: Optional[Iterable[str]],
grace_period: int) -> data_types.TestExpectationMap:
"""Creates an expectation map based off a file or list of tests.
Args:
expectation_files: A filepath or list of filepaths to expectation files to
read from, or None. If a filepath is specified, |tests| must be None.
tests: An iterable of strings containing test names to check. If
          specified, |expectation_files| must be None.
grace_period: An int specifying how many days old an expectation must
be in order to be parsed, i.e. how many days old an expectation must
be before it is a candidate for removal/modification.
Returns:
A data_types.TestExpectationMap, although all its BuilderStepMap contents
will be empty.
"""
def AddContentToMap(content: str, ex_map: data_types.TestExpectationMap,
expectation_file_name: str) -> None:
list_parser = expectations_parser.TaggedTestListParser(content)
expectations_for_file = ex_map.setdefault(
expectation_file_name, data_types.ExpectationBuilderMap())
logging.debug('Parsed %d expectations', len(list_parser.expectations))
for e in list_parser.expectations:
if 'Skip' in e.raw_results:
continue
# Expectations that only have a Pass expectation (usually used to
# override a broader, failing expectation) are not handled by the
# unexpected pass finder, so ignore those.
if e.raw_results == ['Pass']:
continue
expectation = data_types.Expectation(e.test, e.tags, e.raw_results,
e.reason)
assert expectation not in expectations_for_file
expectations_for_file[expectation] = data_types.BuilderStepMap()
logging.info('Creating test expectation map')
assert expectation_files or tests
assert not (expectation_files and tests)
expectation_map = data_types.TestExpectationMap()
if expectation_files:
if not isinstance(expectation_files, list):
expectation_files = [expectation_files]
for ef in expectation_files:
# Normalize to '/' as the path separator.
expectation_file_name = os.path.normpath(ef).replace(os.path.sep, '/')
content = self._GetNonRecentExpectationContent(expectation_file_name,
grace_period)
AddContentToMap(content, expectation_map, expectation_file_name)
else:
expectation_file_name = ''
content = '# results: [ RetryOnFailure ]\n'
for t in tests:
content += '%s [ RetryOnFailure ]\n' % t
AddContentToMap(content, expectation_map, expectation_file_name)
return expectation_map
def _GetNonRecentExpectationContent(self, expectation_file_path: str,
num_days: int) -> str:
"""Gets content from |expectation_file_path| older than |num_days| days.
Args:
expectation_file_path: A string containing a filepath pointing to an
expectation file.
num_days: An int containing how old an expectation in the given
expectation file must be to be included.
Returns:
The contents of the expectation file located at |expectation_file_path|
as a string with any recent expectations removed.
"""
num_days = datetime.timedelta(days=num_days)
content = ''
# `git blame` output is normally in the format:
# revision optional_filename (author date time timezone lineno) line_content
# The --porcelain option is meant to be more machine readable, but is much
# more difficult to parse for what we need to do here. In order to
# guarantee that the filename won't be included in the output (by default,
# it will be shown if there is content from a renamed file), pass -c to
# use the same format as `git annotate`, which is:
# revision (author date time timezone lineno)line_content
# (Note the lack of space between the ) and the content).
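    # A made-up example of an annotate-format line that GIT_BLAME_REGEX is
    # expected to match (revision, author, bug and test name are hypothetical):
    #   abc123de (Jane Doe 2020-01-15 12:34:56 +0000 42)crbug.com/1234 [ win ] foo_test [ Failure ]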
cmd = ['git', 'blame', '-c', expectation_file_path]
with open(os.devnull, 'w') as devnull:
blame_output = subprocess.check_output(cmd,
stderr=devnull).decode('utf-8')
for line in blame_output.splitlines(True):
match = GIT_BLAME_REGEX.match(line)
assert match
date = match.groupdict()['date']
line_content = match.groupdict()['content']
if EXPECTATION_LINE_REGEX.match(line):
if six.PY2:
date_parts = date.split('-')
date = datetime.date(year=int(date_parts[0]),
month=int(date_parts[1]),
day=int(date_parts[2]))
else:
date = datetime.date.fromisoformat(date)
date_diff = datetime.date.today() - date
if date_diff > num_days:
content += line_content
else:
logging.debug('Omitting expectation %s because it is too new',
line_content.rstrip())
else:
content += line_content
return content
def RemoveExpectationsFromFile(self,
expectations: List[data_types.Expectation],
expectation_file: str,
removal_type: str) -> Set[str]:
"""Removes lines corresponding to |expectations| from |expectation_file|.
Ignores any lines that match but are within a disable block or have an
inline disable comment.
Args:
expectations: A list of data_types.Expectations to remove.
expectation_file: A filepath pointing to an expectation file to remove
lines from.
removal_type: A RemovalType enum corresponding to the type of expectations
being removed.
Returns:
A set of strings containing URLs of bugs associated with the removed
expectations.
"""
with open(expectation_file) as f:
input_contents = f.read()
output_contents = ''
in_disable_block = False
disable_block_reason = ''
disable_block_suffix = ''
removed_urls = set()
for line in input_contents.splitlines(True):
# Auto-add any comments or empty lines
stripped_line = line.strip()
if _IsCommentOrBlankLine(stripped_line):
output_contents += line
# Only allow one enable/disable per line.
assert len([c for c in ALL_FINDER_COMMENTS if c in line]) <= 1
# Handle disable/enable block comments.
if _LineContainsDisableComment(line):
if in_disable_block:
raise RuntimeError(
'Invalid expectation file %s - contains a disable comment "%s" '
'that is in another disable block.' %
(expectation_file, stripped_line))
in_disable_block = True
disable_block_reason = _GetDisableReasonFromComment(line)
disable_block_suffix = _GetFinderCommentSuffix(line)
if _LineContainsEnableComment(line):
if not in_disable_block:
raise RuntimeError(
'Invalid expectation file %s - contains an enable comment "%s" '
'that is outside of a disable block.' %
(expectation_file, stripped_line))
in_disable_block = False
continue
current_expectation = self._CreateExpectationFromExpectationFileLine(
line, expectation_file)
# Add any lines containing expectations that don't match any of the given
# expectations to remove.
if any(e for e in expectations if e == current_expectation):
# Skip any expectations that match if we're in a disable block or there
# is an inline disable comment.
if in_disable_block and _DisableSuffixIsRelevant(
disable_block_suffix, removal_type):
output_contents += line
logging.info(
'Would have removed expectation %s, but inside a disable block '
'with reason %s', stripped_line, disable_block_reason)
elif _LineContainsRelevantDisableComment(line, removal_type):
output_contents += line
logging.info(
'Would have removed expectation %s, but it has an inline disable '
'comment with reason %s',
stripped_line.split('#')[0], _GetDisableReasonFromComment(line))
else:
bug = current_expectation.bug
if bug:
# It's possible to have multiple whitespace-separated bugs per
# expectation, so treat each one separately.
removed_urls |= set(bug.split())
else:
output_contents += line
with open(expectation_file, 'w') as f:
f.write(output_contents)
return removed_urls
def _CreateExpectationFromExpectationFileLine(self, line: str,
expectation_file: str
) -> data_types.Expectation:
"""Creates a data_types.Expectation from |line|.
Args:
line: A string containing a single line from an expectation file.
expectation_file: A filepath pointing to an expectation file |line| came
from.
Returns:
A data_types.Expectation containing the same information as |line|.
"""
header = self._GetExpectationFileTagHeader(expectation_file)
single_line_content = header + line
list_parser = expectations_parser.TaggedTestListParser(single_line_content)
assert len(list_parser.expectations) == 1
typ_expectation = list_parser.expectations[0]
return data_types.Expectation(typ_expectation.test, typ_expectation.tags,
typ_expectation.raw_results,
typ_expectation.reason)
def _GetExpectationFileTagHeader(self, expectation_file: str) -> str:
"""Gets the tag header used for expectation files.
Args:
expectation_file: A filepath pointing to an expectation file to get the
tag header from.
Returns:
A string containing an expectation file header, i.e. the comment block at
the top of the file defining possible tags and expected results.
"""
raise NotImplementedError()
def ModifySemiStaleExpectations(
self, stale_expectation_map: data_types.TestExpectationMap) -> Set[str]:
"""Modifies lines from |stale_expectation_map| in |expectation_file|.
Prompts the user for each modification and provides debug information since
semi-stale expectations cannot be blindly removed like fully stale ones.
Args:
stale_expectation_map: A data_types.TestExpectationMap containing stale
expectations.
file_handle: An optional open file-like object to output to. If not
specified, stdout will be used.
Returns:
A set of strings containing URLs of bugs associated with the modified
(manually modified by the user or removed by the script) expectations.
"""
expectations_to_remove = []
expectations_to_modify = []
modified_urls = set()
for expectation_file, e, builder_map in (
stale_expectation_map.IterBuilderStepMaps()):
with open(expectation_file) as infile:
file_contents = infile.read()
line, line_number = self._GetExpectationLine(e, file_contents,
expectation_file)
expectation_str = None
if not line:
logging.error(
'Could not find line corresponding to semi-stale expectation for '
'%s with tags %s and expected results %s', e.test, e.tags,
e.expected_results)
expectation_str = '[ %s ] %s [ %s ]' % (' '.join(
e.tags), e.test, ' '.join(e.expected_results))
else:
expectation_str = '%s (approx. line %d)' % (line, line_number)
str_dict = result_output.ConvertBuilderMapToPassOrderedStringDict(
builder_map)
print('\nSemi-stale expectation:\n%s' % expectation_str)
result_output.RecursivePrintToFile(str_dict, 1, sys.stdout)
response = _WaitForUserInputOnModification()
if response == 'r':
expectations_to_remove.append(e)
elif response == 'm':
expectations_to_modify.append(e)
# It's possible that the user will introduce a typo while manually
# modifying an expectation, which will cause a parser error. Catch that
# now and give them chances to fix it so that they don't lose all of their
# work due to an early exit.
while True:
try:
with open(expectation_file) as infile:
file_contents = infile.read()
_ = expectations_parser.TaggedTestListParser(file_contents)
break
except expectations_parser.ParseError as error:
logging.error('Got parser error: %s', error)
logging.error(
'This probably means you introduced a typo, please fix it.')
_WaitForAnyUserInput()
modified_urls |= self.RemoveExpectationsFromFile(expectations_to_remove,
expectation_file,
RemovalType.STALE)
for e in expectations_to_modify:
modified_urls |= set(e.bug.split())
return modified_urls
def _GetExpectationLine(self, expectation: data_types.Expectation,
file_contents: str, expectation_file: str
) -> Union[Tuple[None, None], Tuple[str, int]]:
"""Gets the line and line number of |expectation| in |file_contents|.
Args:
expectation: A data_types.Expectation.
file_contents: A string containing the contents read from an expectation
file.
expectation_file: A string containing the path to the expectation file
that |file_contents| came from.
Returns:
A tuple (line, line_number). |line| is a string containing the exact line
in |file_contents| corresponding to |expectation|. |line_number| is an int
corresponding to where |line| is in |file_contents|. |line_number| may be
off if the file on disk has changed since |file_contents| was read. If a
corresponding line cannot be found, both |line| and |line_number| are
None.
"""
    # We have all the information necessary to recreate the expectation line,
    # and the line number can be pulled during the initial parsing. However,
# the information we have is not necessarily in the same order as the
# text file (e.g. tag ordering), and line numbers can change pretty
# dramatically between the initial parse and now due to stale expectations
# being removed. So, parse this way in order to improve the user experience.
file_lines = file_contents.splitlines()
for line_number, line in enumerate(file_lines):
if _IsCommentOrBlankLine(line.strip()):
continue
current_expectation = self._CreateExpectationFromExpectationFileLine(
line, expectation_file)
if expectation == current_expectation:
return line, line_number + 1
return None, None
def FindOrphanedBugs(self, affected_urls: Iterable[str]) -> Set[str]:
"""Finds cases where expectations for bugs no longer exist.
Args:
affected_urls: An iterable of affected bug URLs, as returned by functions
such as RemoveExpectationsFromFile.
Returns:
A set containing a subset of |affected_urls| who no longer have any
associated expectations in any expectation files.
"""
seen_bugs = set()
expectation_files = self.GetExpectationFilepaths()
for ef in expectation_files:
with open(ef) as infile:
contents = infile.read()
for url in affected_urls:
if url in seen_bugs:
continue
if url in contents:
seen_bugs.add(url)
return set(affected_urls) - seen_bugs
def GetExpectationFilepaths(self) -> List[str]:
"""Gets all the filepaths to expectation files of interest.
Returns:
A list of strings, each element being a filepath pointing towards an
expectation file.
"""
raise NotImplementedError()
def _WaitForAnyUserInput() -> None:
"""Waits for any user input.
Split out for testing purposes.
"""
_get_input('Press any key to continue')
def _WaitForUserInputOnModification() -> str:
"""Waits for user input on how to modify a semi-stale expectation.
Returns:
One of the following string values:
i - Expectation should be ignored and left alone.
m - Expectation will be manually modified by the user.
r - Expectation should be removed by the script.
"""
valid_inputs = ['i', 'm', 'r']
prompt = ('How should this expectation be handled? (i)gnore/(m)anually '
'modify/(r)emove: ')
response = _get_input(prompt).lower()
while response not in valid_inputs:
print('Invalid input, valid inputs are %s' % (', '.join(valid_inputs)))
response = _get_input(prompt).lower()
return response
def _LineContainsDisableComment(line: str) -> bool:
return FINDER_DISABLE_COMMENT_BASE in line
def _LineContainsEnableComment(line: str) -> bool:
return FINDER_ENABLE_COMMENT_BASE in line
def _GetFinderCommentSuffix(line: str) -> str:
"""Gets the suffix of the finder comment on the given line.
Examples:
'foo # finder:disable' -> ''
'foo # finder:disable-stale some_reason' -> '-stale'
"""
target_str = None
if _LineContainsDisableComment(line):
target_str = FINDER_DISABLE_COMMENT_BASE
elif _LineContainsEnableComment(line):
target_str = FINDER_ENABLE_COMMENT_BASE
else:
raise RuntimeError('Given line %s did not have a finder comment.' % line)
line = line[line.find(target_str):]
line = line.split()[0]
suffix = line.replace(target_str, '')
assert suffix in ALL_FINDER_SUFFIXES
return suffix
def _LineContainsRelevantDisableComment(line: str, removal_type: str) -> bool:
"""Returns whether the given line contains a relevant disable comment.
Args:
line: A string containing the line to check.
removal_type: A RemovalType enum corresponding to the type of expectations
being removed.
Returns:
A bool denoting whether |line| contains a relevant disable comment given
|removal_type|.
"""
if FINDER_DISABLE_COMMENT_GENERAL in line:
return True
if FINDER_DISABLE_COMMENT_BASE + removal_type in line:
return True
return False
def _DisableSuffixIsRelevant(suffix: str, removal_type: str) -> bool:
"""Returns whether the given suffix is relevant given the removal type.
Args:
suffix: A string containing a disable comment suffix.
removal_type: A RemovalType enum corresponding to the type of expectations
being removed.
Returns:
True if suffix is relevant and its disable request should be honored.
"""
if suffix == FINDER_COMMENT_SUFFIX_GENERAL:
return True
if suffix == removal_type:
return True
return False
def _GetDisableReasonFromComment(line: str) -> str:
suffix = _GetFinderCommentSuffix(line)
return line.split(FINDER_DISABLE_COMMENT_BASE + suffix, 1)[1].strip()
def _IsCommentOrBlankLine(line: str) -> bool:
return (not line or line.startswith('#'))
def _get_input(prompt: str) -> str:
if sys.version_info[0] == 2:
return raw_input(prompt)
return input(prompt)
|
{
"content_hash": "09f7ebb9a6e90a7812210f85169d7401",
"timestamp": "",
"source": "github",
"line_count": 559,
"max_line_length": 80,
"avg_line_length": 39.02325581395349,
"alnum_prop": 0.6507288897038599,
"repo_name": "nwjs/chromium.src",
"id": "8a9997c75cffe6348fac00e8a3e23e0ea3e49b69",
"size": "21954",
"binary": false,
"copies": "7",
"ref": "refs/heads/nw70",
"path": "testing/unexpected_passes_common/expectations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Client actions related to administrating the client and its configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import os
import platform
import socket
import time
import traceback
import cryptography
from cryptography.hazmat.backends import openssl
from future.builtins import map
from future.builtins import range
from future.builtins import str
from future.utils import iteritems
import pkg_resources
import psutil
import pytsk3
from grr_response_client import actions
from grr_response_client.client_actions import tempfiles
from grr_response_core import config
from grr_response_core.lib import communicator
from grr_response_core.lib import config_lib
from grr_response_core.lib import queues
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
class Echo(actions.ActionPlugin):
"""Returns a message to the server."""
in_rdfvalue = rdf_client_action.EchoRequest
out_rdfvalues = [rdf_client_action.EchoRequest]
def Run(self, args):
self.SendReply(args)
def GetHostnameFromClient(args):
del args # Unused.
yield rdf_protodict.DataBlob(string=socket.gethostname())
class GetHostname(actions.ActionPlugin):
"""Retrieves the host name of the client."""
out_rdfvalues = [rdf_protodict.DataBlob]
def Run(self, args):
for res in GetHostnameFromClient(args):
self.SendReply(res)
class GetPlatformInfo(actions.ActionPlugin):
"""Retrieves platform information."""
out_rdfvalues = [rdf_client.Uname]
def Run(self, unused_args):
"""Populate platform information into a Uname response."""
self.SendReply(rdf_client.Uname.FromCurrentSystem())
class Kill(actions.ActionPlugin):
"""A client action for terminating (killing) the client.
Used for testing process respawn.
"""
out_rdfvalues = [rdf_flows.GrrMessage]
def Run(self, unused_arg):
"""Run the kill."""
# Send a message back to the service to say that we are about to shutdown.
reply = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
# Queue up the response message, jump the queue.
self.SendReply(reply, message_type=rdf_flows.GrrMessage.Type.STATUS)
# Give the http thread some time to send the reply.
self.grr_worker.Sleep(10)
# Die ourselves.
logging.info("Dying on request.")
os._exit(242) # pylint: disable=protected-access
class Hang(actions.ActionPlugin):
"""A client action for simulating the client becoming unresponsive (hanging).
Used for testing nanny terminating the client.
"""
in_rdfvalue = rdf_protodict.DataBlob
def Run(self, arg):
# Sleep a really long time.
time.sleep(arg.integer or 6000)
class BusyHang(actions.ActionPlugin):
"""A client action that burns cpu cycles. Used for testing cpu limits."""
in_rdfvalue = rdf_protodict.DataBlob
def Run(self, arg):
duration = 5
if arg and arg.integer:
duration = arg.integer
end = time.time() + duration
while time.time() < end:
pass
class Bloat(actions.ActionPlugin):
"""A client action that uses lots of memory for testing."""
in_rdfvalue = rdf_protodict.DataBlob
def Run(self, arg):
iterations = arg.integer or 1024 # Gives 1 gb.
l = []
for _ in range(iterations):
l.append("X" * 1048576) # 1 mb.
time.sleep(60)
class GetConfiguration(actions.ActionPlugin):
"""Retrieves the running configuration parameters."""
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
BLOCKED_PARAMETERS = ["Client.private_key"]
def Run(self, unused_arg):
"""Retrieve the configuration except for the blocked parameters."""
out = self.out_rdfvalues[0]()
for descriptor in config.CONFIG.type_infos:
if descriptor.name in self.BLOCKED_PARAMETERS:
value = "[Redacted]"
else:
try:
value = config.CONFIG.Get(descriptor.name, default=None)
except (config_lib.Error, KeyError, AttributeError, ValueError) as e:
logging.info("Config reading error: %s", e)
continue
if value is not None:
out[descriptor.name] = value
self.SendReply(out)
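  # Illustrative reply (values are hypothetical): a Dict such as
  #   {"Client.poll_min": 0.2, ..., "Client.private_key": "[Redacted]"}
  # i.e. every readable parameter, with blocked ones masked rather than omitted.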
class GetLibraryVersions(actions.ActionPlugin):
"""Retrieves version information for installed libraries."""
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
def GetSSLVersion(self):
return openssl.backend.openssl_version_text()
def GetCryptographyVersion(self):
return cryptography.__version__
def GetPSUtilVersion(self):
return ".".join(map(utils.SmartUnicode, psutil.version_info))
def GetProtoVersion(self):
return pkg_resources.get_distribution("protobuf").version
def GetTSKVersion(self):
return pytsk3.TSK_VERSION_STR
def GetPyTSKVersion(self):
return pytsk3.get_version()
library_map = {
"pytsk": GetPyTSKVersion,
"TSK": GetTSKVersion,
"cryptography": GetCryptographyVersion,
"SSL": GetSSLVersion,
"psutil": GetPSUtilVersion,
}
error_str = "Unable to determine library version: %s"
def Run(self, unused_arg):
result = self.out_rdfvalues[0]()
for lib, f in iteritems(self.library_map):
try:
result[lib] = f(self)
except Exception: # pylint: disable=broad-except
result[lib] = self.error_str % traceback.format_exc()
self.SendReply(result)
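  # Illustrative reply (version strings are hypothetical), e.g.
  #   {"pytsk": "20170802", "TSK": "4.6.2", "cryptography": "2.3", ...}
  # with any probe that raises replaced by error_str % the formatted traceback.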
class UpdateConfiguration(actions.ActionPlugin):
"""Updates configuration parameters on the client."""
in_rdfvalue = rdf_protodict.Dict
UPDATABLE_FIELDS = {"Client.foreman_check_frequency",
"Client.server_urls",
"Client.max_post_size",
"Client.max_out_queue",
"Client.poll_min",
"Client.poll_max",
"Client.rss_max"} # pyformat: disable
def _UpdateConfig(self, filtered_arg, config_obj):
for field, value in iteritems(filtered_arg):
config_obj.Set(field, value)
try:
config_obj.Write()
except (IOError, OSError):
pass
def Run(self, arg):
"""Does the actual work."""
try:
if self.grr_worker.client.FleetspeakEnabled():
raise ValueError("Not supported on Fleetspeak enabled clients.")
except AttributeError:
pass
smart_arg = {str(field): value for field, value in iteritems(arg)}
disallowed_fields = [
field for field in smart_arg
if field not in UpdateConfiguration.UPDATABLE_FIELDS
]
if disallowed_fields:
raise ValueError("Received an update request for restricted field(s) %s."
% ",".join(disallowed_fields))
if platform.system() != "Windows":
# Check config validity before really applying the changes. This isn't
# implemented for our Windows clients though, whose configs are stored in
# the registry, as opposed to in the filesystem.
canary_config = config.CONFIG.CopyConfig()
# Prepare a temporary file we'll write changes to.
with tempfiles.CreateGRRTempFile(mode="w+") as temp_fd:
temp_filename = temp_fd.name
# Write canary_config changes to temp_filename.
canary_config.SetWriteBack(temp_filename)
self._UpdateConfig(smart_arg, canary_config)
try:
# Assert temp_filename is usable by loading it.
canary_config.SetWriteBack(temp_filename)
# Wide exception handling passed here from config_lib.py...
except Exception: # pylint: disable=broad-except
logging.warning("Updated config file %s is not usable.", temp_filename)
raise
# If temp_filename works, remove it (if not, it's useful for debugging).
os.unlink(temp_filename)
# The changes seem to work, so push them to the real config.
self._UpdateConfig(smart_arg, config.CONFIG)
def GetClientInformation():
return rdf_client.ClientInformation(
client_name=config.CONFIG["Client.name"],
client_description=config.CONFIG["Client.description"],
client_version=int(config.CONFIG["Source.version_numeric"]),
build_time=config.CONFIG["Client.build_time"],
labels=config.CONFIG.Get("Client.labels", default=None))
class GetClientInfo(actions.ActionPlugin):
"""Obtains information about the GRR client installed."""
out_rdfvalues = [rdf_client.ClientInformation]
def Run(self, unused_args):
self.SendReply(GetClientInformation())
class GetClientStats(actions.ActionPlugin):
"""This retrieves some stats about the GRR process."""
in_rdfvalue = rdf_client_action.GetClientStatsRequest
out_rdfvalues = [rdf_client_stats.ClientStats]
def Run(self, arg):
"""Returns the client stats."""
if arg is None:
arg = rdf_client_action.GetClientStatsRequest()
proc = psutil.Process(os.getpid())
meminfo = proc.memory_info()
boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())
create_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(proc.create_time())
response = rdf_client_stats.ClientStats(
RSS_size=meminfo.rss,
VMS_size=meminfo.vms,
memory_percent=proc.memory_percent(),
bytes_received=communicator.GRR_CLIENT_RECEIVED_BYTES.GetValue(),
bytes_sent=communicator.GRR_CLIENT_SENT_BYTES.GetValue(),
create_time=create_time,
boot_time=boot_time)
response.cpu_samples = self.grr_worker.stats_collector.CpuSamplesBetween(
start_time=arg.start_time, end_time=arg.end_time)
response.io_samples = self.grr_worker.stats_collector.IOSamplesBetween(
start_time=arg.start_time, end_time=arg.end_time)
self.Send(response)
def Send(self, response):
self.SendReply(response)
class GetClientStatsAuto(GetClientStats):
"""This class is used to send the reply to a well known flow on the server."""
def Send(self, response):
self.grr_worker.SendReply(
rdf_client_stats.ClientStats.Downsampled(response),
session_id=rdfvalue.SessionID(queue=queues.STATS, flow_name="Stats"),
response_id=0,
request_id=0,
message_type=rdf_flows.GrrMessage.Type.MESSAGE,
require_fastpoll=False)
class SendStartupInfo(actions.ActionPlugin):
in_rdfvalue = None
out_rdfvalues = [rdf_client.StartupInfo]
well_known_session_id = rdfvalue.SessionID(flow_name="Startup")
def Run(self, unused_arg, ttl=None):
"""Returns the startup information."""
logging.debug("Sending startup information.")
boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())
response = rdf_client.StartupInfo(
boot_time=boot_time, client_info=GetClientInformation())
self.grr_worker.SendReply(
response,
session_id=self.well_known_session_id,
response_id=0,
request_id=0,
message_type=rdf_flows.GrrMessage.Type.MESSAGE,
require_fastpoll=False,
ttl=ttl)
|
{
"content_hash": "8fca4d0fe66da4462e47dce809364c22",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 80,
"avg_line_length": 31.288888888888888,
"alnum_prop": 0.6976207386363636,
"repo_name": "demonchild2112/travis-test",
"id": "bf61628aa907bee07793e45714bedfedbc5ad6d4",
"size": "11286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/client/grr_response_client/client_actions/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3446"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "35549"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HCL",
"bytes": "7208"
},
{
"name": "HTML",
"bytes": "190212"
},
{
"name": "JavaScript",
"bytes": "11691"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7213255"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "48882"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "51"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_connectivity_connection import TapiConnectivityConnection # noqa: F401,E501
from tapi_server.models.tapi_connectivity_connectivity_service import TapiConnectivityConnectivityService # noqa: F401,E501
from tapi_server import util
class TapiConnectivityConnectivityContext(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, connectivity_service=None, connection=None): # noqa: E501
"""TapiConnectivityConnectivityContext - a model defined in OpenAPI
:param connectivity_service: The connectivity_service of this TapiConnectivityConnectivityContext. # noqa: E501
:type connectivity_service: List[TapiConnectivityConnectivityService]
:param connection: The connection of this TapiConnectivityConnectivityContext. # noqa: E501
:type connection: List[TapiConnectivityConnection]
"""
self.openapi_types = {
'connectivity_service': List[TapiConnectivityConnectivityService],
'connection': List[TapiConnectivityConnection]
}
self.attribute_map = {
'connectivity_service': 'connectivity-service',
'connection': 'connection'
}
self._connectivity_service = connectivity_service
self._connection = connection
@classmethod
def from_dict(cls, dikt) -> 'TapiConnectivityConnectivityContext':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.connectivity.ConnectivityContext of this TapiConnectivityConnectivityContext. # noqa: E501
:rtype: TapiConnectivityConnectivityContext
"""
return util.deserialize_model(dikt, cls)
@property
def connectivity_service(self):
"""Gets the connectivity_service of this TapiConnectivityConnectivityContext.
none # noqa: E501
:return: The connectivity_service of this TapiConnectivityConnectivityContext.
:rtype: List[TapiConnectivityConnectivityService]
"""
return self._connectivity_service
@connectivity_service.setter
def connectivity_service(self, connectivity_service):
"""Sets the connectivity_service of this TapiConnectivityConnectivityContext.
none # noqa: E501
:param connectivity_service: The connectivity_service of this TapiConnectivityConnectivityContext.
:type connectivity_service: List[TapiConnectivityConnectivityService]
"""
self._connectivity_service = connectivity_service
@property
def connection(self):
"""Gets the connection of this TapiConnectivityConnectivityContext.
none # noqa: E501
:return: The connection of this TapiConnectivityConnectivityContext.
:rtype: List[TapiConnectivityConnection]
"""
return self._connection
@connection.setter
def connection(self, connection):
"""Sets the connection of this TapiConnectivityConnectivityContext.
none # noqa: E501
:param connection: The connection of this TapiConnectivityConnectivityContext.
:type connection: List[TapiConnectivityConnection]
"""
self._connection = connection
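    # Illustrative usage (payload values are hypothetical):
    #   ctx = TapiConnectivityConnectivityContext.from_dict(
    #       {"connectivity-service": [], "connection": []})
    # from_dict() delegates to tapi_server.util.deserialize_model; attribute_map
    # above is what relates the dashed wire names to the snake_case properties.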
|
{
"content_hash": "707fb4e797833ae11174b29d02690a81",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 124,
"avg_line_length": 37.265957446808514,
"alnum_prop": 0.7071081929774479,
"repo_name": "karthik-sethuraman/ONFOpenTransport",
"id": "7610cdfd2a0124450c5253a68e9aa7eb8b0ae212",
"size": "3520",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "RI/flask_server/tapi_server/models/tapi_connectivity_connectivity_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "836"
},
{
"name": "D",
"bytes": "2195"
},
{
"name": "Python",
"bytes": "960828"
},
{
"name": "Shell",
"bytes": "3059"
}
],
"symlink_target": ""
}
|
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
batch_size = 128
num_classes = 10
epochs = 20
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
{
"content_hash": "d0794c39f159c589b0b1b3b5ff7e08cd",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 60,
"avg_line_length": 29.178571428571427,
"alnum_prop": 0.682374541003672,
"repo_name": "antoniosehk/keras-tensorflow-windows-installation",
"id": "1be71e771d7f917b18ae03bc2bb9f2443526294a",
"size": "1634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/mnist_mlp.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import six
import inflection
from schematics import types
from schematics import models
class BetfairModelMeta(models.ModelMeta):
"""Set default `serialized_name` and `deserialize_from` of Schematics types
to camel-cased attribute names.
"""
    def __new__(meta, name, bases, attrs):
        # Iterate with a distinct variable so the class name passed to the
        # parent metaclass below is not clobbered by the last attribute name.
        for attr_name, attr in six.iteritems(attrs):
            if isinstance(attr, types.BaseType):
                camelized = inflection.camelize(attr_name, uppercase_first_letter=False)
                attr.serialized_name = attr.serialized_name or camelized
                attr.deserialize_from = attr.deserialize_from or camelized
        return super(BetfairModelMeta, meta).__new__(meta, name, bases, attrs)
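# Example of the camel-casing above: a field attribute named "event_type_id"
# would get serialized_name/deserialize_from of "eventTypeId"
# (inflection.camelize("event_type_id", uppercase_first_letter=False)), unless
# an explicit serialized_name was already set on the field.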
class BetfairModel(six.with_metaclass(BetfairModelMeta, models.Model)):
def __init__(self, **data):
super(BetfairModel, self).__init__()
self.import_data(data)
def import_data(self, data, **kwargs):
kwargs['strict'] = False
return super(BetfairModel, self).import_data(data, **kwargs)
|
{
"content_hash": "578bc3f09451fbdf8d66c70fdc08b215",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 83,
"avg_line_length": 37,
"alnum_prop": 0.668918918918919,
"repo_name": "Taketrung/betfair.py",
"id": "08856bf52a1e1066848e0566f455496c8a1d4d86",
"size": "1061",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "betfair/meta/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61637"
}
],
"symlink_target": ""
}
|
import wx
import wx.wizard
import os, sys #for lib test
#from textconf import appen # lets import the configuration file editor
class TitledPage(wx.wizard.WizardPageSimple):
def __init__(self, parent, title):
wx.wizard.WizardPageSimple.__init__(self, parent)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self.sizer)
titleText = wx.StaticText(self, -1, title)
titleText.SetFont(
wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(titleText, 0,
wx.ALIGN_CENTRE | wx.ALL, 5)
self.sizer.Add(wx.StaticLine(self, -1), 0,
wx.EXPAND | wx.ALL, 5)
try:
import gnupg
pg = 'yes'
except ImportError:
pg = 'nope'
try:
import regex2dfa
rf = 'yes'
except ImportError:
rf = 'nope'
try:
import fte
ft = 'yes'
except ImportError:
ft = 'nope'
try:
import Crypto
pc = 'yes'
except ImportError:
pc = 'nope'
#install script
def install():
os.system('./../debianinstaller')
return 'ok'
if __name__ == "__main__":
app = wx.PySimpleApp()
wizard = wx.wizard.Wizard(None, -1, "LayerProx Wizard")
page1 = TitledPage(wizard, "Welcome!")
page2 = TitledPage(wizard, "Library install")
#button = wx.Button(page2, -1, "Install it", pos=(200, 110))
#self.bind(wx.EVT_BUTTON, OnClick, button)
#button.SetDefault()
page3 = TitledPage(wizard, "Library check")
page4 = TitledPage(wizard, "Launch")
page1.sizer.Add(wx.StaticText(page1, -1,
"This wizard will guide you throw installing LayerProx"))#welcome msg
page2.sizer.Add(wx.StaticText(page2, -1,
"for layerprox you need the following python libraries:\n\n gnupg\n\n pycrypto \n\n fte and regex2dfa"
"installed: " + install()
)) #lib install
page3.sizer.Add(wx.StaticText(page3, -1,
"python-gnupg: " + pg + '\n\n'
"regex2dfa: " + rf + '\n\n'
"fte: " + ft + '\n\n'
"pycrypto: " + pc + '\n\n'))
page4.sizer.Add(wx.StaticText(page4, -1,
"Finished installing, other cool projects:"
"\n\n"
"My github https://github.com/flipchan \n\n"
"Marionette https://github.com/marionette/marionette \n\n"
"Bitmask https://bitmask.net/ \n\n"
"Tor https://www.torproject.org/ \n\n"
"Demonsaw demonsaw.com\n\n"
"Press finish to configure LayerProx\n\n"))
wx.wizard.WizardPageSimple_Chain(page1, page2)
wx.wizard.WizardPageSimple_Chain(page2, page3)
wx.wizard.WizardPageSimple_Chain(page3, page4)
wizard.FitToPage(page1)
    if wizard.RunWizard(page1):
        print "Press open to edit the conf file"
        try:
            from textconf import appen  # open the configuration file editor
        except (ImportError, IOError):
            print 'something went wrong'
    wizard.Destroy()
|
{
"content_hash": "b4cfd86b924f131cbf4a593e4c15098a",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 107,
"avg_line_length": 28.864077669902912,
"alnum_prop": 0.5882946518668012,
"repo_name": "flipchan/LayerProx",
"id": "5cef94e2fcf0f8f170c19eaf803ffdc7af33962b",
"size": "3016",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gui/setupwizard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22156"
},
{
"name": "HTML",
"bytes": "15466"
},
{
"name": "JavaScript",
"bytes": "48"
},
{
"name": "Python",
"bytes": "1335880"
},
{
"name": "Shell",
"bytes": "12455"
}
],
"symlink_target": ""
}
|
import io
import os
from contexts.plugin_interface import TEST_FOLDER, TEST_FILE
class FileSpecIdentifier:
def __init__(self):
self._specs = None
def setup_parser(self, parser):
parser.add_argument(
'--filespec',
action='store',
dest='specs',
default=None,
help="Path to a file containing files and directories to search for tests.")
def initialise(self, args=None, env=None, file=None, cwd=None):
"""
Filthy hack: we provide file and cwd here rather than as constructor
args because Mr Hodgson decided to pass stdout as the only parameter
to __init__.
File should only be passed during tests.
"""
self.spec_file = args and args.specs or None
self.cwd = cwd or os.getcwd()
self.file = file
return self.spec_file is not None
def identify_folder(self, folder):
for f in self.specs:
if f == folder:
return TEST_FOLDER
if(f.startswith(folder)):
return TEST_FOLDER
def identify_file(self, file):
for f in self.specs:
if(f == file):
return TEST_FILE
@property
def specs(self):
if(self._specs is None):
self.read_from_file()
return self._specs
def get_path(self, p):
return os.path.join(self.cwd, p.rstrip())
def read_from_file(self):
if(self.file is not None):
self._specs = [os.path.join(self.cwd, p) for p in self.file.readlines()]
else:
with io.open(self.spec_file, 'r') as file:
self._specs = [self.get_path(p) for p in file.readlines()]
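    # Illustrative sketch (paths are hypothetical): a --filespec file containing
    #   tests/unit
    #   tests/acceptance/test_api.py
    # is resolved relative to cwd, after which identify_folder() returns
    # TEST_FOLDER for ".../tests/unit" (and, via the startswith check, for its
    # parent directories), while identify_file() returns TEST_FILE only for the
    # exact listed file.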
|
{
"content_hash": "6a01d85a3a9b87c5a82227550477ddbe",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 88,
"avg_line_length": 30.350877192982455,
"alnum_prop": 0.5670520231213872,
"repo_name": "benjamin-hodgson/Contexts",
"id": "ce02898147cc9e0bf67fc51d972545013e78f3e1",
"size": "1730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/contexts/plugins/identification/filespec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "281182"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='ldsconf',
version='1.0.1',
description='LDS General Conference Study Plan Generator',
url='https://github.com/burnhamup/ldsconf',
author='Chris Burnham',
author_email="chris@burnhamup.com",
license='MIT',
packages=['ldsconf'],
install_requires=['lxml', 'requests'],
data_files=[('data', ['data/conferences.json'])],
entry_points={
'console_scripts': [
'ldsconf=ldsconf.main:main'
]
}
)
|
{
"content_hash": "d2d85ec370ba1fa7c9226ebb6ffa4ede",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 26.57894736842105,
"alnum_prop": 0.6138613861386139,
"repo_name": "burnhamup/ldsconf",
"id": "f686ba9429d7abfb6196b7a9e04898b70482d903",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11600"
}
],
"symlink_target": ""
}
|
import json
data = {}
data[('Allow deletion of hypervisor nodes via nova-manage', 'Administrative')] = ['94536']
data[('Add a new nova service which pre-caches images on hypervisor nodes', 'Administrative')] = ['85768']
data[('Add support for monitoring the number of free IPs in a given fixed-ip block', 'Administrative')] = ['94299']
data[('Tweak the output of "nova hypervisor-state" to show the number of free vcpus after the cpu_allocation_ratio is taken into account', 'Administrative')] = ['98058']
data[('Implement support for nested quotas', 'Administrative')] = ['110639']
data[('Add additional database indexes to help address slow SQL queries', 'Administrative')] = ['97520']
data[('Allow ephemeral instance storage to be served by cinder', 'Administrative')] = ['91321']
data[('A scheme to support online SQL schema changes', 'Administrative')] = ['102545']
data[('Allow volumes to be force detached from instances', 'Administrative')] = ['84048']
data[('Refactor the iSCSI driver to support other iSCSI transports besides TCP', 'Administrative')] = ['86637']
data[('More clearly mark auto disabled hypervisors in the SQL database', 'Administrative')] = ['88515']
data[('Consume cinder event notifications', 'Administrative')] = ['87546']
data[('Continue moving cold migrations to conductor', 'Administrative')] = ['86907']
data[('Enable the nova metadata cache to be a shared resource to improve the hit rate', 'Administrative')] = ['121646']
data[('Make TCP keepalive tunable', 'Administrative')] = ['87427']
data[('Allow TLS for connections to spice and vnc consoles', 'Administrative')] = ['101026']
data[('Morphing the v3 API proposal into a v2.1 API', 'API')] = ['84695']
data[('Supporting micro-versions in the v2.1 API', 'API')] = ['96139','101648','104418']
data[('A proposal for how to support neutron in the v3 API', 'API')] = ['92926']
data[('Include extra-specs information in the output of flavor show and list calls', 'API')] = ['95408']
data[('Pass through creation hints to cinder when auto-creating volumes on instance boot', 'API')] = ['106330']
data[('Add support for tagging to the EC2 API', 'API')] = ['90276']
data[('Expose hypervisor metrics via an API', 'API')] = ['91998']
data[('Make fetching images from glance pluggable so that its easier to write alternate implementations', 'Image features')] = ['86583']
data[('Convert to using the glance v2 API', 'Image feature')] = ['84887']
data[('Pre-fetch specific images to hypervisor nodes to speed up boot times', 'Image feature')] = ['85792']
data[('Allow users of Nova such as Trove "hide" instances running as a certain user from the user', 'Instance features')] = ['90678']
data[('Support deletion of volumes when an instance terminates', 'Instance features')] = ['89777']
data[('Implement VMThunder support for booting many identical instances using boot-from-volume', 'Instance features')] = ['94060']
data[('Enable changing the owner of an instance. There were two proposed implementations', 'Instance features')] = ['85811','105367']
data[('Support USB hot plug', 'Instance features')] = ['89842']
data[('Allow USB devices to be passed through to instances', 'Instance features')] = ['86118']
data[('Support specifying the USB controller for USB passthrough', 'Instance features')] = ['88337']
data[('Enable USB redirection over remote console connections', 'Instance features')] = ['89834']
data[('Configure the vCPU over-commitment for specific flavors', 'Instance features')] = ['87213']
data[('Refactor nova-network to be maintainable on freebsd', 'Networking')] = ['95328']
data[('Reconcile how DNS resolution works between Nova and Neutron', 'Networking')] = ['90150']
data[('Associate the least recently used fixed IP address', 'Networking')] = ['102688']
data[('Add an action type to the filter scheduler so it knows why a scheduling operation is occurring', 'Scheduler')] = ['97103']
data[('Use the scheduler to more strongly validate the destination hypervisor node for live migration of instances', 'Scheduler')] = ['89502']
data[('A proposed scheduler which doesn\'t use a SQL database', 'Scheduler')] = ['92128']
data[('Add additional monitors for utilisation based scheduling', 'Scheduler')] = ['89766']
data[('Add utilisation based weighers', 'Scheduler')] = ['90647']
data[('Strongly validate the tenant and user for quota consuming requests with keystone', 'Security')] = ['92507']
data[('Add FreeBSD as a supported hypervisor operating system', 'Hypervisor: FreeBSD')] = ['85119']
data[('Proposed re-integration of the docker driver', 'Hypervisor: Docker')] = ['103571']
data[('Support generation 2 virtual machines', 'Hypervisor: Hyper-V')] = ['103945']
data[('Add power off and reboot support', 'Hypervisor: Hyper-V')] = ['104630']
data[('Use RemoteFX to expose GPU features of instances', 'Hypervisor: Hyper-V')] = ['105041']
data[('Support the rescue instance operation', 'Hypervisor: Hyper-V')] = ['105042']
data[('Allow volumes to be stored on SMB shares instead of just iSCSI', 'Hypervisor: Hyper-V')] = ['102190']
data[('Allow the creation of highly available instances', 'Hypervisor: Hyper-V')] = ['105094']
data[('Add config drive support', 'Hypervisor: Ironic')] = ['98930']
data[('Support for selecting ironic nodes based on boot mode', 'Hypervisor: Ironic')] = ['108582']
data[('Make hugepages accessible to instances', 'Hypervisor: libvirt')] = ['96821']
data[('Allow flavours to specify which libvirt storage engine is used', 'Hypervisor: libvirt')] = ['91957']
data[('Enable Intel dpdkvhost support for attaching VIFs to instances', 'Hypervisor: libvirt')] = ['95805']
data[('Separate out the various supported virtualization types (kvm, lxc, etc) into separate classes', 'Hypervisor: libvirt')] = ['91460']
data[('Add support for SMBFS as a volume type', 'Hypervisor: libvirt')] = ['103203']
data[('Add support for StorPool volumes', 'Hypervisor: libvirt')] = ['115716']
data[('Add support for TPM pass-through to instances', 'Hypervisor: libvirt')] = ['85558']
data[('Use libvirt\'s sharing policy feature to control access to VNC consoles', 'Hypervisor: libvirt')] = ['86901']
data[('Allow instances to be pinned to specific hypervisor CPUs', 'Hypervisor: libvirt')] = ['92054']
data[('Make the virtio driver more configurable', 'Hypervisor: libvirt')] = ['103797']
data[('Allow instances to be booted via PXE instead of downloading an image from glance', 'Hypervisor: libvirt')] = ['118474']
data[('Enable vCPUs to be added to running instances', 'Hypervisor: libvirt')] = ['86273']
data[('Make the USB controller exposed to instances configurable', 'Hypervisor: libvirt')] = ['88334']
data[('Provide an alternative place to store vCenter usernames and passwords instead of nova.conf', 'VMWare specific features')] = ['85502', '85510']
data[('Add support for the Glance VMWare image store support', 'VMWare specific features')] = ['84281']
data[('Expose vCenter resource pools', 'VMWare specific features')] = ['84629']
data[('Refactor utility classes', 'VMWare specific features')] = ['84535']
data[('Allow Nova to access a VMWare image store over NFS', 'VMWare specific features')] = ['104211']
data[('Image cache improvements', 'VMWare specific features')] = ['84662']
new_data = {}
for title, topic in data:
new_data.setdefault(topic, {})
new_data[topic][title] = data[(title, topic)]
with open('juno.json', 'w') as f:
f.write(json.dumps(new_data, indent=4, sort_keys=True))
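# Resulting shape of juno.json (illustrative excerpt): topic -> {title: [ids]},
# e.g. {"API": {"Expose hypervisor metrics via an API": ["91998"], ...}, ...}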
|
{
"content_hash": "67e98ee5ce4a8f5889654b959d3e9bfe",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 169,
"avg_line_length": 74.43,
"alnum_prop": 0.7163778046486632,
"repo_name": "rcbau/hacks",
"id": "b659580651d3a4b7807d1b3891375132081b1bb4",
"size": "7462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/specs/import_wiki.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "716"
},
{
"name": "Python",
"bytes": "123460"
},
{
"name": "Shell",
"bytes": "3021"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from cypy.graph import FrozenGraph, Node, relationship_type, graph_order, graph_size
from cypy.graph.store import PropertyDict
alice = Node("Person", "Employee", name="Alice", age=33)
bob = Node("Person")
carol = Node("Person")
dave = Node("Person")
KNOWS = relationship_type("KNOWS")
LIKES = relationship_type("LIKES")
DISLIKES = relationship_type("DISLIKES")
MARRIED_TO = relationship_type("MARRIED_TO")
WORKS_FOR = relationship_type("WORKS_FOR")
alice_knows_bob = KNOWS(alice, bob, since=1999)
alice_likes_carol = LIKES(alice, carol)
carol_dislikes_bob = DISLIKES(carol, bob)
carol_married_to_dave = MARRIED_TO(carol, dave)
dave_works_for_dave = WORKS_FOR(dave, dave)
class PropertyCoercionTestCase(TestCase):
def test_boolean(self):
props = PropertyDict({"value": True})
assert props == {"value": True}
def test_integer_in_range(self):
props = PropertyDict({"value": 1})
assert props == {"value": 1}
def test_integer_too_high(self):
with self.assertRaises(ValueError):
PropertyDict({"value": 2 ** 64})
def test_integer_too_low(self):
with self.assertRaises(ValueError):
PropertyDict({"value": -(2 ** 64)})
def test_float(self):
props = PropertyDict({"value": 3.141})
assert props == {"value": 3.141}
def test_byte_strings_are_supported(self):
props = PropertyDict({"value": b"hello, world"})
assert props == {"value": b"hello, world"}
def test_unicode_strings_are_supported(self):
props = PropertyDict({"value": u"hello, world"})
assert props == {"value": u"hello, world"}
def test_byte_arrays_are_supported(self):
props = PropertyDict({"value": bytearray(b"hello, world")})
self.assertEqual(props, {"value": b"hello, world"})
def test_homogenous_list(self):
props = PropertyDict({"value": [1, 2, 3]})
assert props == {"value": [1, 2, 3]}
class PropertySetTestCase(TestCase):
def test_equality(self):
first = PropertyDict({"name": "Alice", "age": 33, "colours": ["red", "purple"]})
second = PropertyDict({"name": "Alice", "age": 33, "colours": ["red", "purple"]})
assert first == second
def test_inequality(self):
first = PropertyDict({"name": "Alice", "age": 33, "colours": ["red", "purple"]})
second = PropertyDict({"name": "Bob", "age": 44, "colours": ["blue", "purple"]})
assert first != second
def test_getter(self):
properties = PropertyDict({"name": "Alice"})
assert properties["name"] == "Alice"
def test_getter_with_none(self):
properties = PropertyDict({"name": "Alice"})
assert properties["age"] is None
def test_setter(self):
properties = PropertyDict({"name": "Alice"})
properties["age"] = 33
assert properties == {"name": "Alice", "age": 33}
def test_setter_with_none(self):
properties = PropertyDict({"name": "Alice", "age": 33})
properties["age"] = None
assert properties == {"name": "Alice"}
def test_setter_with_none_for_non_existent(self):
properties = PropertyDict({"name": "Alice"})
properties["age"] = None
assert properties == {"name": "Alice"}
def test_setdefault_without_default_with_existing(self):
properties = PropertyDict({"name": "Alice", "age": 33})
value = properties.setdefault("age")
assert properties == {"name": "Alice", "age": 33}
assert value == 33
def test_setdefault_without_default_with_non_existent(self):
properties = PropertyDict({"name": "Alice"})
value = properties.setdefault("age")
assert properties == {"name": "Alice"}
assert value is None
def test_setdefault_with_default_with_existing(self):
properties = PropertyDict({"name": "Alice", "age": 33})
value = properties.setdefault("age", 34)
assert properties == {"name": "Alice", "age": 33}
assert value == 33
def test_setdefault_with_default_with_non_existent(self):
properties = PropertyDict({"name": "Alice"})
value = properties.setdefault("age", 33)
assert properties == {"name": "Alice", "age": 33}
assert value == 33
def test_deleter(self):
properties = PropertyDict({"name": "Alice", "age": 33})
del properties["age"]
assert properties == {"name": "Alice"}
class SubgraphTestCase(TestCase):
subgraph = (alice_knows_bob | alice_likes_carol | carol_dislikes_bob |
carol_married_to_dave | dave_works_for_dave)
def test_nodes(self):
assert set(self.subgraph.nodes()) == {alice, bob, carol, dave}
def test_relationships(self):
assert set(self.subgraph.relationships()) == {alice_knows_bob, alice_likes_carol, carol_dislikes_bob,
carol_married_to_dave, dave_works_for_dave}
def test_order(self):
assert graph_order(self.subgraph) == 4
def test_size(self):
assert graph_size(self.subgraph) == 5
def test_can_infer_nodes_through_relationships(self):
f = FrozenGraph(alice_knows_bob)
assert graph_order(f) == 2
assert graph_size(f) == 1
assert set(f.nodes()) == {alice, bob}
assert set(f.relationships()) == {alice_knows_bob}
def test_equality(self):
other_subgraph = (alice_knows_bob | alice_likes_carol | carol_dislikes_bob |
carol_married_to_dave | dave_works_for_dave)
assert self.subgraph == other_subgraph
assert hash(self.subgraph) == hash(other_subgraph)
def test_inequality(self):
other_subgraph = (alice_knows_bob | alice_likes_carol | carol_dislikes_bob |
carol_married_to_dave)
assert self.subgraph != other_subgraph
assert hash(self.subgraph) != hash(other_subgraph)
def test_inequality_with_other_types(self):
assert self.subgraph != "this is not a graph"
def test_len(self):
assert len(self.subgraph) == 5
def test_bool(self):
assert self.subgraph.__bool__() is True
assert self.subgraph.__nonzero__() is True
def test_empty_subgraph(self):
_ = FrozenGraph()
# class WalkableTestCase(TestCase):
#
# sequence = (alice, alice_knows_bob, bob, carol_dislikes_bob, carol)
# walkable = Walkable(sequence)
#
# def test_nodes(self):
# nodes = self.walkable.nodes()
# assert isinstance(nodes, tuple)
# assert nodes == (alice, bob, carol)
#
# def test_relationships(self):
# relationships = self.walkable.relationships()
# assert isinstance(relationships, tuple)
# assert relationships == (alice_knows_bob, carol_dislikes_bob)
#
# def test_length(self):
# assert len(self.walkable) == 2
#
# def test_order(self):
# assert Subgraph(self.walkable).order() == 3
#
# def test_size(self):
# assert Subgraph(self.walkable).size() == 2
#
# def test_equality(self):
# other_subgraph = Walkable(self.sequence)
# assert self.walkable == other_subgraph
# assert hash(self.walkable) == hash(other_subgraph)
#
# def test_inequality(self):
# other_subgraph = Walkable([alice, alice_likes_carol, carol,
# carol_dislikes_bob, bob])
# assert self.walkable != other_subgraph
# assert hash(self.walkable) != hash(other_subgraph)
#
# def test_inequality_with_other_types(self):
# assert self.walkable != "this is not a graph"
#
# def test_iteration(self):
# assert tuple(iter(self.walkable)) == (alice_knows_bob, carol_dislikes_bob)
#
# def test_slicing(self):
# sequence = (alice, alice_knows_bob, bob, carol_dislikes_bob, carol,
# carol_married_to_dave, dave, dave_works_for_dave, dave)
# subgraph = Walkable(sequence)
# assert subgraph[0] == alice_knows_bob
# assert subgraph[1] == carol_dislikes_bob
# assert subgraph[2] == carol_married_to_dave
# assert subgraph[3] == dave_works_for_dave
# assert subgraph[0:0] == Walkable([alice])
# assert subgraph[0:1] == Walkable([alice, alice_knows_bob, bob])
# assert subgraph[0:2] == Walkable([alice, alice_knows_bob, bob,
# carol_dislikes_bob, carol])
# assert subgraph[0:3] == Walkable([alice, alice_knows_bob, bob,
# carol_dislikes_bob, carol,
# carol_married_to_dave, dave])
# assert subgraph[0:4] == Walkable([alice, alice_knows_bob, bob,
# carol_dislikes_bob, carol,
# carol_married_to_dave, dave,
# dave_works_for_dave, dave])
# assert subgraph[0:5] == Walkable([alice, alice_knows_bob, bob,
# carol_dislikes_bob, carol,
# carol_married_to_dave, dave,
# dave_works_for_dave, dave])
# assert subgraph[0:] == Walkable([alice, alice_knows_bob, bob,
# carol_dislikes_bob, carol,
# carol_married_to_dave, dave,
# dave_works_for_dave, dave])
# assert subgraph[:1] == Walkable([alice, alice_knows_bob, bob])
# assert subgraph[1:1] == Walkable([bob])
# assert subgraph[1:2] == Walkable([bob, carol_dislikes_bob, carol])
# assert subgraph[1:3] == Walkable([bob, carol_dislikes_bob, carol,
# carol_married_to_dave, dave])
# assert subgraph[1:4] == Walkable([bob, carol_dislikes_bob, carol,
# carol_married_to_dave, dave,
# dave_works_for_dave, dave])
# assert subgraph[1:5] == Walkable([bob, carol_dislikes_bob, carol,
# carol_married_to_dave, dave,
# dave_works_for_dave, dave])
# assert subgraph[1:] == Walkable([bob, carol_dislikes_bob, carol,
# carol_married_to_dave, dave,
# dave_works_for_dave, dave])
# assert subgraph[:2] == Walkable([alice, alice_knows_bob, bob,
# carol_dislikes_bob, carol])
# assert subgraph[2:2] == Walkable([carol])
# assert subgraph[2:3] == Walkable([carol, carol_married_to_dave, dave])
# assert subgraph[2:4] == Walkable([carol, carol_married_to_dave, dave,
# dave_works_for_dave, dave])
# assert subgraph[2:5] == Walkable([carol, carol_married_to_dave, dave,
# dave_works_for_dave, dave])
# assert subgraph[2:] == Walkable([carol, carol_married_to_dave, dave,
# dave_works_for_dave, dave])
# assert subgraph[1:-1] == Walkable([bob, carol_dislikes_bob, carol,
# carol_married_to_dave, dave])
# assert subgraph[-3:-1] == Walkable([bob, carol_dislikes_bob, carol,
# carol_married_to_dave, dave])
class NodeTestCase(TestCase):
def test_order(self):
assert graph_order(alice) == 1
def test_size(self):
assert graph_size(alice) == 0
def test_empty_node(self):
n = Node()
assert not n.__bool__()
assert not n.__nonzero__()
assert len(n) == 0
def test_node(self):
assert alice.__bool__()
assert alice.__nonzero__()
assert len(alice) == 2
assert set(alice.labels()) == {"Person", "Employee"}
assert dict(alice) == {"name": "Alice", "age": 33}
assert dict(alice)["name"] == "Alice"
assert alice["name"] == "Alice"
assert graph_order(alice) == 1
assert graph_size(alice) == 0
def test_equality(self):
other_node = Node("Person", "Employee", name="Alice", age=33)
assert alice == other_node
def test_inequality_by_properties(self):
other_node = Node("Person", "Employee", name="Alice", age=44)
assert alice != other_node
def test_inequality_by_labels(self):
other_node = Node("Person", name="Alice", age=33)
assert alice != other_node
def test_inequality_with_other_types(self):
assert alice != "this is not a node"
class RelationshipTestCase(TestCase):
def test_nodes(self):
assert alice_knows_bob.nodes() == (alice, bob)
def test_order(self):
assert graph_order(alice_knows_bob) == 2
def test_size(self):
assert graph_size(alice_knows_bob) == 1
def test_relationship(self):
assert alice_knows_bob.nodes() == (alice, bob)
assert type(alice_knows_bob) == KNOWS
assert dict(alice_knows_bob) == {"since": 1999}
assert alice_knows_bob["since"] == 1999
assert graph_order(alice_knows_bob) == 2
assert graph_size(alice_knows_bob) == 1
assert set(alice_knows_bob.nodes()) == {alice, bob}
def test_loop(self):
assert dave_works_for_dave.nodes() == (dave, dave)
assert graph_order(dave_works_for_dave) == 1
assert graph_size(dave_works_for_dave) == 1
assert set(dave_works_for_dave.nodes()) == {dave}
def test_equality(self):
other_rel = alice_knows_bob
assert alice_knows_bob == other_rel
def test_inequality(self):
other_rel = KNOWS(alice, bob, since=1999)
assert alice != other_rel
def test_inequality_with_other_types(self):
assert alice_knows_bob != "there is no relationship"
class RelationshipLoopTestCase(TestCase):
loop = LIKES(alice, alice)
def test_type(self):
assert type(self.loop) == LIKES
def test_nodes(self):
assert self.loop.nodes() == (alice, alice)
def test_order(self):
assert graph_order(self.loop) == 1
def test_size(self):
assert graph_size(self.loop) == 1
# class PathTestCase(TestCase):
#
# path = Path(alice, alice_knows_bob, bob, alice_knows_bob, alice, alice_likes_carol, carol)
#
# def test_nodes(self):
# nodes = self.path.nodes()
# assert isinstance(nodes, tuple)
# assert nodes == (alice, bob, alice, carol)
#
# def test_relationships(self):
# relationships = self.path.relationships()
# assert isinstance(relationships, tuple)
# assert relationships == (alice_knows_bob, alice_knows_bob, alice_likes_carol)
#
# def test_order(self):
# assert Subgraph(self.path).order() == 3
#
# def test_size(self):
# assert Subgraph(self.path).size() == 2
#
# def test_length(self):
# assert len(self.path) == 3
#
# def test_construction_of_path_length_0(self):
# sequence = [alice]
# path = Path(*sequence)
# assert Subgraph(path).order() == 1
# assert Subgraph(path).size() == 0
# assert len(path) == 0
# assert set(path.nodes()) == {alice}
# assert set(path.relationships()) == set()
# assert path.start_node() == alice
# assert path.end_node() == alice
# assert len(path) == 0
# assert list(walk(path)) == sequence
#
# def test_construction_of_path_length_1(self):
# sequence = [alice, alice_knows_bob, bob]
# path = Path(*sequence)
# assert Subgraph(path).order() == 2
# assert Subgraph(path).size() == 1
# assert len(path) == 1
# assert set(path.nodes()) == {alice, bob}
# assert set(path.relationships()) == {alice_knows_bob}
# assert path.start_node() == alice
# assert path.end_node() == bob
# assert len(path) == 1
# assert list(walk(path)) == sequence
#
# def test_construction_of_path_length_2(self):
# sequence = [alice, alice_knows_bob, bob, carol_dislikes_bob, carol]
# path = Path(*sequence)
# assert Subgraph(path).order() == 3
# assert Subgraph(path).size() == 2
# assert len(path) == 2
# assert set(path.nodes()) == {alice, bob, carol}
# assert set(path.relationships()) == {alice_knows_bob, carol_dislikes_bob}
# assert path.start_node() == alice
# assert path.end_node() == carol
# assert len(path) == 2
# assert list(walk(path)) == sequence
#
# def test_construction_of_path_with_revisits(self):
# sequence = [alice, alice_knows_bob, bob, carol_dislikes_bob, carol,
# alice_likes_carol, alice, alice_knows_bob, bob]
# path = Path(*sequence)
# assert Subgraph(path).order() == 3
# assert Subgraph(path).size() == 3
# assert len(path) == 4
# assert set(path.nodes()) == {alice, bob, carol}
# assert set(path.relationships()) == {alice_knows_bob, alice_likes_carol, carol_dislikes_bob}
# assert path.start_node() == alice
# assert path.end_node() == bob
# assert len(path) == 4
# assert list(walk(path)) == sequence
#
# def test_construction_of_path_with_loop(self):
# sequence = [carol, carol_married_to_dave, dave, dave_works_for_dave, dave]
# path = Path(*sequence)
# assert Subgraph(path).order() == 2
# assert Subgraph(path).size() == 2
# assert len(path) == 2
# assert set(path.nodes()) == {carol, dave}
# assert set(path.relationships()) == {carol_married_to_dave, dave_works_for_dave}
# assert path.start_node() == carol
# assert path.end_node() == dave
# assert len(path) == 2
# assert list(walk(path)) == sequence
#
# def test_path_indexing(self):
# sequence = [alice_knows_bob, carol_dislikes_bob, carol_married_to_dave]
# path = Path(*sequence)
# assert path[0] == alice_knows_bob
# assert path[1] == carol_dislikes_bob
# assert path[2] == carol_married_to_dave
# assert path[-3] == alice_knows_bob
# assert path[-2] == carol_dislikes_bob
# assert path[-1] == carol_married_to_dave
# with self.assertRaises(IndexError):
# _ = path[3]
# class WalkTestCase(TestCase):
#
# def test_can_walk_nothing(self):
# result = list(walk())
# assert result == []
#
# def test_can_walk_node(self):
# result = list(walk(alice))
# assert result == [alice]
#
# def test_can_walk_node_twice(self):
# result = list(walk(alice, alice))
# assert result == [alice]
#
# def test_can_walk_node_and_relationship(self):
# result = list(walk(alice, alice_knows_bob))
# assert result == [alice, alice_knows_bob, bob]
#
# def test_can_walk_node_relationship_and_node(self):
# result = list(walk(alice, alice_knows_bob, bob))
# assert result == [alice, alice_knows_bob, bob]
#
# def test_can_walk_node_relationship_and_node_in_reverse(self):
# result = list(walk(bob, alice_knows_bob, alice))
# assert result == [bob, alice_knows_bob, alice]
#
# def test_cannot_walk_non_walkable_as_first_argument(self):
# with self.assertRaises(TypeError):
# list(walk(object()))
#
# def test_cannot_walk_non_walkable_as_second_argument(self):
# with self.assertRaises(TypeError):
# list(walk(alice, object()))
# class ConcatenationTestCase(TestCase):
#
# def test_can_concatenate_node_and_node(self):
# result = alice + alice
# assert result == Walkable([alice])
#
# def test_can_concatenate_node_and_relationship(self):
# result = alice + alice_knows_bob
# assert result == Walkable([alice, alice_knows_bob, bob])
#
# def test_can_concatenate_node_and_reversed_relationship(self):
# bob_knows_alice = Relationship(bob, "KNOWS", alice)
# result = alice + bob_knows_alice
# assert result == Walkable([alice, bob_knows_alice, bob])
#
# def test_can_concatenate_node_and_path(self):
# path = Walkable([alice, alice_knows_bob, bob])
# result = alice + path
# assert result == path
#
# def test_can_concatenate_node_and_reversed_path(self):
# result = alice + Walkable([bob, alice_knows_bob, alice])
# assert result == Walkable([alice, alice_knows_bob, bob])
#
# def test_can_concatenate_relationship_and_node(self):
# result = alice_knows_bob + bob
# assert result == Walkable([alice, alice_knows_bob, bob])
#
# def test_can_concatenate_relationship_and_relationship(self):
# result = alice_knows_bob + carol_dislikes_bob
# assert result == Walkable([alice, alice_knows_bob, bob, carol_dislikes_bob, carol])
#
# def test_can_concatenate_relationship_and_path(self):
# result = alice_knows_bob + Walkable([bob, carol_dislikes_bob, carol])
# assert result == Walkable([alice, alice_knows_bob, bob, carol_dislikes_bob, carol])
#
# def test_can_concatenate_path_and_node(self):
# result = Walkable([alice, alice_knows_bob, bob]) + bob
# assert result == Walkable([alice, alice_knows_bob, bob])
#
# def test_can_concatenate_path_and_relationship(self):
# result = Walkable([alice, alice_knows_bob, bob]) + carol_dislikes_bob
# assert result == Walkable([alice, alice_knows_bob, bob, carol_dislikes_bob, carol])
#
# def test_can_concatenate_path_and_path(self):
# result = (Walkable([alice, alice_knows_bob, bob]) +
# Walkable([bob, carol_dislikes_bob, carol]))
# assert result == Walkable([alice, alice_knows_bob, bob, carol_dislikes_bob, carol])
#
# def test_cannot_concatenate_different_endpoints(self):
# with self.assertRaises(ValueError):
# _ = alice + bob
#
# def test_can_concatenate_node_and_none(self):
# result = alice + None
# assert result is alice
class UnionTestCase(TestCase):
def test_graph_union(self):
graph_1 = (alice_knows_bob | alice_likes_carol | carol_dislikes_bob)
graph_2 = (carol_dislikes_bob | carol_married_to_dave | dave_works_for_dave)
graph = graph_1 | graph_2
assert graph_order(FrozenGraph(graph)) == 4
assert graph_size(FrozenGraph(graph)) == 5
assert set(graph.nodes()) == {alice, bob, carol, dave}
# class IntersectionTestCase(TestCase):
#
# def test_graph_intersection(self):
# graph_1 = (alice_knows_bob | alice_likes_carol | carol_dislikes_bob)
# graph_2 = (carol_dislikes_bob | carol_married_to_dave | dave_works_for_dave)
# graph = graph_1 & graph_2
# assert Subgraph(graph).order() == 2
# assert Subgraph(graph).size() == 1
# assert graph.nodes() == (bob | carol).nodes()
# class DifferenceTestCase(TestCase):
#
# def test_graph_difference(self):
# graph_1 = (alice_knows_bob | alice_likes_carol | carol_dislikes_bob)
# graph_2 = (carol_dislikes_bob | carol_married_to_dave | dave_works_for_dave)
# graph = graph_1 - graph_2
# assert Subgraph(graph).order() == 3
# assert Subgraph(graph).size() == 2
# assert graph.nodes() == (alice | bob | carol).nodes()
# class SymmetricDifferenceTestCase(TestCase):
#
# def test_graph_symmetric_difference(self):
# graph_1 = (alice_knows_bob | alice_likes_carol | carol_dislikes_bob)
# graph_2 = (carol_dislikes_bob | carol_married_to_dave | dave_works_for_dave)
# graph = graph_1 ^ graph_2
# assert Subgraph(graph).order() == 4
# assert Subgraph(graph).size() == 4
# assert graph.nodes() == (alice | bob | carol | dave).nodes()
# assert graph.relationships() == frozenset(alice_knows_bob | alice_likes_carol |
# carol_married_to_dave | dave_works_for_dave)
|
{
"content_hash": "1c8871695844f47c29eb473c1fbb62ac",
"timestamp": "",
"source": "github",
"line_count": 611,
"max_line_length": 109,
"avg_line_length": 39.749590834697216,
"alnum_prop": 0.5784987853584223,
"repo_name": "technige/cypy",
"id": "fd5c974feeb2ddebe60510861feb1a9bb52b2a1e",
"size": "24913",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/graph/test_py2neo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "164557"
},
{
"name": "Shell",
"bytes": "254"
}
],
"symlink_target": ""
}
|
import os
import sys
import urllib
from optparse import OptionParser
from twisted.internet import reactor
from twisted.python import failure
import coil
from nagcat import errors, graph, log, plugin
# Attempt to retry after failures 6 times at 20 second intervals
RETRY_INTERVAL = 20
RETRY_LIMIT = 6
# Which template to use for each notification type
NOTIFICATION_TEMPLATES = {
'alert': ('PROBLEM', 'RECOVERY'),
'comment': ('ACKNOWLEDGEMENT', 'DOWNTIME', 'CUSTOM'),
'flapping': ('FLAPPING',),
}
DEFAULT_CONFIG = '''
metadata: { }
urls: {
nagios: None
graphs: None
}
rradir: None
host: {
subject: "{NOTIFICATIONTYPE} {HOSTNAME} is {HOSTSTATE}"
long: {
alert: """***** Nagios *****
Type: {NOTIFICATIONTYPE}
Host: {HOSTALIAS}
Address: {HOSTADDRESS}
State: {HOSTSTATE}
Info: {HOSTOUTPUT}
Date: {LONGDATETIME}
"""
comment: """***** Nagios *****
Type: {NOTIFICATIONTYPE}
Author: {NOTIFICATIONAUTHOR}
Comment: {NOTIFICATIONCOMMENT}
Host: {HOSTALIAS}
Address: {HOSTADDRESS}
State: {HOSTSTATE}
Info: {HOSTOUTPUT}
Date: {LONGDATETIME}
"""
flapping: """***** Nagios *****
Type: {NOTIFICATIONTYPE}
No notifications are sent while the host state is flapping.
Host: {HOSTALIAS}
Address: {HOSTADDRESS}
State: {HOSTSTATE}
Info: {HOSTOUTPUT}
Date: {LONGDATETIME}
"""
}
short: {
alert: """Host {HOSTALIAS}
Info: {HOSTOUTPUT}
Date: {SHORTDATETIME}
"""
comment: """Host {HOSTALIAS}
Author: {NOTIFICATIONAUTHOR}
Comment: {NOTIFICATIONCOMMENT}
Date: {SHORTDATETIME}
"""
flapping: """Host {HOSTALIAS}
Date: {SHORTDATETIME}
"""
}
}
service: {
subject: "{NOTIFICATIONTYPE} {HOSTALIAS}/{SERVICEDESC} is {SERVICESTATE}"
long: {
alert: """***** Nagios *****
Type: {NOTIFICATIONTYPE}
Service: {SERVICEDESC}
Host: {HOSTALIAS}
Address: {HOSTADDRESS}
State: {SERVICESTATE}
Info: {SERVICEOUTPUT}
{LONGSERVICEOUTPUT}
Date: {LONGDATETIME}
"""
comment: """***** Nagios *****
Type: {NOTIFICATIONTYPE}
Author: {NOTIFICATIONAUTHOR}
Comment: {NOTIFICATIONCOMMENT}
Service: {SERVICEDESC}
Host: {HOSTALIAS}
Address: {HOSTADDRESS}
State: {SERVICESTATE}
Info: {SERVICEOUTPUT}
Date: {LONGDATETIME}
"""
flapping: """***** Nagios *****
Type: {NOTIFICATIONTYPE}
No notifications are sent while the service state is flapping.
Service: {SERVICEDESC}
Host: {HOSTALIAS}
Address: {HOSTADDRESS}
State: {SERVICESTATE}
Info: {SERVICEOUTPUT}
Date: {LONGDATETIME}
"""
}
short: {
alert: """Service: {SERVICEDESC}
Host: {HOSTALIAS}
Info: {SERVICEOUTPUT}
Date: {SHORTDATETIME}
"""
comment: """Service: {SERVICEDESC}
Host: {HOSTALIAS}
Author: {NOTIFICATIONAUTHOR}
Comment: {NOTIFICATIONCOMMENT}
Date: {SHORTDATETIME}
"""
flapping: """Service: {SERVICEDESC}
Host: {HOSTALIAS}
Date: {SHORTDATETIME}
"""
}
}
'''
class NotificationError(Exception):
"""Generic known-error"""
class MissingMacro(NotificationError):
"""A Nagios macro expected in the template is missing"""
def __init__(self, name):
super(MissingMacro, self).__init__("Missing Nagios macro: %s" % name)
class Macros(dict):
"""Fetch the various Nagios macros from the environment.
This allows notifications to use the macros as they would appear
in Nagios config files which are a little shorter.
Also provide a special Exception for missing macros.
"""
def __init__(self, environ):
for key, value in environ.iteritems():
if not key.startswith("NAGIOS_"):
continue
key = key.replace("NAGIOS_", "", 1)
if key.startswith("LONG") and key.endswith("OUTPUT"):
value = value.replace(r'\n', '\n')
self[key] = value
def __getitem__(self, key):
try:
return super(Macros, self).__getitem__(key)
except KeyError:
raise MissingMacro(key)
class INotification(plugin.INagcatPlugin):
"""Interface provided by Notification plugin classes"""
class Notification(object):
"""Base notification class."""
#: Name of this notification method
name = None
#: Format to use for generating text
format = "long"
#: Default config options for this class.
# (may be a string, dict, or Struct)
defaults = {}
def __init__(self, type_, macros, config):
assert type_ in ('host', 'service')
self.type = type_
self.macros = macros
self.config = config
self.trend = None
# Attempt to generate an rrdtool graph if this is a Nagcat service
if (type_ == "service" and self.config['rradir']
and self.macros.get('_SERVICETEST', None)):
if not graph.available():
log.warn("RRDTool support is not available")
try:
self.trend = graph.Graph(self.config['rradir'],
self.macros['HOSTNAME'],
self.macros['SERVICEDESC'])
except errors.InitError, ex:
log.warn("Unable to load RRDTool info for %s/%s: %s" %
(self.macros['HOSTNAME'],
self.macros['SERVICEDESC'], ex))
def metadata(self, key, default=None):
macro = key.upper()
return self.macros.get('_CONTACT%s' % macro,
self.macros.get('_SERVICE%s' % macro,
self.macros.get('_HOST%s' % macro,
self.config.get('metadata.%s' % key, default))))
def subject(self):
return self._format(self.config[self.type]['subject'])
def body(self):
for template, notification in NOTIFICATION_TEMPLATES.iteritems():
if self.macros['NOTIFICATIONTYPE'].startswith(notification):
return self._format(
self.config[self.type][self.format][template])
raise NotificationError("Unknown notification type: %s" %
self.macros['NOTIFICATIONTYPE'])
def urls(self):
urls = {}
if self.type == "host":
if self.config['urls.nagios']:
urls['nagios'] = \
"%s/cgi-bin/status.cgi?host=%s" % (
self.config['urls.nagios'].rstrip("/"),
urllib.quote_plus(self.macros['HOSTNAME']))
if self.config['urls.graphs']:
urls['graphs'] = "%s/host.cgi?host=%s" % (
self.config['urls.graphs'].rstrip("/"),
urllib.quote_plus(self.macros['HOSTNAME']))
elif self.type == "service":
if self.config['urls.nagios']:
urls['nagios'] = \
"%s/cgi-bin/extinfo.cgi?type=2&host=%s&service=%s" % (
self.config['urls.nagios'].rstrip("/"),
urllib.quote_plus(self.macros['HOSTNAME']),
urllib.quote_plus(self.macros['SERVICEDESC']))
if self.config['urls.graphs']:
urls['graphs'] = "%s/service.cgi?host=%s&service=%s" % (
self.config['urls.graphs'].rstrip("/"),
urllib.quote_plus(self.macros['HOSTNAME']),
urllib.quote_plus(self.macros['SERVICEDESC']))
else:
assert 0
return urls
def graph(self):
if self.trend:
return self.trend.graph()
else:
return None
def coil(self):
if self.trend and not self.trend.private:
return str(self.trend.conf)
else:
return None
def send(self):
raise Exception("unimplemented")
def _format(self, text):
text = "\n".join(l.strip() for l in text.splitlines())
try:
return text.format(**self.macros)
except KeyError, ex:
raise MissingMacro(ex.args[0])
def parse_options():
notify_plugins = plugin.search(INotification)
notify_list = ", ".join(notify_plugins)
parser = OptionParser()
parser.add_option("-m", "--method",
help="notification method: %s" % notify_list)
parser.add_option("-H", "--host", action="store_true", default=False,
help="this is a host notification")
parser.add_option("-S", "--service", action="store_true", default=False,
help="this is a service notification")
parser.add_option("-c", "--config",
help="load notification coil config")
parser.add_option("-l", "--logfile",
help="log errors to a given file")
parser.add_option("-v", "--loglevel", default="WARN",
help="set a specific log level")
parser.add_option("-d", "--daemonize", action="store_true",
help="daemonize to avoid blocking nagios")
parser.add_option("-D", "--dump", action="store_true",
help="just dump the config")
options, args = parser.parse_args()
if args:
parser.error("unknown extra arguments: %s" % args)
if not options.method:
parser.error("--method is required")
if options.method not in notify_plugins:
parser.error("invalid method, choose from: %s" % notify_list)
if not options.dump and 1 != sum([options.host, options.service]):
parser.error("choose one and only one: host, service")
if options.daemonize and not options.logfile:
parser.error("--daemonize requires --log-file")
return options, notify_plugins[options.method]
def main():
options, method = parse_options()
log.init(options.logfile, options.loglevel)
if not options.dump and options.daemonize:
if os.fork() > 0:
os._exit(0)
os.chdir("/")
os.setsid()
if os.fork() > 0:
os._exit(0)
log.init_stdio()
try:
config = coil.parse(DEFAULT_CONFIG)
if method.defaults:
if isinstance(method.defaults, str):
config.merge(coil.parse(method.defaults))
else:
config.merge(coil.struct.Struct(method.defaults))
if options.config:
config.merge(coil.parse_file(options.config))
except coil.errors.CoilError, ex:
log.error("Error parsing config: %s" % ex)
sys.exit(1)
except IOError, ex:
log.error("Error reading config file: %s" % ex)
sys.exit(1)
if options.dump:
print str(config)
sys.exit(0)
macros = Macros(os.environ)
if not macros:
log.error("No Nagios environment variables found.")
sys.exit(1)
if options.host:
event_type = "host"
elif options.service:
event_type = "service"
else:
assert 0
notifier = method(event_type, macros, config)
exit_code = [-1]
def start():
try:
deferred = notifier.send()
deferred.addBoth(stop)
except Exception:
stop(failure.Failure())
def stop(result):
reactor.stop()
if isinstance(result, failure.Failure):
if isinstance(result.value, NotificationError):
log.error(str(result.value))
else:
log.error(str(result))
exit_code[0] = 1
else:
exit_code[0] = 0
reactor.callWhenRunning(start)
reactor.run()
sys.exit(exit_code[0])
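# Illustrative sketch (not part of nagcat; the environment values below are
# made-up assumptions): Nagios exports its macros as NAGIOS_* environment
# variables, which Macros() strips back down to the bare names used by the
# templates in DEFAULT_CONFIG, e.g.
#
#   os.environ['NAGIOS_HOSTNAME'] = 'web01'
#   os.environ['NAGIOS_NOTIFICATIONTYPE'] = 'PROBLEM'
#   macros = Macros(os.environ)
#   macros['HOSTNAME']     # -> 'web01'
#   macros['SERVICEDESC']  # -> raises MissingMacro if Nagios did not set it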
|
{
"content_hash": "44ec1b9204bef98b7fda8a3fbfcc884e",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 78,
"avg_line_length": 28.477326968973745,
"alnum_prop": 0.5542239356352665,
"repo_name": "marineam/nagcat",
"id": "d63b286b75fc85920aed27177157e9a1fffa707a",
"size": "12515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/nagcat/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "119709"
},
{
"name": "Python",
"bytes": "572702"
},
{
"name": "Shell",
"bytes": "3443"
}
],
"symlink_target": ""
}
|
"""
Attempt 1 (2 May 2020) at building an automatic
PDF builder for NAMS hybrid Markdown and Jupyter files
"""
import nbformat
import yaml
from pyprojroot import here
with open(here() / "mkdocs.yml", "r+") as f:
f = "".join(l for l in f.readlines())
mkdocs_config = yaml.safe_load(f)
nav = mkdocs_config["nav"]
docroot = here() / "docs"
for navitems in nav:
for section, items in navitems.items():
if isinstance(items, list):
for item in items:
print(item)
else:
print(items)
def _convert_notebook(filepath):
"""Convert notebook into a Markdown file in memory."""
def _convert_markdown():
pass
suffix_converter = {".ipynb": _convert_notebook, ".md": _convert_markdown}
def convert_file(fname):
suffix = fname.split(".")[-1]
try:
converter_func = suffix_converter[suffix]
except KeyError:
raise KeyError(f"{fname} has unsupported suffix `{suffix}`!")
# TODO:
# - execute Jupyter nbconvert to convert notebooks to Markdown with outputs
# - inject titles into individual Markdown files
# - concatenate Markdown files into a master file
# - generate "custom" Markdown based on inputted "name", and add signature
# - compile Markdown files into a single PDF.
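# Hedged sketch of the notebook-to-Markdown step from the TODO above. It is not
# wired into suffix_converter; the function name is illustrative and the only
# extra dependency assumed is nbconvert's MarkdownExporter.
def _convert_notebook_sketch(filepath):
    """Render a .ipynb file to a Markdown string in memory."""
    from nbconvert import MarkdownExporter

    nb = nbformat.read(filepath, as_version=4)
    body, _resources = MarkdownExporter().from_notebook_node(nb)
    return body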
|
{
"content_hash": "4773a5d5bca50c5ce5756c492442305d",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 75,
"avg_line_length": 24.5,
"alnum_prop": 0.6640502354788069,
"repo_name": "ericmjl/Network-Analysis-Made-Simple",
"id": "aa59f8a440fba062e2214616188b3f6d0e3702ea",
"size": "1274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/bookbuilder/toc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "294"
},
{
"name": "Dockerfile",
"bytes": "3587"
},
{
"name": "Jupyter Notebook",
"bytes": "622408"
},
{
"name": "Makefile",
"bytes": "848"
},
{
"name": "Python",
"bytes": "57260"
},
{
"name": "Shell",
"bytes": "2579"
}
],
"symlink_target": ""
}
|
from django import forms
from vesper.django.app.clip_set_form import ClipSetForm
from vesper.singleton.preset_manager import preset_manager
import vesper.django.app.form_utils as form_utils
_FORM_TITLE = 'Export clip metadata to CSV file'
_TABLE_FORMAT_FIELD_LABEL = 'Clip table format preset'
def _get_field_default(name, default):
return form_utils.get_field_default(_FORM_TITLE, name, default)
class ExportClipMetadataToCsvFileForm(ClipSetForm):
table_format = forms.ChoiceField(
label=_TABLE_FORMAT_FIELD_LABEL,
initial=_get_field_default(_TABLE_FORMAT_FIELD_LABEL, None),
required=False)
output_file_path = forms.CharField(
label='Output file', max_length=255,
widget=forms.TextInput(attrs={'class': 'command-form-wide-input'}))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Populate table format field.
self.fields['table_format'].choices = \
form_utils.get_preset_choices(
'Clip Table Format', include_none=False)
|
{
"content_hash": "d76fdc167a36cda20a20589f644cbe5f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 31.457142857142856,
"alnum_prop": 0.6630336058128974,
"repo_name": "HaroldMills/Vesper",
"id": "1554a00deef7a4c726a2f879c1b844939799278c",
"size": "1101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vesper/django/app/export_clip_metadata_to_csv_file_form.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "92"
},
{
"name": "CSS",
"bytes": "9101"
},
{
"name": "Dockerfile",
"bytes": "1678"
},
{
"name": "HTML",
"bytes": "70614"
},
{
"name": "JavaScript",
"bytes": "410277"
},
{
"name": "Python",
"bytes": "2697554"
},
{
"name": "Shell",
"bytes": "2772"
},
{
"name": "TypeScript",
"bytes": "30001"
}
],
"symlink_target": ""
}
|
import os
import subprocess
from Bio import SeqIO
#%%#############################################################################
### User-defined files and folder structure
################################################################################
# Define data folders
genomeFolder = '../genomes/faa'
outputFolder = '../genomes/compliant'
# Create the output directory
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
# Get genome list
genomeList = []
for genome in os.listdir(genomeFolder):
if genome.endswith('.faa'):
genomeList.append(genome)
genomeList = [genome.replace('.faa', '') for genome in genomeList]
#%%#############################################################################
### Create a hash for mapping names to taxon IDs
################################################################################
taxonDict = {}
with open('taxonMapping.txt') as dictFile:
for line in dictFile:
(key, val) = line.split()
taxonDict[key] = val
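# Assumed layout of taxonMapping.txt (a hedged guess based on the parsing above):
# one whitespace-separated pair per line, genome file basename then taxon ID, e.g.
#   acI-B1-genome    2503283023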
#%%#############################################################################
### Read in each FASTA file. Update the file
################################################################################
for genome in genomeList:
inFile = open(genomeFolder+'/'+genome+'.faa', 'r')
outFile = open(outputFolder+'/'+taxonDict[genome]+'.fasta', 'w')
for record in SeqIO.parse(inFile, 'fasta') :
record.id = taxonDict[genome]+'|'+ record.description.split()[1]
record.description = ''
SeqIO.write(record, outFile, 'fasta')
inFile.close()
outFile.close()
#%%#############################################################################
### Subprocess call to orthomclFilterFasta
################################################################################
subprocess.call(['orthomclFilterFasta',
outputFolder, '10', '20',
'../genomes/good.fasta', '../genomes/bad.fasta' ])
|
{
"content_hash": "ce61fdf201dcbda2e8a435529d0e3b2a",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 80,
"avg_line_length": 38.67307692307692,
"alnum_prop": 0.43560417702635507,
"repo_name": "joshamilton/Hamilton_acI_2017",
"id": "d7cab7b56d8a6d5a60752a71176e6d7c7b767092",
"size": "2746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/orthoMCL/01faaParser.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "209935"
},
{
"name": "Makefile",
"bytes": "543"
},
{
"name": "Perl",
"bytes": "6540"
},
{
"name": "Python",
"bytes": "86594"
},
{
"name": "Shell",
"bytes": "908"
},
{
"name": "TeX",
"bytes": "165291"
}
],
"symlink_target": ""
}
|
"""Hooks function definitions.
Hooks are functions that are called in different parts of the codebase,
and used to execute some functionality that might be only required in
certain environments.
"""
def set_lint_args_hook(args):
"""Called after lint command arguments were parsed."""
try:
# This is for Google-internal use only and allows us to modify
# default options for internal use.
# pylint: disable=import-outside-toplevel
from gcpdiag_google_internal import hooks as google_internal
google_internal.set_lint_args_hook(args)
except ImportError:
pass
def verify_access_hook(project_id: str):
"""Called to do additional authorization verifications."""
try:
# gcpdiag_google_internal contains code that we run only internally
# at Google, so this import will fail in the public version.
# pylint: disable=import-outside-toplevel
from gcpdiag_google_internal import hooks as google_internal
google_internal.verify_access_hook(project_id)
except ImportError:
pass
def request_builder_hook(*args, **kwargs):
"""Called when creating HTTP requests."""
try:
# This is for Google-internal use only and allows us to modify the request
# to make it work also internally. The import will fail for the public
# version of gcpdiag.
# pylint: disable=import-outside-toplevel
from gcpdiag_google_internal import hooks
hooks.request_builder_hook(*args, **kwargs)
except ImportError:
pass
def post_lint_hook(report):
"""Called after lint command has run."""
try:
# gcpdiag_google_internal contains code that we run only internally
# at Google, so this import will fail in the public version.
# pylint: disable=import-outside-toplevel
from gcpdiag_google_internal import hooks as google_internal
google_internal.post_lint_hook(report)
except ImportError:
pass
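# Minimal usage sketch (hedged; the surrounding variable names are assumptions,
# not gcpdiag API): callers are expected to invoke these no-op-by-default hooks
# at the matching stages, roughly:
#
#   args = parser.parse_args()
#   set_lint_args_hook(args)        # right after argument parsing
#   verify_access_hook(project_id)  # before inspecting a project
#   ...                             # run the lint rules
#   post_lint_hook(report)          # once the report object exists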
|
{
"content_hash": "fcc216e5d9ee8b71223685fc9faa33b9",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 78,
"avg_line_length": 34.18181818181818,
"alnum_prop": 0.7351063829787234,
"repo_name": "GoogleCloudPlatform/gcpdiag",
"id": "73f8b40f99804a7c8967018fdeda097918321c4f",
"size": "2455",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gcpdiag/hooks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4610"
},
{
"name": "HCL",
"bytes": "90111"
},
{
"name": "HTML",
"bytes": "8149"
},
{
"name": "Jinja",
"bytes": "1231"
},
{
"name": "Makefile",
"bytes": "51860"
},
{
"name": "Python",
"bytes": "792739"
},
{
"name": "SCSS",
"bytes": "1435"
},
{
"name": "Shell",
"bytes": "10973"
},
{
"name": "Smarty",
"bytes": "726"
}
],
"symlink_target": ""
}
|
import string
import types
import Tkinter
import Pmw
class MainMenuBar(Pmw.MegaArchetype):
def __init__(self, parent = None, **kw):
# Define the megawidget options.
INITOPT = Pmw.INITOPT
optiondefs = (
('balloon', None, None),
('hotkeys', 1, INITOPT),
('hull_tearoff', 0, None),
)
self.defineoptions(kw, optiondefs, dynamicGroups = ('Menu',))
# Initialise the base class (after defining the options).
Pmw.MegaArchetype.__init__(self, parent, Tkinter.Menu)
self._menuInfo = {}
self._menuInfo[None] = (None, [])
# Map from a menu name to a tuple of information about the menu.
# The first item in the tuple is the name of the parent menu (for
# toplevel menus this is None). The second item in the tuple is
# a list of status help messages for each item in the menu.
# The key for the information for the main menubar is None.
self._menu = self.interior()
self._menu.bind('<Leave>', self._resetHelpmessage)
self._menu.bind('<Motion>',
lambda event=None, self=self: self._menuHelp(event, None))
# Check keywords and initialise options.
self.initialiseoptions()
def deletemenuitems(self, menuName, start, end = None):
self.component(menuName).delete(start, end)
if end is None:
del self._menuInfo[menuName][1][start]
else:
self._menuInfo[menuName][1][start:end+1] = []
def deletemenu(self, menuName):
"""Delete should be called for cascaded menus before main menus.
"""
parentName = self._menuInfo[menuName][0]
del self._menuInfo[menuName]
if parentName is None:
parentMenu = self._menu
else:
parentMenu = self.component(parentName)
menu = self.component(menuName)
menuId = str(menu)
for item in range(parentMenu.index('end') + 1):
if parentMenu.type(item) == 'cascade':
itemMenu = str(parentMenu.entrycget(item, 'menu'))
if itemMenu == menuId:
parentMenu.delete(item)
del self._menuInfo[parentName][1][item]
break
self.destroycomponent(menuName)
def disableall(self):
for index in range(len(self._menuInfo[None][1])):
self.entryconfigure(index, state = 'disabled')
def enableall(self):
for index in range(len(self._menuInfo[None][1])):
self.entryconfigure(index, state = 'normal')
def addmenu(self, menuName, balloonHelp, statusHelp = None,
traverseSpec = None, **kw):
if statusHelp is None:
statusHelp = balloonHelp
self._addmenu(None, menuName, balloonHelp, statusHelp,
traverseSpec, kw)
def addcascademenu(self, parentMenuName, menuName, statusHelp='',
traverseSpec = None, **kw):
self._addmenu(parentMenuName, menuName, None, statusHelp,
traverseSpec, kw)
def _addmenu(self, parentMenuName, menuName, balloonHelp, statusHelp,
traverseSpec, kw):
if (menuName) in self.components():
raise ValueError, 'menu "%s" already exists' % menuName
menukw = {}
if kw.has_key('tearoff'):
menukw['tearoff'] = kw['tearoff']
del kw['tearoff']
else:
menukw['tearoff'] = 0
if kw.has_key('name'):
menukw['name'] = kw['name']
del kw['name']
if not kw.has_key('label'):
kw['label'] = menuName
self._addHotkeyToOptions(parentMenuName, kw, traverseSpec)
if parentMenuName is None:
parentMenu = self._menu
balloon = self['balloon']
# Bug in Tk: balloon help not implemented
# if balloon is not None:
# balloon.mainmenubind(parentMenu, balloonHelp, statusHelp)
else:
parentMenu = self.component(parentMenuName)
apply(parentMenu.add_cascade, (), kw)
menu = apply(self.createcomponent, (menuName,
(), 'Menu',
Tkinter.Menu, (parentMenu,)), menukw)
parentMenu.entryconfigure('end', menu = menu)
self._menuInfo[parentMenuName][1].append(statusHelp)
self._menuInfo[menuName] = (parentMenuName, [])
menu.bind('<Leave>', self._resetHelpmessage)
menu.bind('<Motion>',
lambda event=None, self=self, menuName=menuName:
self._menuHelp(event, menuName))
def addmenuitem(self, menuName, itemType, statusHelp = '',
traverseSpec = None, **kw):
menu = self.component(menuName)
if itemType != 'separator':
self._addHotkeyToOptions(menuName, kw, traverseSpec)
if itemType == 'command':
command = menu.add_command
elif itemType == 'separator':
command = menu.add_separator
elif itemType == 'checkbutton':
command = menu.add_checkbutton
elif itemType == 'radiobutton':
command = menu.add_radiobutton
elif itemType == 'cascade':
command = menu.add_cascade
else:
raise ValueError, 'unknown menuitem type "%s"' % itemType
self._menuInfo[menuName][1].append(statusHelp)
apply(command, (), kw)
def _addHotkeyToOptions(self, menuName, kw, traverseSpec):
if (not self['hotkeys'] or kw.has_key('underline') or
not kw.has_key('label')):
return
if type(traverseSpec) == types.IntType:
kw['underline'] = traverseSpec
return
if menuName is None:
menu = self._menu
else:
menu = self.component(menuName)
hotkeyList = []
end = menu.index('end')
if end is not None:
for item in range(end + 1):
if menu.type(item) not in ('separator', 'tearoff'):
underline = \
string.atoi(str(menu.entrycget(item, 'underline')))
if underline != -1:
label = str(menu.entrycget(item, 'label'))
if underline < len(label):
hotkey = string.lower(label[underline])
if hotkey not in hotkeyList:
hotkeyList.append(hotkey)
name = kw['label']
if type(traverseSpec) == types.StringType:
lowerLetter = string.lower(traverseSpec)
if traverseSpec in name and lowerLetter not in hotkeyList:
kw['underline'] = string.index(name, traverseSpec)
else:
targets = string.digits + string.letters
lowerName = string.lower(name)
for letter_index in range(len(name)):
letter = lowerName[letter_index]
if letter in targets and letter not in hotkeyList:
kw['underline'] = letter_index
break
def _menuHelp(self, event, menuName):
if menuName is None:
menu = self._menu
index = menu.index('@%d'% event.x)
else:
menu = self.component(menuName)
index = menu.index('@%d'% event.y)
balloon = self['balloon']
if balloon is not None:
if index is None:
balloon.showstatus('')
else:
if str(menu.cget('tearoff')) == '1':
index = index - 1
if index >= 0:
help = self._menuInfo[menuName][1][index]
balloon.showstatus(help)
def _resetHelpmessage(self, event=None):
balloon = self['balloon']
if balloon is not None:
balloon.clearstatus()
Pmw.forwardmethods(MainMenuBar, Tkinter.Menu, '_hull')
|
{
"content_hash": "da2cf6527fdfc6dc771d3e2b5208e702",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 79,
"avg_line_length": 35.83856502242153,
"alnum_prop": 0.5516766766766766,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "8d2ca404367d7a86b1e17c94cd76eae9ccdcbeea",
"size": "8008",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Panda3D-1.9.0/Pmw/Pmw_1_3/lib/PmwMainMenuBar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
class UploadResult(serializers.Serializer):
url = serializers.URLField()
thumb = serializers.URLField()
|
{
"content_hash": "0e09caad1e4e0db92b308a56f5c07716",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 43,
"avg_line_length": 30.4,
"alnum_prop": 0.7763157894736842,
"repo_name": "popara/jonny-api",
"id": "89e4977fe18d9c3a232afa0575f11acf685193dd",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "upload/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42262"
},
{
"name": "HTML",
"bytes": "3899"
},
{
"name": "JavaScript",
"bytes": "77703"
},
{
"name": "Python",
"bytes": "83332"
}
],
"symlink_target": ""
}
|
from django.shortcuts import get_object_or_404
from django.views.generic import CreateView
from .forms import MindMapComponentForm
from .models import MindMap
from utils.class_based_views_helper import JSONResponseMixin
class MapComponentAddView(CreateView, JSONResponseMixin):
form_class = MindMapComponentForm
template_name = 'mindmap/map_component_add.html'
def get_form_kwargs(self, **kwargs):
kwargs = super(MapComponentAddView, self).get_form_kwargs(**kwargs)
kwargs['mindmap'] = get_object_or_404(MindMap, pk=self.kwargs['mindmap_pk'])
return kwargs
def form_valid(self, form):
form.save()
context_data = super(MapComponentAddView, self).get_context_data()
context_data.update({'form': form})
return self.render_to_response(context_data)
def render_to_response(self, context):
if self.kwargs.get('type') == 'json':
return JSONResponseMixin.render_to_response(self, context)
return super(MapComponentAddView, self).render_to_response(context)
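# Hedged usage sketch (the URL pattern below is an assumption, not part of this
# module): the view expects `mindmap_pk` from the URLconf and an optional
# `type` extra kwarg to switch to the JSON response, e.g.
#
#   url(r'^mindmap/(?P<mindmap_pk>\d+)/component/add/$',
#       MapComponentAddView.as_view(), {'type': 'json'}),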
|
{
"content_hash": "0ad6b44779ba1508db754a94f94d6b1f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 84,
"avg_line_length": 35.4,
"alnum_prop": 0.7109227871939736,
"repo_name": "ierror/BeautifulMind.io",
"id": "d2407536af5bc5693cf03c965244a8ba2970202f",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "beautifulmind/mindmap/views_class_based.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1629"
},
{
"name": "HTML",
"bytes": "24327"
},
{
"name": "JavaScript",
"bytes": "34081"
},
{
"name": "Python",
"bytes": "53413"
},
{
"name": "Shell",
"bytes": "222"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'woeid'
copyright = u'2016, Renchen Sun'
author = u'Renchen Sun'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'woeid v1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'logo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = 'favicon3.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'woeiddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'woeid.tex', u'woeid Documentation',
u'Renchen Sun', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'woeid', u'woeid Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'woeid', u'woeid Documentation',
author, 'woeid', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
{
"content_hash": "4ca4744956e7eaac4abbc17bf54a516a",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 80,
"avg_line_length": 28.37846153846154,
"alnum_prop": 0.6875203296107557,
"repo_name": "Ray-SunR/woeid",
"id": "de5b0d08d649518ce1d4d1aca8cf16c0061310bf",
"size": "9881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48433"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class PowerShellTabCompletionResults(Model):
"""An array of strings representing the different values that can be selected
through.
:param results:
:type results: list[str]
"""
_attribute_map = {
'results': {'key': 'results', 'type': '[str]'},
}
def __init__(self, *, results=None, **kwargs) -> None:
super(PowerShellTabCompletionResults, self).__init__(**kwargs)
self.results = results
|
{
"content_hash": "d55f61f42400fedd9d29918d9c610653",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 81,
"avg_line_length": 27.055555555555557,
"alnum_prop": 0.6344969199178645,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e0c7c8c3cb55f55f7b776249aad0b85eb420910b",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/servermanager/azure-mgmt-servermanager/azure/mgmt/servermanager/models/power_shell_tab_completion_results_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import string
class File:
    # constructor
def __init__(self, absolute_path):
self.apath = absolute_path
# returns file type of the classes path
def get_ftype(self):
if os.path.lexists(self.apath):
if os.path.islink(self.apath):
return "link"
elif os.path.isdir(self.apath):
return "directory"
elif os.stat(self.apath).st_nlink > 1:
return "hard"
else:
# could be many other things, but defaulting to file
return "file"
return "absent"
# touch the file
def touch(self):
with open(self.apath, 'a') as f:
# set access and modified time to now
os.utime(self.apath, None)
# add content to the end of file
def append_to_file(self, content):
with open(self.apath, 'a') as f:
            # write the content verbatim at the end of the file
f.write(content)
    # replace the first occurrence of pattern in the file with replacement
def replace_in_file(self, pattern, replacement):
# create temp file
fhandle, tmp_path = tempfile.mkstemp()
with open(tmp_path,"w") as tmp_file:
# open file for reading
with open(self.apath, "r") as old_file:
found = False
for line in old_file:
                    # replace only the first occurrence found in the file
if line.find(pattern) != -1 and not found:
# write to temp file and replace if pattern found
tmp_file.write(line.replace(pattern, replacement))
found = True
else:
tmp_file.write(line)
# close file handle
os.close(fhandle)
# clean up original file
os.remove(self.apath)
# move new file
os.rename(tmp_path, self.apath)
    # check if a (possibly multiline) string pattern appears in the file
    # returns True if the pattern is found, False otherwise
def is_in_file(self, pattern):
# for multiline pattern
if not isinstance(pattern, list):
pattern = pattern.splitlines()
# open file
with open(self.apath, "r") as f:
for line in f:
                # find the first occurrence
if pattern[0] in line:
# more lines to check
if len(pattern) != 1:
return self.is_in_file(pattern[1:])
else:
return True
return False
# create empty file and move it to the specified location
# def create_file(self):
# fhandle, tmp_path = tempfile.mkstemp()
# # close file handle
# os.close(fhandle)
# # move new file
# os.rename(tmp_path, self.apath)
    # insert content into the file, by default right at the beginning
    # returns the line index the content was inserted at, or -1 if nothing was written
    def insert_into_file(self, content, index=0):
        # return -1 position if nothing happened
        pos = -1
        # split up multiline content once, keeping the line endings
        content_lines = content.splitlines(True)
        # create temp file
        fhandle, tmp_path = tempfile.mkstemp()
        with open(tmp_path, "w") as tmp_file:
            # open the original file for reading
            with open(self.apath, "r") as old_file:
                for lineno, line in enumerate(old_file):
                    # once the requested index is reached, write the new content
                    if lineno == index:
                        tmp_file.writelines(content_lines)
                        pos = index
                    # copy the original line
                    tmp_file.write(line)
        # close file handle
        os.close(fhandle)
        # clean up original file
        os.remove(self.apath)
        # move new file
        os.rename(tmp_path, self.apath)
        return pos
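# Minimal usage sketch (hedged; '/tmp/example.conf' is an arbitrary path, not
# part of this module):
#
#   f = File('/tmp/example.conf')
#   f.touch()
#   f.append_to_file('key = old\n')
#   f.replace_in_file('old', 'new')
#   f.is_in_file('key = new')            # -> True
#   f.insert_into_file('# header\n', 0)  # -> 0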
|
{
"content_hash": "5debba04b57b272e64f98fdfe692eedf",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 76,
"avg_line_length": 34.57377049180328,
"alnum_prop": 0.4973921289710763,
"repo_name": "dkoudlo/py-manage-server",
"id": "94e8241c1765f4506d4805589ab36353043c83c8",
"size": "4218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/file/file_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32786"
},
{
"name": "Shell",
"bytes": "753"
}
],
"symlink_target": ""
}
|
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from sphinx import addnodes
from sphinx.roles import XRefRole
from sphinx.domains import Domain, ObjType, Index
from sphinx.util.nodes import make_refnode
class uplink_placeholder(nodes.General, nodes.Element):
def __init__(self, name):
self.name = name
super().__init__()
class sub_placeholder(nodes.General, nodes.Element):
def __init__(self, name):
self.name = name
super().__init__()
class Reg:
iname = 'reg'
def __init__(self, width, name, brief):
self.width = width
self.name = name
self.brief = brief
class Space:
iname = 'space'
def __init__(self, width, name, size, brief):
self.width = width
self.name = name
self.size = size
self.brief = brief
self.presubs = []
class ObjectDescription(Directive):
has_content = True
def after_content(self):
return []
def run(self):
if ':' in self.name:
self.domain, self.objtype = self.name.split(':', 1)
else:
self.domain, self.objtype = '', self.name
self.env = self.state.document.settings.env
self.indexnode = addnodes.index(entries=[])
obj = self.make_obj()
node = addnodes.desc()
node.document = self.state.document
node['domain'] = self.domain
# 'desctype' is a backwards compatible attribute
node['objtype'] = node['desctype'] = self.objtype
node['noindex'] = noindex = ('noindex' in self.options)
node.name = obj.name
obj.docname = self.env.docname
objects = self.env.domaindata['envy']['objects']
signode = addnodes.desc_signature('', '')
signode['first'] = True
node.append(signode)
self.make_signature(obj, signode)
if not noindex and self.name not in objects:
# only add target and index entry if this is the first
# description of the object with this name in this desc block
#self.add_target_and_index(self.name, sig, signode)
nid = obj.iname + '-' + self.name
signode['names'].append(nid)
signode['ids'].append(nid)
self.state.document.note_explicit_target(signode)
for loc in self.locs:
signode = addnodes.desc_signature('', '')
signode['first'] = False
node.append(signode)
signode += addnodes.desc_name(loc, loc)
node.append(uplink_placeholder(self.name))
if self.name in objects:
other = objects[self.name]
self.state_machine.reporter.warning('duplicate object {}, other instance in {}'.format(self.name, self.env.doc2path(other.docname)))
objects[self.name] = obj
contentnode = addnodes.desc_content()
node.append(contentnode)
self.env.temp_data['object'] = self.name
self.state.nested_parse(self.content, self.content_offset, contentnode)
self.env.temp_data['object'] = None
contentnode += self.after_content()
return [self.indexnode, node]
class EnvyReg(ObjectDescription):
required_arguments = 3
final_argument_whitespace = True
def make_obj(self):
width, self.name, rest = self.arguments
brief, *locs = rest.split('\n')
self.locs = locs # XXX nuke me
return Reg(int(width), self.name, brief)
def make_signature(self, obj, signode):
reg = "reg{} ".format(obj.width)
signode += addnodes.desc_addname(reg, reg)
signode += addnodes.desc_name(obj.name, obj.name)
class EnvySpace(ObjectDescription):
required_arguments = 4
final_argument_whitespace = True
option_spec = {
'root': directives.unchanged,
'noindex': directives.flag,
}
def after_content(self):
subnode = sub_placeholder(self.name)
return [subnode]
def make_obj(self):
width, self.name, size, rest = self.arguments
brief, *subs = rest.split('\n')
obj = Space(int(width), self.name, int(size, 16), brief)
for sub in subs:
pos, name, ref, *rest = sub.split()
if rest:
variants, *rest = rest
if variants == '*':
variants = None
else:
variants = None
if rest:
tags, = rest
else:
tags = None
obj.presubs.append((pos, name, ref, variants))
self.locs = []
return obj
def make_signature(self, obj, signode):
space = "{}-bit space ".format(obj.width)
signode += addnodes.desc_addname(space, space)
signode += addnodes.desc_name(self.name, self.name)
sz = " [{:#x}]".format(obj.size)
signode += addnodes.desc_addname(sz, sz)
def envy_connect(app, env):
objects = env.domaindata['envy']['objects']
# clear uplinks
for obj in objects.values():
obj.uplinks = []
# resolve space sub refs
for sp in objects.values():
if isinstance(sp, Space):
sp.subs = []
for pos, name, ref, variants in sp.presubs:
try:
obj = objects[ref]
except KeyError:
app.warn('space {} refers to nonexistent object {}'.format(sp.name, ref))
else:
if not isinstance(obj, (Reg, Space)):
app.warn('space {} refers to object {} of type {}'.format(sp.name, ref, type(obj)))
else:
sp.subs.append((pos, name, obj, variants))
obj.uplinks.append((sp, pos, name, variants))
def wrap_text_entry(txt):
entry = nodes.entry()
para = nodes.paragraph()
entry += para
para += nodes.Text(txt, txt)
return entry
def envy_resolve(app, doctree, fromdocname):
objects = app.env.domaindata['envy']['objects']
# add uplink info
for holder in doctree.traverse(uplink_placeholder):
obj = objects[holder.name]
links = []
for sp, pos, name, variants in obj.uplinks:
signode = addnodes.desc_signature('', '')
signode['first'] = False
signode += make_refnode(app.builder, fromdocname, sp.docname, sp.iname + '-' + sp.name, addnodes.desc_addname(sp.name, sp.name), sp.name)
text = ' {}: {}'.format(pos, name)
signode += addnodes.desc_name(text, text)
if variants is not None:
text = ' [{}]'.format(variants)
signode += addnodes.desc_annotation(text, text)
links.append(signode)
holder.replace_self(links)
# add subnode list
for holder in doctree.traverse(sub_placeholder):
obj = objects[holder.name]
add_variant = False
for pos, name, child, variants in obj.subs:
if variants is not None:
add_variant = True
table = nodes.table()
headers = [(1, 'Address'), (1, 'Name'), (10, 'Description')]
if add_variant:
headers.insert(1, (1, 'Variants'))
tgroup = nodes.tgroup(cols=len(headers))
table += tgroup
for colwidth, header in headers:
tgroup += nodes.colspec(colwidth=colwidth)
thead = nodes.thead()
tgroup += thead
headrow = nodes.row()
for colwidth, header in headers:
entry = nodes.entry()
para = nodes.paragraph()
entry += para
para += nodes.Text(header, header)
headrow += entry
thead += headrow
tbody = nodes.tbody()
tgroup += tbody
for pos, name, child, variants in obj.subs:
row = nodes.row()
row += wrap_text_entry(pos)
if add_variant:
row += wrap_text_entry('all' if variants is None else variants)
row += wrap_text_entry(name)
entry = nodes.entry()
para = nodes.paragraph()
entry += para
para += make_refnode(app.builder, fromdocname, child.docname, child.iname + '-' + child.name, nodes.Text(child.brief, child.brief), child.brief)
row += entry
tbody += row
holder.replace_self([table])
class EnvyDomain(Domain):
name = 'envy'
label = 'envytools'
object_types = {
}
directives = {
'reg': EnvyReg,
'space': EnvySpace,
}
roles = {
'obj': XRefRole(),
}
initial_data = {
'objects' : {} # name -> envydesc
}
data_version = 0
def clear_doc(self, docname):
for name, node in list(self.data['objects'].items()):
if node.docname == docname:
del self.data['objects'][name]
def resolve_xref(self, env, fromdocname, builder, type, target, node, contnode):
obj = self.data['objects'].get(target)
if obj is not None:
return make_refnode(builder, fromdocname, obj.docname, obj.iname + '-' + obj.name, contnode, obj.brief)
def setup(app):
app.add_domain(EnvyDomain)
app.connect('env-updated', envy_connect)
app.connect('doctree-resolved', envy_resolve)
|
{
"content_hash": "85bfafad6635ff66ddff06c0e697a70e",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 156,
"avg_line_length": 33.37410071942446,
"alnum_prop": 0.565100237120069,
"repo_name": "kfractal/envytools",
"id": "2f2b8c28a0c5df1f8b16278ee97e3ff4edb71b6e",
"size": "9278",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/envy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2763595"
},
{
"name": "C++",
"bytes": "637"
},
{
"name": "CMake",
"bytes": "13569"
},
{
"name": "Lex",
"bytes": "3523"
},
{
"name": "Makefile",
"bytes": "1517"
},
{
"name": "Perl",
"bytes": "21141"
},
{
"name": "Python",
"bytes": "108790"
},
{
"name": "Shell",
"bytes": "7491"
},
{
"name": "Yacc",
"bytes": "8648"
}
],
"symlink_target": ""
}
|
"""## Functions for copying elements from one graph to another.
These functions allow for recursive copying of elements (ops and variables)
from one graph to another. The copied elements are initialized inside a
user-specified scope in the other graph. There are separate functions to
copy ops and variables.
There is also a function to retrieve the copied version of an op from the
first graph inside a scope in the second graph.
@@copy_op_to_graph
@@copy_variable_to_graph
@@get_copied_op
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from tensorflow.python.ops.variables import Variable
from tensorflow.python.client.session import Session
from tensorflow.python.framework import ops
__all__ = ['copy_op_to_graph', 'copy_variable_to_graph', 'get_copied_op']
def copy_variable_to_graph(org_instance, to_graph, scope=''):
"""Given a `Variable` instance from one `Graph`, initializes and returns
a copy of it from another `Graph`, under the specified scope
(default `""`).
Args:
org_instance: A `Variable` from some `Graph`.
to_graph: The `Graph` to copy the `Variable` to.
scope: A scope for the new `Variable` (default `""`).
Returns:
The copied `Variable` from `to_graph`.
Raises:
TypeError: If `org_instance` is not a `Variable`.
"""
if not isinstance(org_instance, Variable):
raise TypeError(str(org_instance) + ' is not a Variable')
#The name of the new variable
if scope != '':
new_name = (scope + '/' + org_instance.name[:org_instance.name.index(':')])
else:
new_name = org_instance.name[:org_instance.name.index(':')]
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope,
#except the special ones required for variable initialization and
#training.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if (name == ops.GraphKeys.GLOBAL_VARIABLES or
name == ops.GraphKeys.TRAINABLE_VARIABLES or scope == ''):
collections.append(name)
else:
collections.append(scope + '/' + name)
#See if it's trainable.
trainable = (
org_instance in org_instance.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES))
#Get the initial value
with org_instance.graph.as_default():
temp_session = Session()
init_value = temp_session.run(org_instance.initialized_value())
#Initialize the new variable
with to_graph.as_default():
new_var = Variable(
init_value,
trainable,
name=new_name,
collections=collections,
validate_shape=False)
return new_var
def copy_op_to_graph(org_instance, to_graph, variables, scope=''):
"""Returns a copy of an operation from another Graph under a specified scope.
Given an `Operation` `org_instance` from one `Graph`,
initializes and returns a copy of it from another `Graph`,
under the specified scope (default `""`).
The copying is done recursively, so any `Operation` whose output
is required to evaluate the `org_instance`, is also copied (unless
already done).
Since `Variable` instances are copied separately, those required
to evaluate `org_instance` must be provided as input.
Args:
org_instance: An `Operation` from some `Graph`. Could be a
`Placeholder` as well.
to_graph: The `Graph` to copy `org_instance` to.
    variables: An iterable of already-copied `Variable` instances required to
      evaluate `org_instance`.
scope: A scope for the new `Variable` (default `""`).
Returns:
The copied `Operation` from `to_graph`.
Raises:
TypeError: If `org_instance` is not an `Operation` or `Tensor`.
"""
#The name of the new instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
#Extract names of variables
copied_variables = dict((x.name, x) for x in variables)
#If a variable by the new name already exists, return the
  #corresponding tensor that will act as an input
if new_name in copied_variables:
return to_graph.get_tensor_by_name(copied_variables[new_name].name)
#If an instance of the same name exists, return appropriately
try:
already_present = to_graph.as_graph_element(
new_name, allow_tensor=True, allow_operation=True)
return already_present
except:
pass
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if scope == '':
collections.append(name)
else:
collections.append(scope + '/' + name)
#Take action based on the class of the instance
if isinstance(org_instance, ops.Tensor):
#If it's a Tensor, it is one of the outputs of the underlying
#op. Therefore, copy the op itself and return the appropriate
#output.
op = org_instance.op
new_op = copy_op_to_graph(op, to_graph, variables, scope)
output_index = op.outputs.index(org_instance)
new_tensor = new_op.outputs[output_index]
#Add to collections if any
for collection in collections:
to_graph.add_to_collection(collection, new_tensor)
return new_tensor
elif isinstance(org_instance, ops.Operation):
op = org_instance
#If it has an original_op parameter, copy it
if op._original_op is not None:
new_original_op = copy_op_to_graph(op._original_op, to_graph, variables,
scope)
else:
new_original_op = None
#If it has control inputs, call this function recursively on each.
new_control_inputs = [
copy_op_to_graph(x, to_graph, variables, scope)
for x in op.control_inputs
]
#If it has inputs, call this function recursively on each.
new_inputs = [
copy_op_to_graph(x, to_graph, variables, scope) for x in op.inputs
]
#Make a new node_def based on that of the original.
#An instance of tensorflow.core.framework.node_def_pb2.NodeDef, it
#stores String-based info such as name, device and type of the op.
#Unique to every Operation instance.
new_node_def = deepcopy(op.node_def)
#Change the name
new_node_def.name = new_name
#Copy the other inputs needed for initialization
output_types = op._output_types[:]
input_types = op._input_types[:]
#Make a copy of the op_def too.
#Its unique to every _type_ of Operation.
op_def = deepcopy(op.op_def)
#Initialize a new Operation instance
new_op = ops.Operation(new_node_def, to_graph, new_inputs, output_types,
new_control_inputs, input_types, new_original_op,
op_def)
#Use Graph's hidden methods to add the op
to_graph._record_op_seen_by_control_dependencies(new_op)
# pylint: disable=protected-access
for device_function in to_graph._device_functions_outer_to_inner:
new_op._set_device(device_function(new_op))
# pylint: enable=protected-access
return new_op
else:
raise TypeError('Could not copy instance: ' + str(org_instance))
def get_copied_op(org_instance, graph, scope=''):
"""Given an `Operation` instance from some `Graph`, returns
its namesake from `graph`, under the specified scope
(default `""`).
If a copy of `org_instance` is present in `graph` under the given
`scope`, it will be returned.
Args:
org_instance: An `Operation` from some `Graph`.
    graph: The `Graph` to be searched for a copy of `org_instance`.
scope: The scope `org_instance` is present in.
Returns:
The `Operation` copy from `graph`.
"""
#The name of the copied instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
return graph.as_graph_element(
new_name, allow_tensor=True, allow_operation=True)
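# Example (hedged sketch, not part of the public API surface of this module):
# copy a variable and the op that consumes it into a scoped copy in a second
# graph, then look the copied op up again.
#
#   import tensorflow as tf
#   from tensorflow.contrib.copy_graph.python.util import copy_elements
#
#   g1, g2 = tf.Graph(), tf.Graph()
#   with g1.as_default():
#     v = tf.Variable(1.0, name='v')
#     out = tf.add(v, 2.0, name='out')
#   copied_v = copy_elements.copy_variable_to_graph(v, g2, scope='copied')
#   copied_out = copy_elements.copy_op_to_graph(out, g2, [copied_v], scope='copied')
#   copy_elements.get_copied_op(out.op, g2, scope='copied')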
|
{
"content_hash": "35240101a946cb5ce860ea11860613df",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 79,
"avg_line_length": 33.13168724279836,
"alnum_prop": 0.6801639547882251,
"repo_name": "xodus7/tensorflow",
"id": "6c9ab6aeb87fd39b22ab4f28d69b432b15899a13",
"size": "8740",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/copy_graph/python/util/copy_elements.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "340946"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48861698"
},
{
"name": "CMake",
"bytes": "195699"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1240309"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834061"
},
{
"name": "Jupyter Notebook",
"bytes": "2604756"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40952138"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "459258"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import os
import requests
import urllib.request, urllib.error, urllib.parse
import json
import sys
import time
import webbrowser
from stravalib import Client
from stravalib.exc import AccessUnauthorized
# we need the files "client_id", "auth_code" and "client_secret" inside this path
# if you change the path, dont forget to .gitignore it
path_for_files = "./authfiles/"
def internet_on():
try:
        urllib.request.urlopen('http://www.google.com', timeout=1)
        return True
    except urllib.error.URLError:
        pass
return False
if not internet_on():
sys.exit("...exiting. no internet connection")
def get_string_from_file(file):
if os.path.exists(path_for_files + file):
with open(path_for_files + file, 'r') as string_from_file:
            string = string_from_file.read().replace('\n', '')
            print("...reading " + path_for_files + file)
            if not string:
                sys.exit("...exiting. " + path_for_files + file + " is empty")
else:
print("...getting ", string + "\n")
return string
else:
return None
def write_string_to_file(token_type, token_value):
with open(path_for_files + token_type, "w") as token_file:
token_file.write(token_value)
print("...writing " + token_value + " to " + path_for_files + token_type)
def ensure_api_configured():
print("Checking API configured")
client_id = get_string_from_file('client_id')
client_secret = get_string_from_file('client_secret')
if not client_id or not client_secret:
print("Not configured. Enter API details found at https://www.strava.com/settings/api")
try:
client_id = input("Client ID: ")
client_secret = input("Client Secret: ")
write_string_to_file("client_id", client_id)
write_string_to_file("client_secret", client_secret)
except EOFError:
print("Please populate client_id and client_secret files manually")
sys.exit(1)
def check_if_access_token_valid():
print("Checking Access token valid")
access_token = get_string_from_file('access_token')
strava = Client()
try:
strava.access_token = access_token
strava.get_athlete()
except AccessUnauthorized:
print("Access Token not valid")
return False
print("Access Token valid. Exiting...")
sys.exit(0)
def refresh_current_token():
print("Refreshing current token")
refresh_token = get_string_from_file('refresh_token')
client_id = get_string_from_file('client_id')
client_secret = get_string_from_file('client_secret')
if not refresh_token:
print("No refresh token present.")
request_user_login()
else:
strava = Client()
refresh_response = strava.refresh_access_token(client_id=client_id,
client_secret=client_secret,
refresh_token=refresh_token)
write_string_to_file("access_token", refresh_response['access_token'])
write_string_to_file("refresh_token", refresh_response['refresh_token'])
print("Token expires at " + str(refresh_response['expires_at']))
check_if_access_token_valid()
def request_user_login():
print("Requesting user login")
client_id = get_string_from_file('client_id')
client_secret = get_string_from_file('client_secret')
client=Client()
LOGIN_URL = client.authorization_url(client_id=client_id, redirect_uri='http://localhost')
print(LOGIN_URL)
webbrowser.open(LOGIN_URL)
try:
auth_code = input("Enter the auth_code from the redirected URL: ")
write_string_to_file("auth_code", auth_code)
except EOFError:
print("Unable to read code from stdin. Assuming `auth_code` file is manually populated")
auth_code = get_string_from_file('auth_code')
token_response = client.exchange_code_for_token(client_id=client_id, client_secret=client_secret, code=auth_code)
write_string_to_file("access_token", token_response['access_token'])
write_string_to_file("refresh_token", token_response['refresh_token'])
print("Token expires at " + str(token_response['expires_at']))
check_if_access_token_valid()
def main():
ensure_api_configured()
check_if_access_token_valid()
refresh_current_token()
print("Something went wrong")
sys.exit(1)
if __name__ == "__main__":
main()
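# Hedged follow-up sketch (illustrative only): once this script has written a
# valid access token, other scripts can reuse it with stravalib, e.g.
#
#   from stravalib import Client
#   client = Client(access_token=open("./authfiles/access_token").read().strip())
#   print(client.get_athlete().firstname)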
|
{
"content_hash": "a1f90eafed6a0dffa4ae82d750cf2783",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 117,
"avg_line_length": 33.75,
"alnum_prop": 0.628322440087146,
"repo_name": "rotti/grava",
"id": "6af170db5272153fa9294d43da292da57fcce8fd",
"size": "4613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "token_helper.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17627"
}
],
"symlink_target": ""
}
|
from decimal import Decimal
from django.db import models
from django.conf import settings
from django_extensions.db.fields import AutoSlugField
from model_utils import Choices
from abs_models import Abs_titulado_slugfy
from Corretor.utils import get_corretor_choices, get_corretor_por_id
from Corretor.base import CorretorException
from Corretor.models import RetornoCorrecao
from Corretor.tasks import run_corretor_validar_gabarito
from tipo_questao import TipoQuestao
from lockable import Lockable
class Questao(Abs_titulado_slugfy,Lockable):
"""
Representa uma Questao, isso é um problema que esta ligado a uma avaliacao que esta por sua vez ligada
a um aluno.
"""
CORRETORES = Choices(*get_corretor_choices())
# CORRETORES = Choices((0,'base','Base'))
enunciado = models.TextField(u"Enunciado")
respostaDiscursiva = models.TextField(u"Resposta Discursiva",blank=True, null=True)
    #:The percentage weight of the programming part in this question.
percentNotaProgramacao = models.DecimalField(u"Percentual da Nota de Programação",max_digits=10, decimal_places=2,default=Decimal("100"))
    #:The percentage weight of the multiple-choice part in this question.
percentNotaMultipla = models.DecimalField(u"Percentual da Nota das Multiplas Escolhas",max_digits=10, decimal_places=2,default=Decimal("0"))
    #:The percentage weight of the essay (discursiva) part in this question.
percentNotaDiscursiva = models.DecimalField(u"Percentual da Nota da Discursiva",max_digits=10, decimal_places=2,default=Decimal("0"))
    #:Indicates whether the question is ready to be used in an assessment template.
verificada = models.BooleanField(u"Verificada",default=False)
    #:The author (user) of this question.
autor = models.ForeignKey('auth.User',blank=True,null=True, related_name='questoes_autor')
id_corretor = models.SmallIntegerField(u"Corretor",choices=CORRETORES)#, default=CORRETORES.c)
    #Question type, used for filtering.
tipo = models.ManyToManyField(TipoQuestao, related_name="questoes")
retorno_correcao = models.ForeignKey('Corretor.RetornoCorrecao',blank=True,null=True, on_delete=models.SET_NULL)
@property
def corretor(self):
"recupera um corretor dado o id_corretor"
return get_corretor_por_id(self.id_corretor)
class Meta:
verbose_name = u'Questão'
app_label = 'Questao'
def __unicode__(self):
return self.slug
def get_rand_entrada(self):
"retorna uma entrada randomica"
import random
count = self.entradasGabarito.all().count()
rand_entrada_num = 0
if count >= 1:
rand_entrada_num = random.randint(0,count -1)
return self.entradasGabarito.all()[rand_entrada_num]
else:
return None
# print rand_entrada_num
def verificar_questao(self):
"""verifica se uma questão esta pronta para ser usada em uma avaliacao
Ou seja, pode ser compilada e executada.
"""
        #If this is not a programming question, skip this check.
if not self.percentNotaProgramacao > 0:
self.verificada=True
return
corretor = self.corretor()
retorno = self.get_retorno_or_create
self.save(verificar=False)
corretor_task = run_corretor_validar_gabarito.delay(corretor=corretor,questao=self)
retorno = retorno.__class__.objects.get(pk=retorno.pk)
retorno.task_id = corretor_task.task_id
retorno.save()
@property
def is_programacao(self):
"retorna true se essa for uma questao de programação"
return self.percentNotaProgramacao > Decimal("0")
@property
def get_retorno_or_create(self):
retorno = self.retorno_correcao
if not self.retorno_correcao:
retorno = RetornoCorrecao()
retorno.save()
self.retorno_correcao = retorno
return retorno
    def save(self, *args, **kwargs):
        #Before saving, check whether the question is fit to be used in an
        #assessment, i.e. it compiles and runs without errors.
        verificar = kwargs.pop('verificar', True)
        if self.slug and verificar:
            self.verificar_questao()
        super(Questao, self).save(*args, **kwargs)
|
{
"content_hash": "ae8389ef60bb1e23bed0ed8bf8fef9b0",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 144,
"avg_line_length": 36.016129032258064,
"alnum_prop": 0.6860725481415136,
"repo_name": "arruda/amao",
"id": "fe6f7704fe8eb3a9fbe385efee308126702ccc74",
"size": "4503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AMAO/apps/Avaliacao/Questao/models/questao.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18513"
},
{
"name": "C++",
"bytes": "2359"
},
{
"name": "CSS",
"bytes": "21310"
},
{
"name": "JavaScript",
"bytes": "3452"
},
{
"name": "Python",
"bytes": "389608"
},
{
"name": "Ruby",
"bytes": "520"
},
{
"name": "Shell",
"bytes": "13785"
}
],
"symlink_target": ""
}
|
from maya import cmds
import pymel.core as pymel
import json
from plugin_json import export_json
from plugin_json import export_json_file
from plugin_json import import_json
from plugin_json import import_json_file
__all__ = (
'export_json_maya',
'export_json_file_maya',
'import_json_maya',
'import_json_file_maya'
)
# TODO: Add support for matrix and vector datatypes
class PymelJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, pymel.PyNode):
return {'_class_pymel':'pymel.PyNode', '__melobject__': o.__melobject__()}
elif isinstance(o, pymel.Attribute):
return {'_class_pymel':'pymel.Attribute', '__melobject__': o.__melobject__()}
elif isinstance(o, pymel.datatypes.Matrix):
return {'_class_pymel':'pymel.datatypes.Matrix', '__melobject__': o.__melobject__()}
elif isinstance(o, pymel.datatypes.Vector):
return {'_class_pymel':'pymel.datatypes.Vector', '__melobject__': [o.x, o.y, o.z]}
elif isinstance(o, pymel.datatypes.Point):
return {'_class_pymel':'pymel.datatypes.Point', '__melobject__': [o.w, o.x, o.y, o.z]}
else:
return super(PymelJSONEncoder, self).default(o)
# TODO: Add support for matrix and vector datatypes
class PymelJSONDecoder(json.JSONDecoder):
def __init__(self, *args, **kwargs):
super(PymelJSONDecoder, self).__init__(object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, val):
if isinstance(val, dict):
cls = val.get('_class_pymel', None)
if cls == 'pymel.PyNode':
dagpath = val.get('__melobject__')
val = pymel.PyNode(dagpath) if cmds.objExists(dagpath) else None # TODO: add warning?
elif cls == 'pymel.Attribute':
dagpath = val.get('__melobject__')
val = pymel.Attribute(dagpath) if cmds.objExists(dagpath) else None # TODO: add warning?
elif cls == 'pymel.datatypes.Matrix':
melval = val.get('__melobject__')
val = pymel.datatypes.Matrix(melval)
elif cls == 'pymel.datatypes.Vector':
coords = val.get('__melobject__')
val = pymel.datatypes.Vector(coords)
elif cls == 'pymel.datatypes.Point':
coords = val.get('__melobject__')
val = pymel.datatypes.Point(coords)
return val
def export_json_maya(*args, **kwargs):
return export_json(cls=PymelJSONEncoder, *args, **kwargs)
def export_json_file_maya(*args, **kwargs):
return export_json_file(cls=PymelJSONEncoder, *args, **kwargs)
def import_json_maya(*args, **kwargs):
return import_json(cls=PymelJSONDecoder, *args, **kwargs)
def import_json_file_maya(*args, **kwargs):
return import_json_file(cls=PymelJSONDecoder, *args, **kwargs)
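if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): round-trip a pymel
    # vector through the encoder/decoder defined above. Like the rest of this
    # module, it assumes a running Maya session with pymel available.
    data = {"offset": pymel.datatypes.Vector(1.0, 2.0, 3.0)}
    raw = json.dumps(data, cls=PymelJSONEncoder)
    restored = json.loads(raw, cls=PymelJSONDecoder)
    print(restored["offset"])  # a pymel.datatypes.Vector equal to the original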
|
{
"content_hash": "9297a6b16bfa1e3583e00b7a0aa3cb27",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 105,
"avg_line_length": 42.338235294117645,
"alnum_prop": 0.6144494616186176,
"repo_name": "renaudll/libSerialization",
"id": "9f6fbe93f4cd5973b7ad2e9b0935bbec67fbca12",
"size": "2963",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libSerialization/plugin_maya_json.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56430"
}
],
"symlink_target": ""
}
|
"""Lit runner site configuration."""
import os
import platform
import lit.llvm
# Handle the test srcdir for platforms. On windows, things are weird with bazel.
if platform.system() == 'Windows':
srcdir = os.environ['TEST_SRCDIR']
real_test_srcdir = srcdir[:srcdir.find('tensorflow/compiler/mlir')]
external_srcdir = os.path.join(real_test_srcdir, 'external')
else:
real_test_srcdir = os.environ['TEST_SRCDIR']
external_srcdir = real_test_srcdir
# Lint for undefined variables is disabled as config is not defined inside this
# file, instead config is injected by lit.py. The structure is common for lit
# tests and intended to only persist temporarily (b/136126535).
# pylint: disable=undefined-variable
config.llvm_tools_dir = os.path.join(external_srcdir, 'llvm-project', 'llvm')
config.mlir_obj_root = os.path.join(real_test_srcdir)
config.mlir_tools_dir = os.path.join(external_srcdir, 'llvm-project', 'mlir')
# TODO(jpienaar): Replace with suffices in build rule.
config.suffixes = ['.td', '.mlir', '.pbtxt']
mlir_tf_tools_dirs = [
'tensorflow/core/ir/importexport/',
'tensorflow/core/ir/tests/',
'tensorflow/core/transforms/',
'tensorflow/compiler/mlir',
'tensorflow/compiler/xla/mlir_hlo',
'tensorflow/compiler/xla/mlir_hlo/tosa',
'tensorflow/compiler/xla/translate',
'tensorflow/compiler/xla/translate/mhlo_to_lhlo_with_xla',
'tensorflow/compiler/mlir/lite',
'tensorflow/compiler/mlir/lite/experimental/tac',
'tensorflow/compiler/mlir/quantization/tensorflow',
'tensorflow/compiler/mlir/tensorflow',
'tensorflow/compiler/mlir/tfrt',
'tensorflow/compiler/mlir/xla',
'tensorflow/compiler/mlir/tools/kernel_gen',
'tensorflow/compiler/aot',
'tensorflow/compiler/xla/service/mlir_gpu',
'tensorflow/compiler/xla/service/gpu/tests',
'tensorflow/compiler/xla/mlir/backends/cpu',
'tensorflow/compiler/xla/mlir/backends/gpu',
'tensorflow/compiler/xla/mlir/runtime',
'tensorflow/compiler/mlir/lite/stablehlo',
]
config.mlir_tf_tools_dirs = [
os.path.join(real_test_srcdir, os.environ['TEST_WORKSPACE'], s)
for s in mlir_tf_tools_dirs
]
test_dir = os.environ['TEST_TARGET']
test_dir = test_dir.strip('/').rsplit(':', 1)[0]
config.mlir_test_dir = os.path.join(real_test_srcdir,
os.environ['TEST_WORKSPACE'], test_dir)
if platform.system() == 'Windows':
# Configure this to work with msys2, TF's preferred windows bash.
config.lit_tools_dir = '/usr/bin'
lit.llvm.initialize(lit_config, config)
# Let the main config do the real work.
lit_config.load_config(
config,
os.path.join(
os.path.join(real_test_srcdir, os.environ['TEST_WORKSPACE'],
'tensorflow/compiler/mlir/runlit.cfg.py')))
# pylint: enable=undefined-variable
|
{
"content_hash": "3c0b8e09a1d0946ec27d46bda6815d2b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 80,
"avg_line_length": 39.50704225352113,
"alnum_prop": 0.706951871657754,
"repo_name": "paolodedios/tensorflow",
"id": "4907eaa6135ba17a591deb2fa211faf171704e65",
"size": "3401",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/compiler/mlir/runlit.site.cfg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1387968"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125994873"
},
{
"name": "CMake",
"bytes": "182324"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11402294"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300208"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42775737"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621520"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7727119"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from uservalid import get_user
import gconfig
import json
def useradd(username,password,telephone,mail,usertel,name,adminrole):
user_list1=get_user()
for user in user_list1:
if username == user['username']:
return False
useradd_f=open(gconfig.USER_FILE,'rb')
useradd_info=useradd_f.read()
useradd_f.close()
useradd_cxt=json.loads(useradd_info)
useradd_cxt.append({'username':username,'password':password,'telephone':telephone,'mail':mail,'usertel':usertel,'name':name,'adminrole':adminrole})
useradd_f=open(gconfig.USER_FILE,'w')
useradd_f.write(json.dumps(useradd_cxt))
useradd_f.close()
return True
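if __name__ == "__main__":
    # Hedged usage sketch (illustrative only, all values are placeholders):
    # useradd appends a record to gconfig.USER_FILE and returns False when the
    # username already exists.
    created = useradd('alice', 'secret', '555-0100', 'alice@example.com',
                      '555-0101', 'Alice', 'admin')
    print('created' if created else 'username already taken')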
|
{
"content_hash": "e36b8b9354d645ac40545c47e6192de3",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 151,
"avg_line_length": 34.4,
"alnum_prop": 0.6787790697674418,
"repo_name": "51reboot/actual_09_homework",
"id": "cb9ef86533c983e3c4d0b603324d49ba61156633",
"size": "705",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "05/liubaobao/useradd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4623850"
},
{
"name": "HTML",
"bytes": "90670692"
},
{
"name": "JavaScript",
"bytes": "31827839"
},
{
"name": "Nginx",
"bytes": "1073"
},
{
"name": "PHP",
"bytes": "349512"
},
{
"name": "Python",
"bytes": "1705997"
},
{
"name": "Shell",
"bytes": "10001"
},
{
"name": "Smarty",
"bytes": "342164"
}
],
"symlink_target": ""
}
|
"""Fichier contenant le paramètre 'relâcher' de la commande 'rames'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmRelacher(Parametre):
"""Commande 'rames relâcher'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "relâcher", "unhold")
self.aide_courte = "relâche les rames"
self.aide_longue = \
"Cette commande permet de relacher les rames."
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
salle = personnage.salle
if not hasattr(salle, "navire") or salle.navire is None or \
salle.navire.etendue is None:
personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
return
navire = salle.navire
rames = salle.rames
if not rames:
personnage << "|err|Il n'y a pas de rames ici.|ff|"
return
if rames.tenu is not personnage:
personnage << "|err|Vous ne tenez pas ces rames.|ff|"
else:
rames.relacher()
|
{
"content_hash": "d99057cb22a4d0bdb2770d6e7de9fd89",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 71,
"avg_line_length": 31.714285714285715,
"alnum_prop": 0.5891891891891892,
"repo_name": "vlegoff/tsunami",
"id": "116790e8f5e366e5aee759b5769a7ad792190a6c",
"size": "2688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/navigation/commandes/rames/relacher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
import j3.jone
|
{
"content_hash": "adc1645de1e03f41d339671910886691",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 14,
"avg_line_length": 14,
"alnum_prop": 0.8571428571428571,
"repo_name": "jskDr/jamespy",
"id": "3e921ab09ab3f861a8b6a03df9896bbf5ba50dfe",
"size": "14",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "j3/jtwo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1381"
},
{
"name": "Python",
"bytes": "477645"
},
{
"name": "Ruby",
"bytes": "220"
}
],
"symlink_target": ""
}
|
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from test.util import SparkContextForTest
@pytest.mark.fast
class DependencyParserConllUTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark \
.createDataFrame([["I saw a girl with a telescope"]]).toDF("text")
self.corpus = os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/"
self.conllu = os.getcwd() + "/../src/test/resources/parser/unlabeled/conll-u/train_small.conllu.txt"
from sparknlp.training import POS
self.train_pos = POS().readDataset(SparkContextForTest.spark,
os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/test-training.txt",
delimiter="|", outputPosCol="tags", outputDocumentCol="document",
outputTextCol="text")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
pos_tagger = PerceptronApproach() \
.setInputCols(["token", "sentence"]) \
.setOutputCol("pos") \
.setIterations(1) \
.fit(self.train_pos)
dependency_parser = DependencyParserApproach() \
.setInputCols(["sentence", "pos", "token"]) \
.setOutputCol("dependency") \
.setConllU(self.conllu) \
.setNumberOfIterations(10)
assembled = document_assembler.transform(self.data)
sentenced = sentence_detector.transform(assembled)
tokenized = tokenizer.fit(sentenced).transform(sentenced)
pos_tagged = pos_tagger.transform(tokenized)
dependency_parsed = dependency_parser.fit(pos_tagged).transform(pos_tagged)
dependency_parsed.show()
@pytest.mark.fast
class DependencyParserTreeBankTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark \
.createDataFrame([["I saw a girl with a telescope"]]).toDF("text")
self.corpus = os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/"
self.dependency_treebank = os.getcwd() + "/../src/test/resources/parser/unlabeled/dependency_treebank"
from sparknlp.training import POS
self.train_pos = POS().readDataset(SparkContextForTest.spark,
os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/test-training.txt",
delimiter="|", outputPosCol="tags", outputDocumentCol="document",
outputTextCol="text")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
pos_tagger = PerceptronApproach() \
.setInputCols(["token", "sentence"]) \
.setOutputCol("pos") \
.setIterations(1) \
.fit(self.train_pos)
dependency_parser = DependencyParserApproach() \
.setInputCols(["sentence", "pos", "token"]) \
.setOutputCol("dependency") \
.setDependencyTreeBank(self.dependency_treebank) \
.setNumberOfIterations(10)
assembled = document_assembler.transform(self.data)
sentenced = sentence_detector.transform(assembled)
tokenized = tokenizer.fit(sentenced).transform(sentenced)
pos_tagged = pos_tagger.transform(tokenized)
dependency_parsed = dependency_parser.fit(pos_tagged).transform(pos_tagged)
dependency_parsed.show()
|
{
"content_hash": "a1d578cf28c5ce66ec482c7396b37734",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 121,
"avg_line_length": 40.592233009708735,
"alnum_prop": 0.5967471896675437,
"repo_name": "JohnSnowLabs/spark-nlp",
"id": "96a9178f098a023807ca30f0fb8e926a0392bdcb",
"size": "4773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test/annotator/dependency/dependency_parser_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "14452"
},
{
"name": "Java",
"bytes": "223289"
},
{
"name": "Makefile",
"bytes": "819"
},
{
"name": "Python",
"bytes": "1694517"
},
{
"name": "Scala",
"bytes": "4116435"
},
{
"name": "Shell",
"bytes": "5286"
}
],
"symlink_target": ""
}
|
"""
This is the interface for interacting with a SMS service.
"""
import re
from restclients.dao import SMS_DAO
from restclients.exceptions import InvalidPhoneNumber, PhoneNumberRequired
class SMSService(object):
"""
This creates a SMS message to be sent to a destination phone number.
"""
def create_message(self, to, body):
if to != "":
self.validate_phone_number(to)
else:
raise PhoneNumberRequired
dao = SMS_DAO()
return dao.create_message(to, body)
"""
This sends a message to a destination phone number.
"""
def send_message(self, message):
dao = SMS_DAO()
return dao.send_message(message)
    def validate_phone_number(self, number):
        '''
        Validate North American phone numbers
        http://blog.stevenlevithan.com/archives/validate-phone-number
        '''
        if not re.search(r'^\(?([0-9]{3})\)?[-. ]?([0-9]{3})[-. ]?([0-9]{4})$', number):
raise InvalidPhoneNumber
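if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): numbers are
    # validated before a message is created; invalid ones raise
    # InvalidPhoneNumber and an empty destination raises PhoneNumberRequired.
    service = SMSService()
    try:
        service.create_message("206-555-0100", "Hello from the SMS service")
    except InvalidPhoneNumber:
        print("Destination is not a valid North American phone number")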
|
{
"content_hash": "8bc8be8a8ed35f84467e8d7913ec02bc",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 87,
"avg_line_length": 29.323529411764707,
"alnum_prop": 0.6208625877632898,
"repo_name": "UWIT-IAM/uw-restclients",
"id": "31e0d0f0235830b2818aa2a74332167c70193967",
"size": "997",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "restclients/sms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "95729"
},
{
"name": "Python",
"bytes": "897420"
},
{
"name": "Shell",
"bytes": "584"
}
],
"symlink_target": ""
}
|
import random
import numpy as np
import marshal
def generate_room(dim=(13, 13), p_change_directions=0.35, num_steps=25, num_boxes=3, tries=4, second_player=False):
"""
Generates a Sokoban room, represented by an integer matrix. The elements are encoded as follows:
wall = 0
empty space = 1
box target = 2
box not on target = 3
box on target = 4
player = 5
:param dim:
:param p_change_directions:
:param num_steps:
:return: Numpy 2d Array
"""
room_state = np.zeros(shape=dim)
room_structure = np.zeros(shape=dim)
    # Sometimes rooms with a score == 0 are the only possibility.
    # In that case, we try another layout.
for t in range(tries):
room = room_topology_generation(dim, p_change_directions, num_steps)
room = place_boxes_and_player(room, num_boxes=num_boxes, second_player=second_player)
        # room_structure represents all non-movable parts of the room
room_structure = np.copy(room)
room_structure[room_structure == 5] = 1
        # room_state represents the current state of the room, including movable parts
room_state = room.copy()
room_state[room_state == 2] = 4
room_state, score, box_mapping = reverse_playing(room_state, room_structure)
room_state[room_state == 3] = 4
if score > 0:
break
if score == 0:
raise RuntimeWarning('Generated Model with score == 0')
return room_structure, room_state, box_mapping
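# A minimal usage sketch (illustrative only, not executed on import):
#
#   structure, state, mapping = generate_room(dim=(10, 10), num_boxes=2)
#   print(TYPE_LOOKUP[state[0, 0]])   # 'wall' -- the border is always a wall
#
# generate_room raises RuntimeWarning if no layout with a positive reverse-play
# score is found within `tries` attempts.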
def room_topology_generation(dim=(10, 10), p_change_directions=0.35, num_steps=15):
"""
    Generate a room topology, which consists of empty floors and walls.
:param dim:
:param p_change_directions:
:param num_steps:
:return:
"""
dim_x, dim_y = dim
# The ones in the mask represent all fields which will be set to floors
# during the random walk. The centered one will be placed over the current
# position of the walk.
masks = [
[
[0, 0, 0],
[1, 1, 1],
[0, 0, 0]
],
[
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
],
[
[0, 0, 0],
[1, 1, 0],
[0, 1, 0]
],
[
[0, 0, 0],
[1, 1, 0],
[1, 1, 0]
],
[
[0, 0, 0],
[0, 1, 1],
[0, 1, 0]
]
]
# Possible directions during the walk
directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]
direction = random.sample(directions, 1)[0]
# Starting position of random walk
position = np.array([
random.randint(1, dim_x - 1),
random.randint(1, dim_y - 1)]
)
level = np.zeros(dim, dtype=int)
for s in range(num_steps):
# Change direction randomly
if random.random() < p_change_directions:
direction = random.sample(directions, 1)[0]
# Update position
position = position + direction
position[0] = max(min(position[0], dim_x - 2), 1)
position[1] = max(min(position[1], dim_y - 2), 1)
# Apply mask
mask = random.sample(masks, 1)[0]
mask_start = position - 1
level[mask_start[0]:mask_start[0] + 3, mask_start[1]:mask_start[1] + 3] += mask
level[level > 0] = 1
level[:, [0, dim_y - 1]] = 0
level[[0, dim_x - 1], :] = 0
return level
def place_boxes_and_player(room, num_boxes, second_player):
"""
Places the player and the boxes into the floors in a room.
:param room:
:param num_boxes:
:return:
"""
# Get all available positions
possible_positions = np.where(room == 1)
num_possible_positions = possible_positions[0].shape[0]
num_players = 2 if second_player else 1
if num_possible_positions <= num_boxes + num_players:
raise RuntimeError('Not enough free spots (#{}) to place {} player and {} boxes.'.format(
num_possible_positions,
num_players,
num_boxes)
)
# Place player(s)
ind = np.random.randint(num_possible_positions)
player_position = possible_positions[0][ind], possible_positions[1][ind]
room[player_position] = 5
if second_player:
ind = np.random.randint(num_possible_positions)
player_position = possible_positions[0][ind], possible_positions[1][ind]
room[player_position] = 5
# Place boxes
for n in range(num_boxes):
possible_positions = np.where(room == 1)
num_possible_positions = possible_positions[0].shape[0]
ind = np.random.randint(num_possible_positions)
box_position = possible_positions[0][ind], possible_positions[1][ind]
room[box_position] = 2
return room
# Global variables used for reverse playing.
explored_states = set()
num_boxes = 0
best_room_score = -1
best_room = None
best_box_mapping = None
def reverse_playing(room_state, room_structure, search_depth=100):
"""
    This function plays Sokoban in reverse, such that the player can
    move and pull boxes.
It ensures a solvable level with all boxes not being placed on a box target.
:param room_state:
:param room_structure:
:param search_depth:
:return: 2d array
"""
global explored_states, num_boxes, best_room_score, best_room, best_box_mapping
# Box_Mapping is used to calculate the box displacement for every box
box_mapping = {}
box_locations = np.where(room_structure == 2)
num_boxes = len(box_locations[0])
for l in range(num_boxes):
box = (box_locations[0][l], box_locations[1][l])
box_mapping[box] = box
# explored_states globally stores the best room state and score found during search
explored_states = set()
best_room_score = -1
best_box_mapping = box_mapping
depth_first_search(room_state, room_structure, box_mapping, box_swaps=0, last_pull=(-1, -1), ttl=300)
return best_room, best_room_score, best_box_mapping
def depth_first_search(room_state, room_structure, box_mapping, box_swaps=0, last_pull=(-1, -1), ttl=300):
"""
Searches through all possible states of the room.
    This is a recursive function, which stops if the ttl is reduced to 0 or
    more than 300,000 states have been explored.
:param room_state:
:param room_structure:
:param box_mapping:
:param box_swaps:
:param last_pull:
:param ttl:
:return:
"""
global explored_states, num_boxes, best_room_score, best_room, best_box_mapping
ttl -= 1
if ttl <= 0 or len(explored_states) >= 300000:
return
state_tohash = marshal.dumps(room_state)
    # Only search this state if it has not yet been explored
if not (state_tohash in explored_states):
# Add current state and its score to explored states
room_score = box_swaps * box_displacement_score(box_mapping)
if np.where(room_state == 2)[0].shape[0] != num_boxes:
room_score = 0
if room_score > best_room_score:
best_room = room_state
best_room_score = room_score
best_box_mapping = box_mapping
explored_states.add(state_tohash)
for action in ACTION_LOOKUP.keys():
            # The state and box mapping need to be copied to ensure
            # every action starts from the same initial state.
room_state_next = room_state.copy()
box_mapping_next = box_mapping.copy()
room_state_next, box_mapping_next, last_pull_next = \
reverse_move(room_state_next, room_structure, box_mapping_next, last_pull, action)
box_swaps_next = box_swaps
if last_pull_next != last_pull:
box_swaps_next += 1
depth_first_search(room_state_next, room_structure,
box_mapping_next, box_swaps_next,
last_pull, ttl)
def reverse_move(room_state, room_structure, box_mapping, last_pull, action):
"""
    Performs a reverse action, where all actions in the range [0, 3] correspond
    to push actions and the ones greater than 3 are simple move actions.
:param room_state:
:param room_structure:
:param box_mapping:
:param last_pull:
:param action:
:return:
"""
player_position = np.where(room_state == 5)
player_position = np.array([player_position[0][0], player_position[1][0]])
change = CHANGE_COORDINATES[action % 4]
next_position = player_position + change
# Check if next position is an empty floor or an empty box target
if room_state[next_position[0], next_position[1]] in [1, 2]:
# Move player, independent of pull or move action.
room_state[player_position[0], player_position[1]] = room_structure[player_position[0], player_position[1]]
room_state[next_position[0], next_position[1]] = 5
# In addition try to pull a box if the action is a pull action
if action < 4:
possible_box_location = change[0] * -1, change[1] * -1
possible_box_location += player_position
if room_state[possible_box_location[0], possible_box_location[1]] in [3, 4]:
# Perform pull of the adjacent box
room_state[player_position[0], player_position[1]] = 3
room_state[possible_box_location[0], possible_box_location[1]] = room_structure[
possible_box_location[0], possible_box_location[1]]
# Update the box mapping
for k in box_mapping.keys():
if box_mapping[k] == (possible_box_location[0], possible_box_location[1]):
box_mapping[k] = (player_position[0], player_position[1])
last_pull = k
return room_state, box_mapping, last_pull
def box_displacement_score(box_mapping):
"""
Calculates the sum of all Manhattan distances, between the boxes
and their origin box targets.
:param box_mapping:
:return:
"""
score = 0
for box_target in box_mapping.keys():
box_location = np.array(box_mapping[box_target])
box_target = np.array(box_target)
dist = np.sum(np.abs(box_location - box_target))
score += dist
return score
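# Worked example (illustrative only): with box_mapping = {(1, 1): (2, 3)} the
# single box has moved |2 - 1| + |3 - 1| = 3 cells, so the score is 3.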
TYPE_LOOKUP = {
0: 'wall',
1: 'empty space',
2: 'box target',
3: 'box on target',
4: 'box not on target',
5: 'player'
}
ACTION_LOOKUP = {
0: 'push up',
1: 'push down',
2: 'push left',
3: 'push right',
4: 'move up',
5: 'move down',
6: 'move left',
7: 'move right',
}
# Moves are mapped to coordinate changes as follows
# 0: Move up
# 1: Move down
# 2: Move left
# 3: Move right
CHANGE_COORDINATES = {
0: (-1, 0),
1: (1, 0),
2: (0, -1),
3: (0, 1)
}
|
{
"content_hash": "f43196e91682296c455f32e6f4461ba9",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 115,
"avg_line_length": 30.701704545454547,
"alnum_prop": 0.5942444711760896,
"repo_name": "mpSchrader/gym-sokoban",
"id": "1d33685764367d2831bc79da47270fdd5cf41bd8",
"size": "10807",
"binary": false,
"copies": "1",
"ref": "refs/heads/default",
"path": "gym_sokoban/envs/room_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63967"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
}
|
"""
Console
=======
.. versionadded:: 1.9.1
Reboot of the old inspector, designed to be modular and keep concerns
separated. It also has an addons architecture that allow you to add a button,
panel, or more in the Console itself.
.. warning::
This module works, but might fail in some cases. Please contribute!
Usage
-----
For normal module usage, please see the :mod:`~kivy.modules` documentation::
python main.py -m console
Mouse navigation
----------------
When the "Select" button is activated, you can:
- tap once on a widget to select it without leaving inspect mode
- double tap on a widget to select and leave inspect mode (then you can
manipulate the widget again)
Keyboard navigation
-------------------
- "Ctrl + e": toggle console
- "Escape": cancel widget lookup, then hide inspector view
- "Up": select the parent widget
- "Down": select the first child of the currently selected widget
- "Left": select the previous sibling
- "Right": select the next sibling
Additional information
----------------------
Some properties can be edited live. However, due to the delayed usage of
some properties, it might crash if you don't handle the required cases.
Addons
------
Addons must be added to `Console.addons` before the first Clock tick of the
application, or before :attr:`create_console` is called. You currently cannot
add addons on the fly. Addons are quite cheap until the Console is activated.
Panels are even cheaper as nothing is done until the user selects them.
We provide multiple addons activated by default:
- ConsoleAddonFps: display the FPS at the top-right
- ConsoleAddonSelect: activate the selection mode
- ConsoleAddonBreadcrumb: display the hierarchy of the current widget at the
bottom
- ConsoleAddonWidgetTree: panel to display the widget tree of the application
- ConsoleAddonWidgetPanel: panel to display the properties of the selected
widget
If you need to add custom widgets in the Console, please use either
:class:`ConsoleButton`, :class:`ConsoleToggleButton` or :class:`ConsoleLabel`.
An addon must inherit from the :class:`ConsoleAddon` class.
For example, here is a simple addon for displaying the FPS at the top/right
of the Console::
from kivy.modules.console import Console, ConsoleAddon
class ConsoleAddonFps(ConsoleAddon):
def init(self):
self.lbl = ConsoleLabel(text="0 Fps")
self.console.add_toolbar_widget(self.lbl, right=True)
def activate(self):
self.event = Clock.schedule_interval(self.update_fps, 1 / 2.)
        def deactivate(self):
self.event.cancel()
def update_fps(self, *args):
fps = Clock.get_fps()
self.lbl.text = "{} Fps".format(int(fps))
Console.register_addon(ConsoleAddonFps)
You can create addons that add panels. Panel activation/deactivation is not
tied to the addon activation/deactivation, but in some cases, you can use the
same callback for deactivating the addon and the panel. Here is a simple
"About" panel addon::
from kivy.modules.console import Console, ConsoleAddon, ConsoleLabel
class ConsoleAddonAbout(ConsoleAddon):
def init(self):
self.console.add_panel("About", self.panel_activate,
self.panel_deactivate)
def panel_activate(self):
self.console.bind(widget=self.update_content)
self.update_content()
def panel_deactivate(self):
self.console.unbind(widget=self.update_content)
def deactivate(self):
self.panel_deactivate()
def update_content(self, *args):
widget = self.console.widget
if not widget:
return
text = "Selected widget is: {!r}".format(widget)
lbl = ConsoleLabel(text=text)
self.console.set_content(lbl)
Console.register_addon(ConsoleAddonAbout)
"""
__all__ = ("start", "stop", "create_console", "Console", "ConsoleAddon",
"ConsoleButton", "ConsoleToggleButton", "ConsoleLabel")
import kivy
kivy.require('1.9.0')
import weakref
from functools import partial
from itertools import chain
from kivy.logger import Logger
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.image import Image
from kivy.uix.treeview import TreeViewNode, TreeView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.modalview import ModalView
from kivy.graphics import Color, Rectangle, PushMatrix, PopMatrix
from kivy.graphics.context_instructions import Transform
from kivy.graphics.transformation import Matrix
from kivy.properties import (ObjectProperty, BooleanProperty, ListProperty,
NumericProperty, StringProperty, OptionProperty,
ReferenceListProperty, AliasProperty,
VariableListProperty)
from kivy.graphics.texture import Texture
from kivy.clock import Clock
from kivy.lang import Builder
Builder.load_string("""
<Console>:
size_hint: (1, None) if self.mode == "docked" else (None, None)
height: dp(250)
canvas:
Color:
rgb: .185, .18, .18
Rectangle:
size: self.size
Color:
rgb: .3, .3, .3
Rectangle:
pos: 0, self.height - dp(48)
size: self.width, dp(48)
GridLayout:
cols: 1
id: layout
GridLayout:
id: toolbar
rows: 1
height: "48dp"
size_hint_y: None
padding: "4dp"
spacing: "4dp"
RelativeLayout:
id: content
<ConsoleAddonSeparator>:
size_hint_x: None
width: "10dp"
<ConsoleButton,ConsoleToggleButton,ConsoleLabel>:
size_hint_x: None
width: self.texture_size[0] + dp(20)
<ConsoleAddonBreadcrumbView>:
size_hint_y: None
height: "48dp"
canvas:
Color:
rgb: .3, .3, .3
Rectangle:
size: self.size
ScrollView:
id: sv
do_scroll_y: False
GridLayout:
id: stack
rows: 1
size_hint_x: None
width: self.minimum_width
padding: "4dp"
spacing: "4dp"
<TreeViewProperty>:
height: max(dp(48), max(lkey.texture_size[1], ltext.texture_size[1]))
Label:
id: lkey
text: root.key
text_size: (self.width, None)
width: 150
size_hint_x: None
Label:
id: ltext
text: [repr(getattr(root.widget, root.key, '')), root.refresh][0]\
if root.widget else ''
text_size: (self.width, None)
<ConsoleAddonWidgetTreeView>:
ScrollView:
scroll_type: ['bars', 'content']
bar_width: '10dp'
ConsoleAddonWidgetTreeImpl:
id: widgettree
hide_root: True
size_hint: None, None
height: self.minimum_height
width: max(self.parent.width, self.minimum_width)
selected_widget: root.widget
on_select_widget: root.console.highlight_widget(args[1])
<-TreeViewWidget>:
height: self.texture_size[1] + sp(4)
size_hint_x: None
width: self.texture_size[0] + sp(4)
canvas.before:
Color:
rgba: self.color_selected if self.is_selected else (0, 0, 0, 0)
Rectangle:
pos: self.pos
size: self.size
Color:
rgba: 1, 1, 1, int(not self.is_leaf)
Rectangle:
source:
('atlas://data/images/defaulttheme/tree_%s' %
('opened' if self.is_open else 'closed'))
size: 16, 16
pos: self.x - 20, self.center_y - 8
canvas:
Color:
rgba:
(self.disabled_color if self.disabled else
(self.color if not self.markup else (1, 1, 1, 1)))
Rectangle:
texture: self.texture
size: self.texture_size
pos:
(int(self.center_x - self.texture_size[0] / 2.),
int(self.center_y - self.texture_size[1] / 2.))
""")
def ignore_exception(f):
def f2(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
pass
return f2
class TreeViewProperty(BoxLayout, TreeViewNode):
key = ObjectProperty(None, allownone=True)
refresh = BooleanProperty(False)
widget_ref = ObjectProperty(None, allownone=True)
def _get_widget(self):
wr = self.widget_ref
if wr is None:
return None
wr = wr()
if wr is None:
self.widget_ref = None
return None
return wr
widget = AliasProperty(_get_widget, None, bind=('widget_ref', ))
class ConsoleButton(Button):
"""Button specialized for the Console"""
pass
class ConsoleToggleButton(ToggleButton):
"""ToggleButton specialized for the Console"""
pass
class ConsoleLabel(Label):
"""LabelButton specialized for the Console"""
pass
class ConsoleAddonSeparator(Widget):
pass
class ConsoleAddon(object):
"""Base class for implementing addons"""
#: Console instance
console = None
def __init__(self, console):
super(ConsoleAddon, self).__init__()
self.console = console
self.init()
def init(self):
"""Method called when the addon is instantiated by the Console
"""
pass
def activate(self):
"""Method called when the addon is activated by the console
(when the console is displayed)"""
pass
def deactivate(self):
"""Method called when the addon is deactivated by the console
(when the console is hidden)
"""
pass
class ConsoleAddonMode(ConsoleAddon):
def init(self):
btn = ConsoleToggleButton(text=u"Docked")
self.console.add_toolbar_widget(btn)
class ConsoleAddonSelect(ConsoleAddon):
def init(self):
self.btn = ConsoleToggleButton(text=u"Select")
self.btn.bind(state=self.on_button_state)
self.console.add_toolbar_widget(self.btn)
self.console.bind(inspect_enabled=self.on_inspect_enabled)
def on_inspect_enabled(self, instance, value):
self.btn.state = "down" if value else "normal"
def on_button_state(self, instance, value):
self.console.inspect_enabled = (value == "down")
class ConsoleAddonFps(ConsoleAddon):
_update_ev = None
def init(self):
self.lbl = ConsoleLabel(text="0 Fps")
self.console.add_toolbar_widget(self.lbl, right=True)
def activate(self):
ev = self._update_ev
if ev is None:
self._update_ev = Clock.schedule_interval(self.update_fps, 1 / 2.)
else:
ev()
    def deactivate(self):
if self._update_ev is not None:
self._update_ev.cancel()
def update_fps(self, *args):
fps = Clock.get_fps()
self.lbl.text = "{} Fps".format(int(fps))
class ConsoleAddonBreadcrumbView(RelativeLayout):
widget = ObjectProperty(None, allownone=True)
parents = []
def on_widget(self, instance, value):
stack = self.ids.stack
# determine if we can just highlight the current one
# or if we need to rebuild the breadcrumb
prefs = [btn.widget_ref() for btn in self.parents]
if value in prefs:
# ok, so just toggle this one instead.
index = prefs.index(value)
for btn in self.parents:
btn.state = "normal"
self.parents[index].state = "down"
return
# we need to rebuild the breadcrumb.
stack.clear_widgets()
if not value:
return
widget = value
parents = []
while True:
btn = ConsoleButton(text=widget.__class__.__name__)
btn.widget_ref = weakref.ref(widget)
btn.bind(on_release=self.highlight_widget)
parents.append(btn)
if widget == widget.parent:
break
widget = widget.parent
for btn in reversed(parents):
stack.add_widget(btn)
self.ids.sv.scroll_x = 1
self.parents = parents
btn.state = "down"
def highlight_widget(self, instance):
self.console.widget = instance.widget_ref()
class ConsoleAddonBreadcrumb(ConsoleAddon):
def init(self):
self.view = ConsoleAddonBreadcrumbView()
self.view.console = self.console
self.console.ids.layout.add_widget(self.view)
def activate(self):
self.console.bind(widget=self.update_content)
self.update_content()
def deactivate(self):
self.console.unbind(widget=self.update_content)
def update_content(self, *args):
self.view.widget = self.console.widget
class ConsoleAddonWidgetPanel(ConsoleAddon):
def init(self):
self.console.add_panel("Properties", self.panel_activate,
self.deactivate)
def panel_activate(self):
self.console.bind(widget=self.update_content)
self.update_content()
def deactivate(self):
self.console.unbind(widget=self.update_content)
def update_content(self, *args):
widget = self.console.widget
if not widget:
return
from kivy.uix.scrollview import ScrollView
self.root = root = BoxLayout()
self.sv = sv = ScrollView(scroll_type=["bars", "content"],
bar_width='10dp')
treeview = TreeView(hide_root=True, size_hint_y=None)
treeview.bind(minimum_height=treeview.setter("height"))
keys = list(widget.properties().keys())
keys.sort()
node = None
wk_widget = weakref.ref(widget)
for key in keys:
node = TreeViewProperty(key=key, widget_ref=wk_widget)
node.bind(is_selected=self.show_property)
try:
widget.bind(**{
key: partial(self.update_node_content, weakref.ref(node))
})
except:
pass
treeview.add_node(node)
root.add_widget(sv)
sv.add_widget(treeview)
self.console.set_content(root)
def show_property(self, instance, value, key=None, index=-1, *l):
# normal call: (tree node, focus, )
# nested call: (widget, prop value, prop key, index in dict/list)
if value is False:
return
console = self.console
content = None
if key is None:
# normal call
nested = False
widget = instance.widget
key = instance.key
prop = widget.property(key)
value = getattr(widget, key)
else:
# nested call, we might edit subvalue
nested = True
widget = instance
prop = None
dtype = None
if isinstance(prop, AliasProperty) or nested:
# trying to resolve type dynamically
if type(value) in (str, str):
dtype = 'string'
elif type(value) in (int, float):
dtype = 'numeric'
elif type(value) in (tuple, list):
dtype = 'list'
if isinstance(prop, NumericProperty) or dtype == 'numeric':
content = TextInput(text=str(value) or '', multiline=False)
content.bind(
text=partial(self.save_property_numeric, widget, key, index))
elif isinstance(prop, StringProperty) or dtype == 'string':
content = TextInput(text=value or '', multiline=True)
content.bind(
text=partial(self.save_property_text, widget, key, index))
elif (isinstance(prop, ListProperty) or
isinstance(prop, ReferenceListProperty) or
isinstance(prop, VariableListProperty) or dtype == 'list'):
content = GridLayout(cols=1, size_hint_y=None)
content.bind(minimum_height=content.setter('height'))
for i, item in enumerate(value):
button = Button(text=repr(item), size_hint_y=None, height=44)
if isinstance(item, Widget):
button.bind(on_release=partial(console.highlight_widget,
item, False))
else:
button.bind(on_release=partial(self.show_property, widget,
item, key, i))
content.add_widget(button)
elif isinstance(prop, OptionProperty):
content = GridLayout(cols=1, size_hint_y=None)
content.bind(minimum_height=content.setter('height'))
for option in prop.options:
button = ToggleButton(
text=option,
state='down' if option == value else 'normal',
group=repr(content.uid),
size_hint_y=None,
height=44)
button.bind(
on_press=partial(self.save_property_option, widget, key))
content.add_widget(button)
elif isinstance(prop, ObjectProperty):
if isinstance(value, Widget):
content = Button(text=repr(value))
content.bind(
on_release=partial(console.highlight_widget, value))
elif isinstance(value, Texture):
content = Image(texture=value)
else:
content = Label(text=repr(value))
elif isinstance(prop, BooleanProperty):
state = 'down' if value else 'normal'
content = ToggleButton(text=key, state=state)
content.bind(on_release=partial(self.save_property_boolean, widget,
key, index))
self.root.clear_widgets()
self.root.add_widget(self.sv)
if content:
self.root.add_widget(content)
@ignore_exception
def save_property_numeric(self, widget, key, index, instance, value):
if index >= 0:
getattr(widget, key)[index] = float(instance.text)
else:
setattr(widget, key, float(instance.text))
@ignore_exception
def save_property_text(self, widget, key, index, instance, value):
if index >= 0:
getattr(widget, key)[index] = instance.text
else:
setattr(widget, key, instance.text)
@ignore_exception
def save_property_boolean(self, widget, key, index, instance, ):
value = instance.state == 'down'
if index >= 0:
getattr(widget, key)[index] = value
else:
setattr(widget, key, value)
@ignore_exception
def save_property_option(self, widget, key, instance, *l):
setattr(widget, key, instance.text)
class TreeViewWidget(Label, TreeViewNode):
widget = ObjectProperty(None)
class ConsoleAddonWidgetTreeImpl(TreeView):
selected_widget = ObjectProperty(None, allownone=True)
__events__ = ('on_select_widget', )
def __init__(self, **kwargs):
super(ConsoleAddonWidgetTreeImpl, self).__init__(**kwargs)
self.update_scroll = Clock.create_trigger(self._update_scroll)
def find_node_by_widget(self, widget):
for node in self.iterate_all_nodes():
if not node.parent_node:
continue
try:
if node.widget == widget:
return node
except ReferenceError:
pass
return None
def update_selected_widget(self, widget):
if widget:
node = self.find_node_by_widget(widget)
if node:
self.select_node(node, False)
while node and isinstance(node, TreeViewWidget):
if not node.is_open:
self.toggle_node(node)
node = node.parent_node
def on_selected_widget(self, inst, widget):
if widget:
self.update_selected_widget(widget)
self.update_scroll()
def select_node(self, node, select_widget=True):
super(ConsoleAddonWidgetTreeImpl, self).select_node(node)
if select_widget:
try:
self.dispatch("on_select_widget", node.widget.__self__)
except ReferenceError:
pass
def on_select_widget(self, widget):
pass
def _update_scroll(self, *args):
node = self._selected_node
if not node:
return
self.parent.scroll_to(node)
class ConsoleAddonWidgetTreeView(RelativeLayout):
widget = ObjectProperty(None, allownone=True)
_window_node = None
def _update_widget_tree_node(self, node, widget, is_open=False):
tree = self.ids.widgettree
update_nodes = []
nodes = {}
for cnode in node.nodes[:]:
try:
nodes[cnode.widget] = cnode
except ReferenceError:
# widget no longer exists, just remove it
pass
tree.remove_node(cnode)
for child in widget.children:
if isinstance(child, Console):
continue
if child in nodes:
cnode = tree.add_node(nodes[child], node)
else:
cnode = tree.add_node(
TreeViewWidget(text=child.__class__.__name__,
widget=child.proxy_ref,
is_open=is_open), node)
update_nodes.append((cnode, child))
return update_nodes
def update_widget_tree(self, *args):
win = self.console.win
if not self._window_node:
self._window_node = self.ids.widgettree.add_node(
TreeViewWidget(text="Window",
widget=win,
is_open=True))
nodes = self._update_widget_tree_node(self._window_node, win,
is_open=True)
while nodes:
ntmp = nodes[:]
nodes = []
for node in ntmp:
nodes += self._update_widget_tree_node(*node)
self.ids.widgettree.update_selected_widget(self.widget)
class ConsoleAddonWidgetTree(ConsoleAddon):
def init(self):
self.content = None
self.console.add_panel("Tree", self.panel_activate, self.deactivate,
self.panel_refresh)
def panel_activate(self):
self.console.bind(widget=self.update_content)
self.update_content()
def deactivate(self):
if self.content:
self.content.widget = None
self.content.console = None
self.console.unbind(widget=self.update_content)
def update_content(self, *args):
widget = self.console.widget
if not self.content:
self.content = ConsoleAddonWidgetTreeView()
self.content.console = self.console
self.content.widget = widget
self.content.update_widget_tree()
self.console.set_content(self.content)
def panel_refresh(self):
if self.content:
self.content.update_widget_tree()
class Console(RelativeLayout):
"""Console interface
This widget is created by create_console(), when the module is loaded.
During that time, you can add addons on the console to extend the
functionalities, or add your own application stats / debugging module.
"""
#: Array of addons that will be created at Console creation
addons = [ # ConsoleAddonMode,
ConsoleAddonSelect, ConsoleAddonFps, ConsoleAddonWidgetPanel,
ConsoleAddonWidgetTree, ConsoleAddonBreadcrumb]
#: Display mode of the Console, either docked at the bottom, or as a
#: floating window.
mode = OptionProperty("docked", options=["docked", "floated"])
#: Current widget being selected
widget = ObjectProperty(None, allownone=True)
    #: Indicate if inspection is enabled. If yes, the next touch down will
    #: select the widget under the touch.
inspect_enabled = BooleanProperty(False)
    #: True if the Console is activated (shown)
activated = BooleanProperty(False)
def __init__(self, **kwargs):
self.win = kwargs.pop('win', None)
super(Console, self).__init__(**kwargs)
self.avoid_bring_to_top = False
with self.canvas.before:
self.gcolor = Color(1, 0, 0, .25)
PushMatrix()
self.gtransform = Transform(Matrix())
self.grect = Rectangle(size=(0, 0))
PopMatrix()
Clock.schedule_interval(self.update_widget_graphics, 0)
# instantiate all addons
self._toolbar = {"left": [], "panels": [], "right": []}
self._addons = []
self._panel = None
for addon in self.addons:
instance = addon(self)
self._addons.append(instance)
self._init_toolbar()
# select the first panel
self._panel = self._toolbar["panels"][0]
self._panel.state = "down"
self._panel.cb_activate()
def _init_toolbar(self):
toolbar = self.ids.toolbar
for key in ("left", "panels", "right"):
if key == "right":
toolbar.add_widget(Widget())
for el in self._toolbar[key]:
toolbar.add_widget(el)
if key != "right":
toolbar.add_widget(ConsoleAddonSeparator())
@classmethod
def register_addon(cls, addon):
cls.addons.append(addon)
def add_toolbar_widget(self, widget, right=False):
"""Add a widget in the top left toolbar of the Console.
        Use `right=True` if you want to add the widget on the right instead.
"""
key = "right" if right else "left"
self._toolbar[key].append(widget)
def remove_toolbar_widget(self, widget):
"""Remove a widget from the toolbar
"""
self.ids.toolbar.remove_widget(widget)
def add_panel(self, name, cb_activate, cb_deactivate, cb_refresh=None):
"""Add a new panel in the Console.
- `cb_activate` is a callable that will be called when the panel is
activated by the user.
- `cb_deactivate` is a callable that will be called when the panel is
deactivated or when the console will hide.
- `cb_refresh` is an optional callable that is called if the user
        clicks again on the button to display the panel
        When activated, it's up to the panel to display content in the
Console by using :meth:`set_content`.
"""
btn = ConsoleToggleButton(text=name)
btn.cb_activate = cb_activate
btn.cb_deactivate = cb_deactivate
btn.cb_refresh = cb_refresh
btn.bind(on_press=self._activate_panel)
self._toolbar["panels"].append(btn)
def _activate_panel(self, instance):
if self._panel != instance:
self._panel.cb_deactivate()
self._panel.state = "normal"
self.ids.content.clear_widgets()
self._panel = instance
self._panel.cb_activate()
self._panel.state = "down"
else:
self._panel.state = "down"
if self._panel.cb_refresh:
self._panel.cb_refresh()
def set_content(self, content):
"""Replace the Console content with a new one.
"""
self.ids.content.clear_widgets()
self.ids.content.add_widget(content)
def on_touch_down(self, touch):
ret = super(Console, self).on_touch_down(touch)
if (('button' not in touch.profile or touch.button == 'left') and
not ret and self.inspect_enabled):
self.highlight_at(*touch.pos)
if touch.is_double_tap:
self.inspect_enabled = False
ret = True
else:
ret = self.collide_point(*touch.pos)
return ret
def on_touch_move(self, touch):
ret = super(Console, self).on_touch_move(touch)
if not ret and self.inspect_enabled:
self.highlight_at(*touch.pos)
ret = True
return ret
def on_touch_up(self, touch):
ret = super(Console, self).on_touch_up(touch)
if not ret and self.inspect_enabled:
ret = True
return ret
def on_window_children(self, win, children):
if self.avoid_bring_to_top or not self.activated:
return
self.avoid_bring_to_top = True
win.remove_widget(self)
win.add_widget(self)
self.avoid_bring_to_top = False
def highlight_at(self, x, y):
"""Select a widget from a x/y window coordinate.
This is mostly used internally when Select mode is activated
"""
widget = None
# reverse the loop - look at children on top first and
# modalviews before others
win_children = self.win.children
children = chain((c for c in reversed(win_children)
if isinstance(c, ModalView)),
(c for c in reversed(win_children)
if not isinstance(c, ModalView)))
for child in children:
if child is self:
continue
widget = self.pick(child, x, y)
if widget:
break
self.highlight_widget(widget)
def highlight_widget(self, widget, *largs):
# no widget to highlight, reduce rectangle to 0, 0
self.widget = widget
if not widget:
self.grect.size = 0, 0
def update_widget_graphics(self, *l):
if not self.activated:
return
if self.widget is None:
self.grect.size = 0, 0
return
self.grect.size = self.widget.size
matrix = self.widget.get_window_matrix()
if self.gtransform.matrix.get() != matrix.get():
self.gtransform.matrix = matrix
def pick(self, widget, x, y):
"""Pick a widget at x/y, given a root `widget`
"""
ret = None
# try to filter widgets that are not visible (invalid inspect target)
if (hasattr(widget, 'visible') and not widget.visible):
return ret
if widget.collide_point(x, y):
ret = widget
x2, y2 = widget.to_local(x, y)
# reverse the loop - look at children on top first
for child in reversed(widget.children):
ret = self.pick(child, x2, y2) or ret
return ret
def on_activated(self, instance, activated):
if activated:
self._activate_console()
else:
self._deactivate_console()
def _activate_console(self):
if self not in self.win.children:
self.win.add_widget(self)
self.y = 0
for addon in self._addons:
addon.activate()
Logger.info('Console: console activated')
def _deactivate_console(self):
for addon in self._addons:
addon.deactivate()
self.grect.size = 0, 0
self.y = -self.height
self.widget = None
self.inspect_enabled = False
# self.win.remove_widget(self)
self._window_node = None
Logger.info('Console: console deactivated')
def keyboard_shortcut(self, win, scancode, *largs):
modifiers = largs[-1]
if scancode == 101 and modifiers == ['ctrl']:
self.activated = not self.activated
if self.activated:
self.inspect_enabled = True
return True
elif scancode == 27:
if self.inspect_enabled:
self.inspect_enabled = False
return True
if self.activated:
self.activated = False
return True
if not self.activated or not self.widget:
return
if scancode == 273: # top
self.widget = self.widget.parent
elif scancode == 274: # down
filtered_children = [c for c in self.widget.children
if not isinstance(c, Console)]
if filtered_children:
self.widget = filtered_children[0]
elif scancode == 276: # left
parent = self.widget.parent
filtered_children = [c for c in parent.children
if not isinstance(c, Console)]
index = filtered_children.index(self.widget)
index = max(0, index - 1)
self.widget = filtered_children[index]
elif scancode == 275: # right
parent = self.widget.parent
filtered_children = [c for c in parent.children
if not isinstance(c, Console)]
index = filtered_children.index(self.widget)
index = min(len(filtered_children) - 1, index + 1)
self.widget = filtered_children[index]
def create_console(win, ctx, *l):
ctx.console = Console(win=win)
win.bind(children=ctx.console.on_window_children,
on_keyboard=ctx.console.keyboard_shortcut)
def start(win, ctx):
"""Create an Console instance attached to the *ctx* and bound to the
Window's :meth:`~kivy.core.window.WindowBase.on_keyboard` event for
capturing the keyboard shortcut.
:Parameters:
`win`: A :class:`Window <kivy.core.window.WindowBase>`
The application Window to bind to.
`ctx`: A :class:`~kivy.uix.widget.Widget` or subclass
The Widget to be inspected.
"""
Clock.schedule_once(partial(create_console, win, ctx))
def stop(win, ctx):
"""Stop and unload any active Inspectors for the given *ctx*."""
if hasattr(ctx, "console"):
win.unbind(children=ctx.console.on_window_children,
on_keyboard=ctx.console.keyboard_shortcut)
win.remove_widget(ctx.console)
del ctx.console
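# --- Editor's sketch (not part of the original module) ----------------------
# A minimal custom addon registered through Console.register_addon(). It only
# relies on methods shown above (add_panel, set_content, register_addon); the
# ConsoleAddon base class, its init() hook and .console reference, the panel
# name "Hello" and the Label widget are assumptions made for illustration.
#     class ConsoleAddonHello(ConsoleAddon):
#         def init(self):
#             self.console.add_panel("Hello",
#                                    self.on_activate, self.on_deactivate)
#         def on_activate(self):
#             self.console.set_content(Label(text="Hello from an addon"))
#         def on_deactivate(self):
#             pass
#     Console.register_addon(ConsoleAddonHello)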
|
{
"content_hash": "ef335ddfb415f238f837cf50636ebba1",
"timestamp": "",
"source": "github",
"line_count": 1058,
"max_line_length": 79,
"avg_line_length": 32.460302457466916,
"alnum_prop": 0.5856506420522377,
"repo_name": "LogicalDash/kivy",
"id": "6d5c46f785108043b54a46e29301fb23228d13fd",
"size": "34358",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "kivy/modules/console.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "321844"
},
{
"name": "C++",
"bytes": "3551"
},
{
"name": "Emacs Lisp",
"bytes": "9838"
},
{
"name": "GLSL",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "4272"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "4071040"
},
{
"name": "Shell",
"bytes": "10849"
},
{
"name": "Vim script",
"bytes": "1731"
}
],
"symlink_target": ""
}
|
import util
import base
import tempfile
import os
class ReopenableTempFile(base.BaseAttribute):
_known_options = set(('mode', 'bufsize', 'suffix', 'prefix', 'dir'))
def __init__(self, **kwargs):
super(ReopenableTempFile, self).__init__()
self._file = None
self.attr(kwargs)
def __enter__(self):
self._file = tempfile.NamedTemporaryFile(delete=False,
**self._attributes)
return self._file
def __exit__(self, exc_type, exc_value, traceback):
self._file.close()
os.remove(self._file.name)
@property
def file(self):
if self._file is None:
return None
return self._file.file
def close(self):
if self._file is not None:
return self._file.close()
def flush(self):
if self._file is not None:
return self._file.flush()
def fileno(self):
if self._file is not None:
return self._file.fileno()
def next(self):
if self._file is not None:
return self._file.next()
def read(self, *args, **kwargs):
if self._file is not None:
return self._file.read(*args, **kwargs)
def readline(self, *args, **kwargs):
if self._file is not None:
return self._file.readline(*args, **kwargs)
def readlines(self, *args, **kwargs):
if self._file is not None:
return self._file.readlines(*args, **kwargs)
def seek(self, *args, **kwargs):
if self._file is not None:
return self._file.seek(*args, **kwargs)
def tell(self):
if self._file is not None:
return self._file.tell()
def truncate(self, *args, **kwargs):
if self._file is not None:
return self._file.truncate(*args, **kwargs)
def write(self, *args, **kwargs):
if self._file is not None:
return self._file.write(*args, **kwargs)
def writelines(self, *args, **kwargs):
if self._file is not None:
return self._file.writelines(*args, **kwargs)
@property
def closed(self):
if self._file is not None:
return self._file.closed
@property
def encoding(self):
if self._file is not None:
return self._file.encoding
@property
def mode(self):
if self._file is not None:
return self._file.mode
@property
def name(self):
if self._file is not None:
return self._file.name
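# --- Editor's usage sketch (not part of the original module) ----------------
# Shows the intended round trip: __enter__ creates the file with delete=False
# so it can be reopened by name while still held open, and __exit__ removes it.
# Assumes BaseAttribute.attr() forwards keyword options such as 'suffix' into
# self._attributes for tempfile.NamedTemporaryFile.
if __name__ == '__main__':
    with ReopenableTempFile(suffix='.txt') as tmp:
        tmp.write(b'hello')
        tmp.flush()
        with open(tmp.name, 'rb') as reopened:
            assert reopened.read() == b'hello'
    assert not os.path.exists(tmp.name)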
|
{
"content_hash": "596e969c8327f9e3867f9bb97d3d8145",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 72,
"avg_line_length": 27.329896907216494,
"alnum_prop": 0.5330064126744625,
"repo_name": "codefortokyo/data-processing-tools",
"id": "7f1e15dfee4d0769dea7210aa45bafaed950297d",
"size": "2676",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cftt/common/reopenabletempfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "232"
},
{
"name": "Python",
"bytes": "43938"
}
],
"symlink_target": ""
}
|
"""Generate Java targets from Antlr3 and Antlr4.
See https://www.antlr.org.
"""
from pants.backend.codegen.antlr.java.antlr_java_gen import AntlrJavaGen
from pants.backend.codegen.antlr.java.java_antlr_library import (
JavaAntlrLibrary as JavaAntlrLibraryV1,
)
from pants.backend.codegen.antlr.java.target_types import JavaAntlrLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
return BuildFileAliases(targets={"java_antlr_library": JavaAntlrLibraryV1})
def register_goals():
task(name="antlr-java", action=AntlrJavaGen).install("gen")
def target_types():
return [JavaAntlrLibrary]
|
{
"content_hash": "5464b79ff97743a1960af72a5b1f7a15",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.7835195530726257,
"repo_name": "tdyas/pants",
"id": "087787c20df7efd19d0588c0cc5edac3a9f0cde4",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/codegen/antlr/java/register.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5596"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "518180"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7955590"
},
{
"name": "Rust",
"bytes": "1031208"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "109904"
},
{
"name": "Starlark",
"bytes": "502255"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.views.generic import View, TemplateView
from .models import *
import datetime
class MainView(View):
    template_name = "about/main.html"
@classmethod
def sort_date(cls, item):
key = item.end_date
if not key:
key = datetime.date.today() + datetime.timedelta(days=365)
return key
def get(self, request, *args, **kwargs):
positions = sorted(list(Position.objects.all()), key=MainView.sort_date, reverse=True)
schools = sorted(list(School.objects.all()), key=MainView.sort_date, reverse=True)
data = {'positions':positions, 'schools':schools}
return render(request, self.template_name, data)
|
{
"content_hash": "4615bc12dabfa27c12e127666f8f1415",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 94,
"avg_line_length": 35.142857142857146,
"alnum_prop": 0.6544715447154471,
"repo_name": "m-clark/mclarkpw",
"id": "e191f3ee1316be624b684564c14336d6618e9b94",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mclarkpw/about/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "165494"
},
{
"name": "JavaScript",
"bytes": "1842"
},
{
"name": "Python",
"bytes": "43304"
},
{
"name": "Ruby",
"bytes": "874"
}
],
"symlink_target": ""
}
|
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next word.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
"""Returns mimic dict mapping each word to list of words which follow it."""
f = open(filename, 'r')
content_file = f.read()
f.close()
mimic_dict = {}
list_words = content_file.split()
mimic_dict[''] = list_words[0]
i = 1
while i < len(list_words):
if mimic_dict.get(list_words[i], []) == []:
mimic_dict[list_words[i]] = []
if i + 1 < len(list_words):
mimic_dict[list_words[i]].append(list_words[i + 1])
else:
mimic_dict[list_words[i]].append('')
i += 1
return mimic_dict
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
printed_column = 0
for i in xrange(0, 200):
print word,
printed_column += len(word)
# if printed_column >= 70:
# printed_column = 0
# print
if isinstance(mimic_dict[word], list):
word = random.choice(mimic_dict[word])
else:
word = random.choice([mimic_dict[word]])
return
# Provided main(), calls mimic_dict() and mimic()
def main():
if len(sys.argv) != 2:
print 'usage: ./mimic.py file-to-read'
sys.exit(1)
dict = mimic_dict(sys.argv[1])
print_mimic(dict, '')
if __name__ == '__main__':
main()
|
{
"content_hash": "107d12003cbb675d736fb54bc994bb1e",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 29.382022471910112,
"alnum_prop": 0.6841300191204589,
"repo_name": "hugonomura/google-python-exercises",
"id": "b5ff5f9d2963b80fae14de57106346e1085bd5bb",
"size": "2846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic/mimic.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "53775"
}
],
"symlink_target": ""
}
|
"""Copyright 2013 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import pinject
class CopiedClassesTest(unittest.TestCase):
def test_new_object_graph_works(self):
class SomeClass(object):
pass
obj_graph = pinject.new_object_graph(classes=[SomeClass])
self.assertIsInstance(obj_graph.provide(SomeClass), SomeClass)
|
{
"content_hash": "082edc4242e6fb5281a0cacdc8e692d9",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 31.964285714285715,
"alnum_prop": 0.7508379888268156,
"repo_name": "google/pinject",
"id": "2d8910fc08d659ee7fbbf2c53b878ac6b1f51996",
"size": "895",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/pinject_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1185"
},
{
"name": "Python",
"bytes": "214385"
}
],
"symlink_target": ""
}
|
from google.cloud import datastream_v1
def sample_list_stream_objects():
# Create a client
client = datastream_v1.DatastreamClient()
# Initialize request argument(s)
request = datastream_v1.ListStreamObjectsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_stream_objects(request=request)
# Handle the response
for response in page_result:
print(response)
# [END datastream_v1_generated_Datastream_ListStreamObjects_sync]
|
{
"content_hash": "307bba48542f482236959d0d03befad5",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 65,
"avg_line_length": 25.45,
"alnum_prop": 0.7092337917485265,
"repo_name": "googleapis/python-datastream",
"id": "9c58e250f60963e446f763f10d612fa5248815f3",
"size": "1905",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/datastream_v1_generated_datastream_list_stream_objects_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1645544"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
"""Check internal peering forwarding limits which affect GKE.
Internal Load Balancer creation can fail due to VPC internal forwarding rules limits.
"""
from gcpdiag import lint, models
from gcpdiag.lint.gke import util
from gcpdiag.queries import apis, gke, logs
MATCH_STR_1 = 'INTERNAL_FORWARDING_RULES_WITH_PEERING_LIMITS_EXCEEDED'
MATCH_STR_2 = 'SyncLoadBalancerFailed'
logs_by_project = {}
def prepare_rule(context: models.Context):
logs_by_project[context.project_id] = logs.query(
project_id=context.project_id,
resource_type='k8s_cluster',
log_name='log_id("events")',
filter_str=
f'jsonPayload.message:"{MATCH_STR_1}" AND jsonPayload.reason:"{MATCH_STR_2}"'
)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
  # skip the entire rule if logging is disabled
if not apis.is_enabled(context.project_id, 'logging'):
report.add_skipped(None, 'logging api is disabled')
return
# Any work to do?
clusters = gke.get_clusters(context)
if not clusters:
report.add_skipped(None, 'no clusters found')
# Search the logs.
def filter_f(log_entry):
try:
if (MATCH_STR_1 in log_entry['jsonPayload']['message']) and (
MATCH_STR_2 in log_entry['jsonPayload']['reason']):
return True
except KeyError:
return False
bad_clusters = util.gke_logs_find_bad_clusters(
context=context, logs_by_project=logs_by_project, filter_f=filter_f)
# Create the report.
for _, c in sorted(clusters.items()):
if c in bad_clusters:
report.add_failed(c, logs.format_log_entry(bad_clusters[c]))
else:
report.add_ok(c)
|
{
"content_hash": "22d7a68b64dbf121a015bbfb5f3b78e8",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 85,
"avg_line_length": 31.056603773584907,
"alnum_prop": 0.6931956257594167,
"repo_name": "GoogleCloudPlatform/gcpdiag",
"id": "1140a3177fb9255349d11beefbe28e0643172112",
"size": "2221",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gcpdiag/lint/gke/err_2021_010_internal_forwarding_rule_limits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4610"
},
{
"name": "HCL",
"bytes": "90111"
},
{
"name": "HTML",
"bytes": "8149"
},
{
"name": "Jinja",
"bytes": "1231"
},
{
"name": "Makefile",
"bytes": "51860"
},
{
"name": "Python",
"bytes": "792739"
},
{
"name": "SCSS",
"bytes": "1435"
},
{
"name": "Shell",
"bytes": "10973"
},
{
"name": "Smarty",
"bytes": "726"
}
],
"symlink_target": ""
}
|
import os
import sys
import unittest
import logging
APP_ROOT = os.getenv('APP_ROOT')
sys.path.insert(0, '%s/currypy/src' % APP_ROOT)
sys.path.insert(0, '%s/pypatterns/src' % APP_ROOT)
import pypatterns.filter as FilterModule
class Resource(object):
def name(self, value=None):
if value is not None:
self._name = value
if not hasattr(self, '_name'):
self._name = None
return self._name
# END class
pass
class TestCase(unittest.TestCase):
def testNotFilter(self):
name = "test"
# create the filter
aFilter = FilterModule.constructNotFilter()
aFilter.addFilter(FilterModule.NameFilter(name))
aResource = Resource()
aResource.name(name)
assert aFilter.matches(aResource) is False, \
"expected notfilter to not match resource named %s" % aResource.name()
name = "wrong name"
aResource = Resource()
aResource.name(name)
assert aFilter.matches(aResource), \
"expected notfilter to match resource named %s" % aResource.name()
pass
def testAndOrFilter(self):
name1 = "test1"
name2 = "test2"
resource1 = Resource()
resource1.name(name1)
resource2 = Resource()
resource2.name(name2)
# test to make sure that if min passes, succeeds
aFilter = FilterModule.CompositeFilter(FilterModule.AndOrAccumulator())
aFilter.addFilter(FilterModule.NameFilter(name1))
aFilter.addFilter(FilterModule.NameFilter(name1))
aFilter.addFilter(FilterModule.NameFilter(name2))
assert aFilter.matches(resource1) is False, \
"expected andor(1,1) filter to not accept resource %s" % resource1.name()
assert aFilter.matches(resource2), \
"expected andor(1,1) filter to accept resource %s" %resource2.name()
# test to make sure that if max passes, succeeds
aFilter = FilterModule.CompositeFilter(FilterModule.AndOrAccumulator(2,2))
aFilter.addFilter(FilterModule.NameFilter(name1))
aFilter.addFilter(FilterModule.NameFilter(name1))
aFilter.addFilter(FilterModule.NameFilter(name2))
assert aFilter.matches(resource1), \
"expected andor(1,1) filter to accept resource %s" % resource1.name()
assert aFilter.matches(resource2) is False, \
"expected andor(1,1) filter to not accept resource %s" % resource2.name()
# test to make sure that if too many Filters passes, fails
aFilter = FilterModule.CompositeFilter(FilterModule.AndOrAccumulator(0,0))
aFilter.addFilter(FilterModule.NameFilter(name1))
aFilter.addFilter(FilterModule.NameFilter(name1))
aFilter.addFilter(FilterModule.NameFilter(name2))
assert aFilter.matches(resource1) is False, \
"expected andor(1,1) filter to not accept resource %s" % resource1.name()
assert aFilter.matches(resource2) is False, \
"expected andor(1,1) filter to not accept resource %s" % resource2.name()
# test to make sure that if not enough Filters pass, fails
aFilter = FilterModule.CompositeFilter(FilterModule.AndOrAccumulator(3,3))
aFilter.addFilter(FilterModule.NameFilter(name1))
aFilter.addFilter(FilterModule.NameFilter(name1))
aFilter.addFilter(FilterModule.NameFilter(name2))
assert aFilter.matches(resource1) is False, \
"expected andor(1,1) filter to not accept resource %s" % resource1.name()
assert aFilter.matches(resource2) is False, \
"expected andor(1,1) filter to not accept resource %s" % resource2.name()
pass
def testOrFilter(self):
name1 = "test1"
name2 = "test2"
name3 = "test3"
aFilter = FilterModule.constructOrFilter()
aFilter.addFilter(FilterModule.NameFilter(name1))
aFilter.addFilter(FilterModule.NameFilter(name2))
# if any passes, succeed
aResource = Resource()
aResource.name(name1)
assert aFilter.matches(aResource), \
"expected orfilter to match resource named %s" % aResource.name()
aResource = Resource()
aResource.name(name2)
assert aFilter.matches(aResource), \
"expected orfilter to match resource named %s" % aResource.name()
aResource = Resource()
aResource.name(name3)
assert not aFilter.matches(aResource), \
"expected orfilter to match resource named %s" % aResource.name()
# run again
aResource = Resource()
aResource.name(name1)
assert aFilter.matches(aResource), \
"expected notfilter to match resource named %s" % aResource.name()
aResource = Resource()
aResource.name(name2)
assert aFilter.matches(aResource), \
"expected notfilter to match resource named %s" % aResource.name()
aResource = Resource()
aResource.name(name3)
assert not aFilter.matches(aResource), \
"expected notfilter to match resource named %s" % aResource.name()
def testAndFilter(self):
name1 = "test1"
name2 = "test2"
aFilter = FilterModule.constructAndFilter()
aFilter.addFilter(FilterModule.NameFilter(name1))
aFilter.addFilter(FilterModule.NameFilter(name1))
# if all passes, succeed
aResource = Resource()
aResource.name(name1)
assert aFilter.matches(aResource), \
"expected andfilter to match resource named %s" % aResource.name()
# if none passes, fail
aResource = Resource()
aResource.name(name2)
assert aFilter.matches(aResource) is False, \
"expected andfilter to not match resource named %s" % aResource.name()
# if only some passes, fail
aFilter = FilterModule.constructAndFilter()
aFilter.addFilter(FilterModule.NameFilter(name1))
aFilter.addFilter(FilterModule.NameFilter(name2))
aResource = Resource()
aResource.name(name1)
assert aFilter.matches(aResource) is False, \
"expected andfilter to not match resource named %s" % aResource.name()
pass
def testOrFilter(self):
pass
def testObjectFilter(self):
pass
def testNameFilter(self):
name = "test"
aFilter = FilterModule.NameFilter(name)
aResource = Resource()
aResource.name(name)
assert aFilter.matches(aResource), \
"expected filter to match resource named %s" % aResource.name()
aResource = Resource()
aResource.name("wrong name")
assert aFilter.matches(aResource) is False, \
"expected filter to not match resource named %s" % aResource.name()
pass
def testGeneratorFilter(self):
"""
        this tests the ability to apply a filter to values
produced by an iterator and/or generator
"""
lists = []
functions = []
numbers = range(1,11)
lists.append(numbers)
functions.append(lambda x: x)
lists.append([(0,x) for x in numbers])
functions.append(lambda x: x[1])
for list, function in zip(lists, functions):
mod2Filter = FilterModule.ObjectFilter(lambda: 0,
lambda x: function(x) % 2)
mod2Filtered = mod2Filter.wrap(list)
mod5Filter = FilterModule.ObjectFilter(lambda: 0,
lambda x: function(x) % 5)
filtered = mod5Filter.wrap(mod2Filtered,
objectFunction=function)
cardinality = 0
for number in filtered:
if cardinality > 0:
assert False, \
"expected only one value in generator"
                assert number == 10, \
"expected value to be 10, got %s" % number
cardinality = cardinality + 1
pass
pass
def testMemberOfObjectKeyMatchesFilter1(self):
filter = FilterModule.MemberOfObjectKeyMatchesFilter(
filter = FilterModule.IdentityFilter(2),
accumulator = FilterModule.AndAccumulator(),
keyFunction = lambda x: x.values()
)
filter._shouldCache = False
class X(object):
pass
obj = X()
setattr(obj, 'values', lambda: [0, 2, 2])
assert not filter.matches(obj)
setattr(obj, 'values', lambda: [2, 2, 2])
assert filter.matches(obj)
return
def testMemberOfObjectKeyMatchesFilter2(self):
filter = FilterModule.MemberOfObjectKeyMatchesFilter(
filter = FilterModule.IdentityFilter(2),
accumulator = FilterModule.OrAccumulator(),
keyFunction = lambda x: x.values()
)
filter._shouldCache = False
class X(object):
pass
obj = X()
setattr(obj, 'values', lambda: [0, 2, 1])
assert filter.matches(obj)
setattr(obj, 'values', lambda: [0, 1, 3])
assert not filter.matches(obj)
return
pass
def main():
suite = unittest.makeSuite(TestCase,'test')
runner = unittest.TextTestRunner()
runner.run(suite)
return
if __name__=="__main__":
main()
|
{
"content_hash": "da095ff4aa1cd7000c887844c6e34943",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 93,
"avg_line_length": 33.62587412587413,
"alnum_prop": 0.6018508890506394,
"repo_name": "mjpan/pypatterns",
"id": "76684bf7ea0d634c07a216c02810f1f4a01c765e",
"size": "9617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/TestFilter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "84602"
}
],
"symlink_target": ""
}
|
def main():
print("Hello world!")
|
{
"content_hash": "7895e910276237731b352db8276c80a7",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 25,
"avg_line_length": 19,
"alnum_prop": 0.5789473684210527,
"repo_name": "hahcho/pytroid",
"id": "46b4ca549d1608ae3d562b5d3c19f01a0942cde0",
"size": "38",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytroid/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "599"
}
],
"symlink_target": ""
}
|
"""remove infra_driver column
Revision ID: 8f7145914cb0
Revises: 0ae5b1ce3024
Create Date: 2016-12-08 17:28:26.609343
"""
# revision identifiers, used by Alembic.
revision = '8f7145914cb0'
down_revision = '0ae5b1ce3024'
from alembic import op
def upgrade(active_plugins=None, options=None):
op.drop_column('vnfd', 'infra_driver')
|
{
"content_hash": "ebc1c43e871d58678b2b584dcecab7ed",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 47,
"avg_line_length": 20,
"alnum_prop": 0.7470588235294118,
"repo_name": "zeinsteinz/tacker",
"id": "7dd252f3edb66b254b3e86d08dd34c289d236b09",
"size": "955",
"binary": false,
"copies": "1",
"ref": "refs/heads/feyman",
"path": "tacker/db/migration/alembic_migrations/versions/8f7145914cb0_remove_infra_driver_column.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "1197700"
},
{
"name": "Shell",
"bytes": "25674"
}
],
"symlink_target": ""
}
|
import atexit
import errno
import os
import shutil
import sys
import tempfile
import time
import pywatchman
from . import path_utils as path
global_temp_dir = None
class TempDir(object):
"""
This is a helper for locating a reasonable place for temporary files.
When run in the watchman test suite, we compute this up-front and then
store everything under that temporary directory.
When run under the FB internal test runner, we infer a reasonable grouped
location from the process group environmental variable exported by the
test runner.
"""
def __init__(self, keepAtShutdown: bool = False) -> None:
# We'll put all our temporary stuff under one dir so that we
# can clean it all up at the end.
parent_dir = tempfile.gettempdir()
prefix = "watchmantest"
self.temp_dir = path.get_canonical_filesystem_path(
tempfile.mkdtemp(dir=parent_dir, prefix=prefix)
)
if os.name != "nt":
# On some platforms, setting the setgid bit on a directory doesn't
# work if the user isn't a member of the directory's group. Set the
# group explicitly to avoid this.
os.chown(self.temp_dir, -1, os.getegid())
# Some environments have a weird umask that can leave state
# directories too open and break tests.
os.umask(0o022)
# Redirect all temporary files to that location
tempfile.tempdir = os.fsdecode(self.temp_dir)
self.keep = keepAtShutdown
def cleanup():
if self.keep:
sys.stdout.write("Preserving output in %s\n" % self.temp_dir)
return
self._retry_rmtree(self.temp_dir)
atexit.register(cleanup)
def get_dir(self):
return self.temp_dir
def set_keep(self, value) -> None:
self.keep = value
def _retry_rmtree(self, top) -> None:
# Keep trying to remove it; on Windows it may take a few moments
# for any outstanding locks/handles to be released
for _ in range(1, 10):
shutil.rmtree(top, onerror=_remove_readonly)
if not os.path.isdir(top):
return
sys.stdout.write("Waiting to remove temp data under %s\n" % top)
time.sleep(0.2)
sys.stdout.write("Failed to completely remove %s\n" % top)
def _remove_readonly(func, path, exc_info) -> None:
# If we encounter an EPERM or EACCESS error removing a file try making its parent
# directory writable and then retry the removal. This is necessary to clean up
# eden mount point directories after the checkout is unmounted, as these directories
# are made read-only by "eden clone"
_ex_type, ex, _traceback = exc_info
if not (
isinstance(ex, EnvironmentError) and ex.errno in (errno.EACCES, errno.EPERM)
):
# Just ignore other errors. This will be retried by _retry_rmtree()
return
try:
parent_dir = os.path.dirname(path)
os.chmod(parent_dir, 0o755)
# func() is the function that failed.
# This is usually os.unlink() or os.rmdir().
func(path)
except OSError:
return
def get_temp_dir(keep=None):
global global_temp_dir
if global_temp_dir:
return global_temp_dir
if keep is None:
keep = os.environ.get("WATCHMAN_TEST_KEEP", "0") == "1"
global_temp_dir = TempDir(keep)
return global_temp_dir
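# --- Editor's usage sketch (not part of the original module) ----------------
# Typical harness pattern implied by the docstrings above: fetch the shared
# TempDir once, create per-test paths under it, and rely on the atexit hook
# for cleanup (or WATCHMAN_TEST_KEEP=1 / set_keep(True) to preserve output).
# The "my-test" subdirectory name is illustrative only.
#     temp = get_temp_dir()
#     scratch = os.path.join(temp.get_dir(), "my-test")
#     os.mkdir(scratch)
#     temp.set_keep(True)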
|
{
"content_hash": "7e5bf627a11c47ddc00ae6cd3ff6d415",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 88,
"avg_line_length": 32.91509433962264,
"alnum_prop": 0.6311263972484953,
"repo_name": "nodakai/watchman",
"id": "aba87793a109274fb644552fef366fcfa203e58b",
"size": "3670",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "watchman/integration/lib/TempDir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "70350"
},
{
"name": "C++",
"bytes": "1042071"
},
{
"name": "CMake",
"bytes": "84403"
},
{
"name": "CSS",
"bytes": "16964"
},
{
"name": "HTML",
"bytes": "36889"
},
{
"name": "Java",
"bytes": "165177"
},
{
"name": "JavaScript",
"bytes": "35299"
},
{
"name": "Python",
"bytes": "853620"
},
{
"name": "Ruby",
"bytes": "23525"
},
{
"name": "Rust",
"bytes": "175867"
},
{
"name": "SCSS",
"bytes": "25549"
},
{
"name": "Shell",
"bytes": "11104"
},
{
"name": "Starlark",
"bytes": "1317"
},
{
"name": "Thrift",
"bytes": "40071"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import abc
from monty.json import MSONable, MontyDecoder
import six
"""
Defines an abstract base class contract for Transformation object.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Sep 23, 2011"
class AbstractTransformation(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
Abstract transformation class.
"""
@abc.abstractmethod
def apply_transformation(self, structure):
"""
Applies the transformation to a structure. Depending on whether a
transformation is one-to-many, there may be an option to return a
ranked list of structures.
Args:
structure:
input structure
return_ranked_list:
Boolean stating whether or not multiple structures are
returned. If return_ranked_list is a number, that number of
structures is returned.
Returns:
depending on returned_ranked list, either a transformed structure
or
a list of dictionaries, where each dictionary is of the form
{'structure' = .... , 'other_arguments'}
the key 'transformation' is reserved for the transformation that
was actually applied to the structure.
This transformation is parsed by the alchemy classes for generating
a more specific transformation history. Any other information will
be stored in the transformation_parameters dictionary in the
transmuted structure class.
"""
return
@property
@abc.abstractmethod
def inverse(self):
"""
Returns the inverse transformation if available.
Otherwise, should return None.
"""
return
@property
@abc.abstractmethod
def is_one_to_many(self):
"""
Determines if a Transformation is a one-to-many transformation. If a
Transformation is a one-to-many transformation, the
apply_transformation method should have a keyword arg
"return_ranked_list" which allows for the transformed structures to be
returned as a ranked list.
"""
return False
@property
def use_multiprocessing(self):
"""
Indicates whether the transformation can be applied by a
subprocessing pool. This should be overridden to return True for
transformations that the transmuter can parallelize.
"""
return False
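# Editor's sketch (not part of pymatgen): a minimal concrete subclass showing
# how the contract above is typically satisfied. The class name, the scaling
# factor and the use of Structure.copy()/volume/scale_lattice() are
# assumptions made for illustration only.
class _ExampleScaleVolumeTransformation(AbstractTransformation):
    """Scales the lattice volume of a structure by a constant factor."""
    def __init__(self, scale_factor=1.05):
        self.scale_factor = scale_factor
    def apply_transformation(self, structure):
        # one-to-one transformation: return a single modified copy
        new_structure = structure.copy()
        new_structure.scale_lattice(structure.volume * self.scale_factor)
        return new_structure
    @property
    def inverse(self):
        return _ExampleScaleVolumeTransformation(1.0 / self.scale_factor)
    @property
    def is_one_to_many(self):
        return False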
|
{
"content_hash": "df12d3e62038628f45f64d4295b7c703",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 32.292682926829265,
"alnum_prop": 0.6401057401812689,
"repo_name": "czhengsci/pymatgen",
"id": "e585994d7ac77db8e3af92ac4f47eebdf2332858",
"size": "2758",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pymatgen/transformations/transformation_abc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5938"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6706935"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
}
|
from sqlalchemy import create_engine
from sqlcell.db import DBSessionHandler, EngineHandler
import pandas as pd
class HookHandler(EngineHandler):
"""input common queries to remember with a key/value pair. ie,
%%sql hook
\d=<common query>"
\dt=<another common query>"""
def __init__(self, engine, *args, **kwargs):
super().__init__()
self.hook_engine = engine
def is_engine(self, engine: str):
try:
create_engine(engine)
return True
except:
return False
def add(self, line, cell):
"add hook to db"
cmds_to_add = []
hooks = cell.split('\n\n')
for hook in hooks:
hook = hook.strip()
if hook:
key, cmd = [i.strip() for i in hook.split('=', 1)]
cmds_to_add.append((key, cmd))
for key, cmd in cmds_to_add:
self.session.add(self.Hooks(key=key, engine='', cmd=cmd))
self.session.commit()
return self
def run(self, cell, engine_var):
cell = cell.replace('~', '').split(' ')
engine_alias, sql, cmd_args = cell[0], cell[1], cell[2:]
hook_query = self.session.query(self.Hooks).filter_by(key=sql).first()
hook_cmd = hook_query.cmd
hook_engine = self.get_engine(engine_alias)
self.hook_engine = hook_engine
return hook_engine, hook_cmd.format(*cmd_args)
    def list(self, *args, **kwargs):
hooks = []
for row in self.session.query(self.Hooks).all():
hook = {
'Alias': row.key,
'Hook': row.cmd,
'Engine': row.engine
}
hooks.append(hook)
return pd.DataFrame(hooks)
def refresh(self, cell):
self.session.query(self.Hooks).delete()
self.session.commit()
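# --- Editor's usage sketch (not part of the original module) ----------------
# Assumes EngineHandler/DBSessionHandler supply self.session, self.Hooks and
# get_engine(), and that an engine aliased "my_pg" is already registered; the
# hook key \d and the users table are illustrative only.
#     handler = HookHandler('postgresql://localhost/mydb')
#     handler.add('', '\d=SELECT * FROM {} LIMIT 10')
#     engine, sql = handler.run('~my_pg \d users', None)
#     # sql == 'SELECT * FROM users LIMIT 10'
#     handler.list()   # DataFrame of all stored hooks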
|
{
"content_hash": "d387c192dc81d8e695a582e9c7a133b8",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 78,
"avg_line_length": 32.10344827586207,
"alnum_prop": 0.5440386680988185,
"repo_name": "tmthyjames/SQLCell",
"id": "e1966bc3f800ae157eda168ff7d6d105530048d0",
"size": "1862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlcell/hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14519"
}
],
"symlink_target": ""
}
|
import copy
import testtools
from sahara.conductor import manager
from sahara import context
from sahara import exceptions as ex
import sahara.tests.unit.conductor.base as test_base
SAMPLE_CLUSTER = {
"plugin_name": "test_plugin",
"hadoop_version": "test_version",
"tenant_id": "tenant_1",
"is_transient": True,
"name": "test_cluster",
"user_keypair_id": "my_keypair",
"node_groups": [
{
"name": "ng_1",
"flavor_id": "42",
"node_processes": ["p1", "p2"],
"count": 1,
"security_groups": None
},
{
"name": "ng_2",
"flavor_id": "42",
"node_processes": ["p3", "p4"],
"count": 3,
"security_groups": ["group1", "group2"]
}
],
"cluster_configs": {
"service_1": {
"config_2": "value_2"
},
"service_2": {
"config_1": "value_1"
}
},
}
class ClusterTest(test_base.ConductorManagerTestCase):
def __init__(self, *args, **kwargs):
super(ClusterTest, self).__init__(
checks=[
lambda: SAMPLE_CLUSTER,
lambda: manager.CLUSTER_DEFAULTS,
lambda: manager.NODE_GROUP_DEFAULTS,
lambda: manager.INSTANCE_DEFAULTS,
], *args, **kwargs)
def test_cluster_create_list_delete(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
self.assertIsInstance(cluster_db_obj, dict)
lst = self.api.cluster_get_all(ctx)
self.assertEqual(len(lst), 1)
cl_id = lst[0]["id"]
self.api.cluster_destroy(ctx, cl_id)
lst = self.api.cluster_get_all(ctx)
self.assertEqual(len(lst), 0)
with testtools.ExpectedException(ex.NotFoundException):
self.api.cluster_destroy(ctx, cl_id)
def test_duplicate_cluster_create(self):
ctx = context.ctx()
self.api.cluster_create(ctx, SAMPLE_CLUSTER)
with testtools.ExpectedException(ex.DBDuplicateEntry):
self.api.cluster_create(ctx, SAMPLE_CLUSTER)
def test_cluster_fields(self):
ctx = context.ctx()
cl_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
self.assertIsInstance(cl_db_obj, dict)
for key, val in SAMPLE_CLUSTER.items():
if key == 'node_groups':
# this will be checked separately
continue
self.assertEqual(val, cl_db_obj.get(key),
"Key not found %s" % key)
for ng in cl_db_obj["node_groups"]:
ng.pop("created_at")
ng.pop("updated_at")
ng.pop("id")
self.assertEqual(ng.pop("cluster_id"), cl_db_obj["id"])
ng.pop("image_id")
self.assertEqual(ng.pop("instances"), [])
ng.pop("node_configs")
ng.pop("node_group_template_id")
ng.pop("volume_mount_prefix")
ng.pop("volumes_size")
ng.pop("volumes_per_node")
ng.pop("floating_ip_pool")
ng.pop("image_username")
ng.pop("open_ports")
ng.pop("auto_security_group")
ng.pop("tenant_id")
self.assertEqual(SAMPLE_CLUSTER["node_groups"],
cl_db_obj["node_groups"])
def test_cluster_no_ng(self):
ctx = context.ctx()
cluster_schema = copy.deepcopy(SAMPLE_CLUSTER)
cluster_schema.pop('node_groups')
cl_db_obj = self.api.cluster_create(ctx, cluster_schema)
self.assertIsInstance(cl_db_obj, dict)
for key, val in cluster_schema.items():
self.assertEqual(val, cl_db_obj.get(key),
"Key not found %s" % key)
self.assertEqual(cl_db_obj["node_groups"], [])
def test_cluster_update_status(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
_id = cluster_db_obj["id"]
updated_cl = self.api.cluster_update(ctx, _id, {"status": "Active"})
self.assertIsInstance(updated_cl, dict)
self.assertEqual(updated_cl["status"], "Active")
get_cl_obj = self.api.cluster_get(ctx, _id)
self.assertEqual(updated_cl, get_cl_obj)
with testtools.ExpectedException(ex.NotFoundException):
self.api.cluster_update(ctx, "bad_id", {"status": "Active"})
def _ng_in_cluster(self, cluster_db_obj, ng_id):
for ng in cluster_db_obj["node_groups"]:
if ng["id"] == ng_id:
return ng
return None
def test_add_node_group(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
_id = cluster_db_obj["id"]
node_group = {
"name": "ng_3",
"flavor_id": "42",
"node_processes": ["p3", "p4"],
"count": 5
}
ng_id = self.api.node_group_add(ctx, _id, node_group)
cluster_db_obj = self.api.cluster_get(ctx, _id)
found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
self.assertTrue(found_ng, "New Node Group not found")
def test_update_node_group(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
_id = cluster_db_obj["id"]
self.assertEqual(len(cluster_db_obj["node_groups"]), 2)
ng_id = cluster_db_obj["node_groups"][-1]["id"]
self.api.node_group_update(ctx, ng_id, {"image_id": "test_image"})
cluster_db_obj = self.api.cluster_get(ctx, _id)
found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
self.assertTrue(found_ng, "Updated Node Group not found")
for ng in cluster_db_obj["node_groups"]:
if ng["id"] != ng_id:
continue
self.assertEqual(ng["image_id"], "test_image")
def test_delete_node_group(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
_id = cluster_db_obj["id"]
ng_id = cluster_db_obj["node_groups"][-1]["id"]
self.api.node_group_remove(ctx, ng_id)
cluster_db_obj = self.api.cluster_get(ctx, _id)
found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
        self.assertFalse(found_ng, "Node Group is still in a Cluster")
with testtools.ExpectedException(ex.NotFoundException):
self.api.node_group_remove(ctx, ng_id)
def _add_instance(self, ctx, ng_id):
instance = {
"instance_name": "additional_vm"
}
return self.api.instance_add(ctx, ng_id, instance)
def test_add_instance(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
_id = cluster_db_obj["id"]
ng_id = cluster_db_obj["node_groups"][-1]["id"]
count = cluster_db_obj["node_groups"][-1]["count"]
self._add_instance(ctx, ng_id)
cluster_db_obj = self.api.cluster_get(ctx, _id)
for ng in cluster_db_obj["node_groups"]:
if ng["id"] != ng_id:
continue
ng.pop('tenant_id')
self.assertEqual(count + 1, ng["count"])
self.assertEqual("additional_vm",
ng["instances"][0]["instance_name"])
def test_update_instance(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
_id = cluster_db_obj["id"]
ng_id = cluster_db_obj["node_groups"][-1]["id"]
instance_id = self._add_instance(ctx, ng_id)
self.api.instance_update(ctx, instance_id,
{"management_ip": "1.1.1.1"})
cluster_db_obj = self.api.cluster_get(ctx, _id)
for ng in cluster_db_obj["node_groups"]:
if ng["id"] != ng_id:
continue
self.assertEqual("1.1.1.1", ng["instances"][0]["management_ip"])
def test_remove_instance(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
_id = cluster_db_obj["id"]
ng_id = cluster_db_obj["node_groups"][-1]["id"]
count = cluster_db_obj["node_groups"][-1]["count"]
instance_id = self._add_instance(ctx, ng_id)
cluster_db_obj = self.api.cluster_get(ctx, _id)
for ng in cluster_db_obj["node_groups"]:
if ng["id"] != ng_id:
continue
self.assertEqual(count + 1, ng["count"])
self.api.instance_remove(ctx, instance_id)
cluster_db_obj = self.api.cluster_get(ctx, _id)
for ng in cluster_db_obj["node_groups"]:
if ng["id"] != ng_id:
continue
self.assertEqual(count, ng["count"])
with testtools.ExpectedException(ex.NotFoundException):
self.api.instance_remove(ctx, instance_id)
|
{
"content_hash": "44f6d589d0cde88bd81c87580edce45b",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 76,
"avg_line_length": 32.79197080291971,
"alnum_prop": 0.5496939343350028,
"repo_name": "mapr/sahara",
"id": "1898b4daf296c7dfbf27caad54633c3d32918c37",
"size": "9568",
"binary": false,
"copies": "2",
"ref": "refs/heads/juno-release",
"path": "sahara/tests/unit/conductor/manager/test_clusters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "PigLatin",
"bytes": "161"
},
{
"name": "Python",
"bytes": "2112549"
},
{
"name": "Shell",
"bytes": "35267"
}
],
"symlink_target": ""
}
|
"""
In this module you will find PyGridSizer and PyFlexGridSizer. Please
note that these sizers have since been ported to C++ (as wx.GridSizer
and wx.FlexGridSizer) and those versions are now exposed in the regular
wxPython wrappers. However I am also leaving them here in the library
so they can serve as an example of how to implement sizers in Python.
PyGridSizer: Sizes and positions items such that all rows are the same
height and all columns are the same width. You can specify a gap in
pixels to be used between the rows and/or the columns. When you
create the sizer you specify the number of rows or the number of
columns and then as you add items it figures out the other dimension
automatically. Like other sizers, items can be set to fill their
available space, or to be aligned on a side, in a corner, or in the
center of the space. When the sizer is resized, all the items are
resized the same amount so all rows and all columns remain the same
size.
PyFlexGridSizer: Derives from PyGridSizer and adds the ability for
particular rows and/or columns to be marked as growable. This means
that when the sizer changes size, the growable rows and columns are the
ones that stretch. The others remain at their initial size.
"""
import operator
import warnings
import wx
warningmsg = r"""\
################################################\
# THIS MODULE IS DEPRECATED |
# |
# You should use the native wx.GridSizer and |
# wx.FlexGridSizer unless there is a compelling |
# need to use this module. |
################################################/
"""
warnings.warn(warningmsg, DeprecationWarning, stacklevel=2)
#----------------------------------------------------------------------
class PyGridSizer(wx.PySizer):
def __init__(self, rows=0, cols=0, hgap=0, vgap=0):
wx.PySizer.__init__(self)
if rows == 0 and cols == 0:
raise ValueError, "rows and cols cannot both be zero"
self.rows = rows
self.cols = cols
self.hgap = hgap
self.vgap = vgap
def SetRows(self, rows):
if rows == 0 and self.cols == 0:
raise ValueError, "rows and cols cannot both be zero"
self.rows = rows
def SetColumns(self, cols):
if self.rows == 0 and cols == 0:
raise ValueError, "rows and cols cannot both be zero"
self.cols = cols
def GetRows(self):
return self.rows
def GetColumns(self):
return self.cols
def SetHgap(self, hgap):
self.hgap = hgap
def SetVgap(self, vgap):
self.vgap = vgap
def GetHgap(self, hgap):
return self.hgap
def GetVgap(self, vgap):
return self.vgap
#--------------------------------------------------
def CalcMin(self):
items = self.GetChildren()
nitems = len(items)
nrows = self.rows
ncols = self.cols
if ncols > 0:
nrows = (nitems + ncols-1) / ncols
else:
ncols = (nitems + nrows-1) / nrows
# Find the max width and height for any component.
w = 0
h = 0
for item in items:
size = item.CalcMin()
w = max(w, size.width)
h = max(h, size.height)
return wx.Size(ncols * w + (ncols-1) * self.hgap,
nrows * h + (nrows-1) * self.vgap)
#--------------------------------------------------
def RecalcSizes(self):
items = self.GetChildren()
if not items:
return
nitems = len(items)
nrows = self.rows
ncols = self.cols
if ncols > 0:
nrows = (nitems + ncols-1) / ncols
else:
ncols = (nitems + nrows-1) / nrows
sz = self.GetSize()
pt = self.GetPosition()
w = (sz.width - (ncols - 1) * self.hgap) / ncols;
h = (sz.height - (nrows - 1) * self.vgap) / nrows;
x = pt.x
for c in range(ncols):
y = pt.y
for r in range(nrows):
i = r * ncols + c
if i < nitems:
self.SetItemBounds(items[i], x, y, w, h)
y = y + h + self.vgap
x = x + w + self.hgap
#--------------------------------------------------
def SetItemBounds(self, item, x, y, w, h):
# calculate the item's size and position within
# its grid cell
ipt = wx.Point(x, y)
isz = item.CalcMin()
flag = item.GetFlag()
if flag & wx.EXPAND or flag & wx.SHAPED:
isz = (w, h)
else:
if flag & wx.ALIGN_CENTER_HORIZONTAL:
ipt.x = x + (w - isz.width) / 2
elif flag & wx.ALIGN_RIGHT:
ipt.x = x + (w - isz.width)
if flag & wx.ALIGN_CENTER_VERTICAL:
ipt.y = y + (h - isz.height) / 2
elif flag & wx.ALIGN_BOTTOM:
ipt.y = y + (h - isz.height)
item.SetDimension(ipt, isz)
#----------------------------------------------------------------------
class PyFlexGridSizer(PyGridSizer):
def __init__(self, rows=0, cols=0, hgap=0, vgap=0):
PyGridSizer.__init__(self, rows, cols, hgap, vgap)
self.rowHeights = []
self.colWidths = []
self.growableRows = []
self.growableCols = []
def AddGrowableRow(self, idx):
self.growableRows.append(idx)
def AddGrowableCol(self, idx):
self.growableCols.append(idx)
#--------------------------------------------------
def CalcMin(self):
items = self.GetChildren()
nitems = len(items)
nrows = self.rows
ncols = self.cols
if ncols > 0:
nrows = (nitems + ncols-1) / ncols
else:
ncols = (nitems + nrows-1) / nrows
# Find the max width and height for any component.
self.rowHeights = [0] * nrows
self.colWidths = [0] * ncols
for i in range(len(items)):
size = items[i].CalcMin()
row = i / ncols
col = i % ncols
self.rowHeights[row] = max(size.height, self.rowHeights[row])
self.colWidths[col] = max(size.width, self.colWidths[col])
# Add up all the widths and heights
cellsWidth = reduce(operator.__add__, self.colWidths)
cellHeight = reduce(operator.__add__, self.rowHeights)
return wx.Size(cellsWidth + (ncols-1) * self.hgap,
cellHeight + (nrows-1) * self.vgap)
#--------------------------------------------------
def RecalcSizes(self):
items = self.GetChildren()
if not items:
return
nitems = len(items)
nrows = self.rows
ncols = self.cols
if ncols > 0:
nrows = (nitems + ncols-1) / ncols
else:
ncols = (nitems + nrows-1) / nrows
minsz = self.CalcMin()
sz = self.GetSize()
pt = self.GetPosition()
# Check for growables
if self.growableRows and sz.height > minsz.height:
delta = (sz.height - minsz.height) / len(self.growableRows)
for idx in self.growableRows:
self.rowHeights[idx] = self.rowHeights[idx] + delta
if self.growableCols and sz.width > minsz.width:
delta = (sz.width - minsz.width) / len(self.growableCols)
for idx in self.growableCols:
self.colWidths[idx] = self.colWidths[idx] + delta
# bottom right corner
sz = wx.Size(pt.x + sz.width, pt.y + sz.height)
# Layout each cell
x = pt.x
for c in range(ncols):
y = pt.y
for r in range(nrows):
i = r * ncols + c
if i < nitems:
w = max(0, min(self.colWidths[c], sz.width - x))
h = max(0, min(self.rowHeights[r], sz.height - y))
self.SetItemBounds(items[i], x, y, w, h)
y = y + self.rowHeights[r] + self.vgap
x = x + self.colWidths[c] + self.hgap
#----------------------------------------------------------------------
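# Editor's usage sketch (not part of the original module): a PyFlexGridSizer
# used exactly like wx.FlexGridSizer. "frame" is a hypothetical parent window
# inside a running wx.App; Add() comes from the wx.PySizer base class.
#     sizer = PyFlexGridSizer(rows=0, cols=2, hgap=4, vgap=4)
#     sizer.AddGrowableCol(1)   # let the second column stretch on resize
#     sizer.Add(wx.StaticText(frame, -1, "Name:"), 0, wx.ALIGN_CENTER_VERTICAL)
#     sizer.Add(wx.TextCtrl(frame, -1), 1, wx.EXPAND)
#     frame.SetSizer(sizer)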
|
{
"content_hash": "6314128b582e8ec91e9d98bfa5ae5ef3",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 73,
"avg_line_length": 30.974545454545453,
"alnum_prop": 0.49624324958910543,
"repo_name": "ktan2020/legacy-automation",
"id": "bfa9578aa1ff6cca643446d7b5c56149c28a1ac1",
"size": "9557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/grids.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
}
|
'''
docscore-util - Lexicon-Based Sentiment Analysis Library
Utility methods for lexicon based sentiment analysis
'''
from __future__ import absolute_import
from . import sentlex
import re
import math
import os
import nltk  # used below by docSentiScore for WordNet lemmatization
from . import negdetect
from . import stopwords
# Score adjustment functions
def scoreSimple(score, position, totaltags):
'''
simple - returns original score
'''
return score
# Returns adjusted score based on linear function of word distance to end of document
def scoreAdjLinear(score, position, totaltags):
'''
Linear adjustment - adjusts a given numeric score linearly, based on its position in the document.
'''
C = 1.0
return (score * (position * C) / totaltags)
# Returns 50% of score value if the term is found at 1st half of document
def scoreAdjModular(score, position, totaltags):
'''
    Returns 50% of the score if the term is found in the 1st half of the document.
    Returns the original value if the term is found in the 2nd half of the document.
'''
if (position) >= (totaltags / 2.0):
return score
else:
return (score + 0.0) / 2.0
# Voting schemes
def majorityVote(resultList, shift, threshold):
'''
    Given a list of tuples (posscore, negscore), determines pos/neg sentiment by majority voting.
    Works best with an odd number of classifiers!
Each classifier gives 1 vote for either pos/neg class.
Returns prediction result in form (pos,neg,posscore,negscore)
'''
posvotes = 0
negvotes = 0
for L in resultList:
totalpos = L[0]
totalneg = L[1]
if (totalneg - shift) - totalpos > threshold:
# predicted a negative
negvotes += 1
else:
if totalpos - (totalneg - shift) >= threshold:
posvotes += 1
scores = (posvotes / len(resultList) + 0.0, negvotes / len(resultList) + 0.0)
if negvotes > posvotes:
return (0, 1) + scores
elif posvotes > negvotes:
return (1, 0) + scores
else:
return (0, 0) + scores
def sumVote(resultList, shift, threshold):
"""
    Given a list of prediction result tuples (posscore, negscore), determines overall sentiment from
    the sum of each classifier's decision scores (valued [0,1]).
Returns tuple (posflag, negflag, posscore, negscore)
"""
postotal = 0.0
negtotal = 0.0
lenf = float(len(resultList))
for L in resultList:
pos = L[0]
neg = L[1]
postotal += pos
negtotal += neg
if postotal - (negtotal - shift) >= threshold:
return (1, 0, postotal / lenf, negtotal / lenf)
else:
return (0, 1, postotal / lenf, negtotal / lenf)
def maxVote(resultList, shift, threshold):
"""
    Given a list of prediction result tuples (pos, neg, hitratio), determines overall sentiment based on the maximum
    value obtained in either class across all predictions.
"""
maxp = max([x[0] for x in resultList])
maxn = max([x[1] for x in resultList])
if maxp - (maxn - shift) >= threshold:
return (1, 0, maxp, maxn)
else:
return (0, 1, maxp, maxn)
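# --- Editor's usage sketch (not part of the original module) ----------------
# Each classifier contributes a (posscore, negscore) pair; with shift and
# threshold at 0 the comparison is direct. Two of the three pairs below lean
# positive, so both schemes return the positive class (first element == 1).
#     predictions = [(0.8, 0.2), (0.6, 0.4), (0.3, 0.7)]
#     majorityVote(predictions, shift=0.0, threshold=0.0)   # -> (1, 0, ...)
#     sumVote(predictions, shift=0.0, threshold=0.0)        # -> (1, 0, ...)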
###
#
# Legacy Code - To be removed
#
###
# Constants that drive scoring behavior
SCOREALL = 0
SCOREONCE = 1
SCOREONCEINPARAGRAPH = 2
SCOREWITHFREQ = 3
SCOREWITHSTOP = 4
# Load default stopwords
objectiveWords = stopwords.Stopword()
# Document score calculation
def docSentiScore(L, Doc, aflag, vflag, rflag, nflag, negflag, negwindow=5,
w=lambda x, y, z: x,
scoringmethod=SCOREALL):
'''
docSentiScore - Computes document sentiment score for POS-tagged Doc using sentiment lexicon L.
The following mandatory flags specify which POS tags to take into account:
- aflag (adjectives)
- vflag (verbs)
- rflag (adverbs)
- nflag (nouns)
In addition, negflag is a boolean indicating whether to use negation detection, with window scope negwindow.
w(x,i,N) is a weight adjustment function based on word position within the document.
scoringmethod is one of the SCORE* constants defined above and switches scoring features on/off.
Returns tuple (posscore, negscore, doc) containing final document scores and annotated document.
'''
# 0. Initialize
tags = Doc.split()
annotatedTags = []
doclen = len(tags)
i = 0
postotal = 0
negtotal = 0
notfoundcounter = 0
foundcounter = 0
negcount = 0
# Setup stem preprocessing for verbs
wnl = nltk.stem.WordNetLemmatizer()
# 1. Negation detection pre-processing - return an array w/ position of negated terms
vNEG = negdetect.getNegationArray(tags, negwindow)
#
# 2. Scan for scores for each POS
# After POS-tagging a term will appear as either term/POS or term_POS
# We assume such weirdnesses will not naturally occur on plain text.
#
posindex = 0
negindex = 1
tagList = []
for tagword in tags:
i += 1
scoretuple = (0, 0)
tagfound = False
tagseparator = ''
# Adjectives
if aflag == True and re.search('[_/](JJ|JJ.)$', tagword):
tagfound = True
tagseparator = tagword[re.search('[_/](JJ|JJ.)$', tagword).start()]
thisterm = tagword.split(tagseparator)[0]
scoretuple = L.getadjective(thisterm)
# Verbs (VBP / VBD/ etc...)
if vflag == True and re.search('[_/](VB|VB.)$', tagword):
tagfound = True
tagseparator = tagword[re.search('[_/](VB|VB.)$', tagword).start()]
thisterm = tagword.split(tagseparator)[0]
thisterm = wnl.lemmatize(thisterm, pos='v')
scoretuple = L.getverb(thisterm)
# Adverbs
if rflag == True and re.search('[_/]RB$', tagword):
tagfound = True
tagseparator = tagword[re.search('[_/]RB$', tagword).start()]
thisterm = tagword.split(tagseparator)[0]
scoretuple = L.getadverb(thisterm)
# Nouns
if nflag == True and re.search('[_/]NN$', tagword):
tagfound = True
tagseparator = tagword[re.search('[_/]NN$', tagword).start()]
thisterm = tagword.split(tagseparator)[0]
scoretuple = L.getnoun(thisterm)
# Process negation detection
if negflag == True:
posindex = vNEG[i - 1]
negindex = (1 + vNEG[i - 1]) % 2
#
# Add to total with weight score
#
posval = 0.0
negval = 0.0
if tagfound and \
(
(scoringmethod == SCOREALL) or
(scoringmethod == SCOREWITHFREQ) or
(scoringmethod == SCOREONCE and (not tagword in tagList)) or
(scoringmethod == SCOREWITHSTOP and (not objectiveWords.is_stop(thisterm)))
):
if (scoringmethod in [SCOREWITHFREQ, SCOREWITHSTOP]):
# Scoring with frequency information
# Frequency is a real value in [0.0, 1.0]. We take its square root so the
# down-weighting of frequent terms kicks in quickly, even for values close to 0.
posval += w(scoretuple[posindex], i, doclen) * \
(1.0 - math.sqrt(L.get_freq(thisterm)))
negval += w(scoretuple[negindex], i, doclen) * \
(1.0 - math.sqrt(L.get_freq(thisterm)))
else:
# Just plain scoring from lexicon - add
posval += w(scoretuple[posindex], i, doclen)
negval += w(scoretuple[negindex], i, doclen)
postotal += posval
negtotal += negval
# Found a tag - increase counters and add tag to list
if tagfound:
tagList.append(tagword)
foundcounter += 1.0
if negflag == True and vNEG[i - 1] == 1:
negcount += 1
if scoretuple == (0, 0):
notfoundcounter += 1.0
# add this tag back to annotated version
if tagfound:
if negflag:
negtag = str(vNEG[i - 1])
else:
negtag = 'NONEG'
annotatedTags.append(tagword + '##NEGAT:' + negtag + '##POS:' +
str(posval) + '##NEG:' + str(negval))
else:
annotatedTags.append(tagword)
# Completed scan
# Add negated words to negative score (Potts 2011)
# alpha is a scaling factor based on how much of the document has been negated
if foundcounter > 0.0:
alpha = (negcount) / (foundcounter)
else:
alpha = 0.0
##negtotal += negcount * ( 0.5 * (1 - alpha) )
#negtotal += negcount * 0.3
if (foundcounter - notfoundcounter) > 0.0:
# resulting scores are the normalized proportions of all *scored* terms
# (ignoring neutrals/unknowns)
resultpos = float(postotal) / (foundcounter - notfoundcounter)
resultneg = float(negtotal) / (foundcounter - notfoundcounter)
else:
resultpos = 0.0
resultneg = 0.0
return (resultpos, resultneg, ' '.join(annotatedTags))
|
{
"content_hash": "7799ed6b3561b0f144a4918bb0bae44a",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 113,
"avg_line_length": 31.658536585365855,
"alnum_prop": 0.5946511116002642,
"repo_name": "bohana/sentlex",
"id": "aa03e39f6a4efd76c6a9556eaf1f5358784376b1",
"size": "9086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentlex/docscoreutil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lex",
"bytes": "13720057"
},
{
"name": "Python",
"bytes": "80265"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
}
|
"""File defining the context queues:
- The FileContexte class, modelling a queue of contexts
- The FileVide exception
"""
from abstraits.obase import BaseObj
class FileContexte(BaseObj):
"""This class defines a queue of contexts.
It is a wrapper class around a list. You interact with this class
through a handful of methods only:
ajouter(self, objet) -- adds the object at the head of the queue
retirer(self) -- removes the object at the head of the queue and returns it
This queue can be iterated over, and the items it contains are indexed
(file[0] returns the first element).
"""
def __init__(self, parent=None):
"""Constructor of the queue, initially empty."""
BaseObj.__init__(self)
self._file = [] # the list representing the queue
self._taille_min = 1 # the minimum size of the queue
self._position = 0 # current position in the queue
self.parent = parent
self._construire()
def __getnewargs__(self):
"""Method returning the arguments to pass to the constructor"""
return ()
def __getitem__(self, index):
"""Return the object located at index 'index'.
index may also be a context type.
"""
if isinstance(index, int):
try:
return self._file.__getitem__(index)
except IndexError as err:
print(self._file, index)
raise err
else:
for contexte in self._file:
if isinstance(contexte, index):
print(contexte)
return contexte
raise ValueError("unknown context type {}".format(index))
def __setitem__(self, index, contexte):
"""Replace the context located at index 'index'"""
self._file.__setitem__(index, contexte)
def __len__(self):
"""Return the size of the queue"""
return len(self._file)
def __iter__(self):
"""Return the iterator of the queue"""
return iter(self._file)
def __str__(self):
"""Return the string representation of the queue"""
return "f" + str(self._file)
def _get_position(self):
return self._position
def _set_position(self, position):
self._position = position
position = property(_get_position, _set_position)
def get(self, index):
"""Try to retrieve the context at the given index.
If that fails, return None.
Note: index must be positive.
"""
if index < 0:
raise IndexError
try:
contexte = self[index]
except IndexError:
contexte = None
return contexte
def get_position(self, contexte):
"""Return the position of the given context.
If the context cannot be found, return the current position.
"""
try:
return self._file.index(contexte)
except ValueError:
return self._position
def get_contexte_par_unom(self, unom):
"""Return, if found, the context with the given name."""
for contexte in self._file:
if contexte.unom == unom:
return contexte
raise KeyError("context not found {}".format(unom))
def ajouter(self, objet):
"""Insert the given object at index self._position."""
self._file.insert(self._position, objet)
self.actualiser_position()
def retirer(self, contexte=None):
"""Remove the specified (or current) context and return it.
If the size of the list is too small (self._taille_min), an
exception is raised.
If a context is given, that specific context is removed.
"""
if contexte:
self._file.remove(contexte)
self.actualiser_position()
else:
if len(self._file) <= self._taille_min:
raise FileVide
contexte = self.actuel
del self._file[self._position]
self.actualiser_position()
return contexte
def vider(self):
"""Empty the queue of contexts"""
self._file[:] = []
self.actualiser_position()
@property
def actuel(self):
"""Return the current context.
The position is used to determine which context is current.
If the context is not found, an IndexError exception is raised.
"""
return self[self._position]
def avancer_position(self):
"""Advance the position (move forward).
If no context is found at the targeted position, an IndexError
exception is raised.
Return the new current context.
"""
nouveau_contexte = self[self._position + 1]
self._position += 1
self.actualiser_position()
return nouveau_contexte
def reculer_position(self):
"""Move the position back (move backward).
If no context is found at the targeted position, an IndexError
exception is raised.
Return the new current context.
"""
if self._position <= 0:
raise IndexError
nouveau_contexte = self[self._position - 1]
self._position -= 1
self.actualiser_position()
return nouveau_contexte
def actualiser_position(self):
"""Update the position.
If the position points beyond the list of contexts,
reset it to a sensible value.
"""
if len(self._file) == 0:
self._position = 0
elif self._position >= len(self._file):
self._position = len(self._file) - 1
class FileVide(RuntimeError):
"""Exception raised when the queue is empty or too small.
"""
def __str__(self):
"""Return a more readable description of the exception"""
return "the queue is empty or too small"
|
{
"content_hash": "f4793c7e38e4081b73daef373c54d718",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 77,
"avg_line_length": 29.110576923076923,
"alnum_prop": 0.5919075144508671,
"repo_name": "stormi/tsunami",
"id": "2d475a1a250106c17336c89eb8a983a1a4c53e20",
"size": "7674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/interpreteur/file.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
"""
AUTHOR: Gabriel Bassett
DATE: 12-17-2013
DEPENDENCIES: a list of modules requiring installation
Copyright 2014 Gabriel Bassett
LICENSE:
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
DESCRIPTION:
Functions necessary to enrich the context graph
"""
# PRE-USER SETUP
pass
########### NOT USER EDITABLE ABOVE THIS POINT #################
# USER VARIABLES
TLD_CONFIG_FILE = "tld.yapsy-plugin"
NAME = "TLD Enrichment"
########### NOT USER EDITABLE BELOW THIS POINT #################
## IMPORTS
from yapsy.IPlugin import IPlugin
import logging
import networkx as nx
from datetime import datetime
import dateutil.parser # to parse variable time strings
import uuid
import ConfigParser
import inspect
try:
import tldextract
module_import_success = True
except:
module_import_success = False
logging.error("Module import failed. Please install the following module: tldextract.")
raise
## SETUP
__author__ = "Gabriel Bassett"
loc = inspect.getfile(inspect.currentframe())
ind = loc.rfind("/")
loc = loc[:ind+1]
config = ConfigParser.SafeConfigParser()
config.readfp(open(loc + TLD_CONFIG_FILE))
if config.has_section('Core'):
if 'name' in config.options('Core'):
NAME = config.get('Core', 'name')
## EXECUTION
class PluginOne(IPlugin):
def __init__(self):
pass
def configure(self):
"""
:return: return list of [plugin type, configure success (bool), name, description, list of acceptable inputs, resource cost (1-10, 1=low), speed (1-10, 1=fast)]
"""
config_options = config.options("Configuration")
if 'cost' in config_options:
cost = config.get('Configuration', 'cost')
else:
cost = 9999
if 'speed' in config_options:
speed = config.get('Configuration', 'speed')
else:
speed = 9999
if 'type' in config_options:
plugin_type = config.get('Configuration', 'Type')
else:
logging.error("'Type' not specified in config file.")
return [None, False, NAME, "Takes a domain name and returns the top level domain, mid-domain, and sub-domain as networkx graph.", None, cost, speed]
if 'inputs' in config_options:
inputs = config.get('Configuration', 'Inputs')
inputs = [l.strip().lower() for l in inputs.split(",")]
else:
logging.error("No input types specified in config file.")
return [plugin_type, False, NAME, "Takes a domain name and returns the top level domain, mid-domain, and sub-domain as networkx graph.", None, cost, speed]
if not module_import_success:
logging.error("Module import failure caused configuration failure.")
return [plugin_type, False, NAME, "Takes a domain name and returns the top level domain, mid-domain, and sub-domain as networkx graph.", inputs, cost, speed]
else:
return [plugin_type, True, NAME, "Takes a domain name and returns the top level domain, mid-domain, and sub-domain as networkx graph.", inputs, cost, speed]
def run(self, domain, start_time="", include_subdomain=False):
""" str, str -> networkx multiDiGraph
:param domain: a string containing a domain to look up
:param start_time: string in ISO 8601 combined date and time format (e.g. 2014-11-01T10:34Z) or datetime object.
:param include_subdomain: Boolean value. Default False. If true, subdomain will be returned in enrichment graph
:return: a networkx graph representing the sections of the domain
"""
# Parse the start_time
if type(start_time) is str:
try:
time = dateutil.parser.parse(start_time).strftime("%Y-%m-%dT%H:%M:%SZ")
except:
time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
elif type(start_time) is datetime:
time = start_time.strftime("%Y-%m-%dT%H:%M:%SZ")
else:
time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
ext = tldextract.extract(domain)
g = nx.MultiDiGraph()
# Get or create Domain node
domain_uri = "class=attribute&key={0}&value={1}".format("domain", domain)
g.add_node(domain_uri, {
'class': 'attribute',
'key': "domain",
"value": domain,
"start_time": time,
"uri": domain_uri
})
# Get or create Enrichment node
tld_extract_uri = "class=attribute&key={0}&value={1}".format("enrichment", "tld_extract")
g.add_node(tld_extract_uri, {
'class': 'attribute',
'key': "enrichment",
"value": "tld_extract",
"start_time": time,
"uri": tld_extract_uri
})
# Get or create TLD node
tld_uri = "class=attribute&key={0}&value={1}".format("domain", ext.suffix)
g.add_node(tld_uri, {
'class': 'attribute',
'key': "domain",
"value": ext.suffix,
"start_time": time,
"uri": tld_uri
})
# Link domain to tld
edge_attr = {
"relationship": "describedBy",
"start_time": time,
"origin": "tld_extract",
"describedBy":"suffix"
}
source_hash = uuid.uuid3(uuid.NAMESPACE_URL, domain_uri)
dest_hash = uuid.uuid3(uuid.NAMESPACE_URL, tld_uri)
edge_uri = "source={0}&destionation={1}".format(str(source_hash), str(dest_hash))
rel_chain = "relationship"
while rel_chain in edge_attr:
edge_uri = edge_uri + "&{0}={1}".format(rel_chain,edge_attr[rel_chain])
rel_chain = edge_attr[rel_chain]
if "origin" in edge_attr:
edge_uri += "&{0}={1}".format("origin", edge_attr["origin"])
edge_attr["uri"] = edge_uri
g.add_edge(domain_uri, tld_uri, edge_uri, edge_attr)
# Get or create mid domain node
mid_domain_uri = "class=attribute&key={0}&value={1}".format("domain", ext.domain)
g.add_node(mid_domain_uri, {
'class': 'attribute',
'key': "domain",
"value": ext.domain,
"start_time": time,
"uri": mid_domain_uri
})
# Link domain to mid_domain
edge_attr = {
"relationship": "describedBy",
"start_time": time,
"origin": "tld_extract",
"describedBy":"domain"
}
source_hash = uuid.uuid3(uuid.NAMESPACE_URL, domain_uri)
dest_hash = uuid.uuid3(uuid.NAMESPACE_URL, mid_domain_uri)
edge_uri = "source={0}&destionation={1}".format(str(source_hash), str(dest_hash))
rel_chain = "relationship"
while rel_chain in edge_attr:
edge_uri = edge_uri + "&{0}={1}".format(rel_chain,edge_attr[rel_chain])
rel_chain = edge_attr[rel_chain]
if "origin" in edge_attr:
edge_uri += "&{0}={1}".format("origin", edge_attr["origin"])
edge_attr["uri"] = edge_uri
g.add_edge(domain_uri, mid_domain_uri, edge_uri, edge_attr)
# if including subdomains, create subdomain and node
if include_subdomain:
# Get or create subdomain node
subdomain_uri = "class=attribute&key={0}&value={1}".format("domain", ext.subdomain)
g.add_node(subdomain_uri, {
'class': 'attribute',
'key': "domain",
"value": ext.subdomain,
"start_time": time,
"uri": subdomain_uri
})
# Link domain to subdomain
edge_attr = {
"relationship": "describedBy",
"start_time": time,
"origin": "tld_extract",
"describedBy":"subdomain"
}
source_hash = uuid.uuid3(uuid.NAMESPACE_URL, domain_uri)
dest_hash = uuid.uuid3(uuid.NAMESPACE_URL, subdomain_uri)
edge_uri = "source={0}&destionation={1}".format(str(source_hash), str(dest_hash))
rel_chain = "relationship"
while rel_chain in edge_attr:
edge_uri = edge_uri + "&{0}={1}".format(rel_chain,edge_attr[rel_chain])
rel_chain = edge_attr[rel_chain]
if "origin" in edge_attr:
edge_uri += "&{0}={1}".format("origin", edge_attr["origin"])
edge_attr["uri"] = edge_uri
g.add_edge(domain_uri, subdomain_uri, edge_uri, edge_attr)
# Link domain to enrichment
edge_attr = {
"relationship": "describedBy",
"start_time": time,
"origin": "tld_extract"
}
source_hash = uuid.uuid3(uuid.NAMESPACE_URL, domain_uri)
dest_hash = uuid.uuid3(uuid.NAMESPACE_URL, tld_extract_uri)
edge_uri = "source={0}&destionation={1}".format(str(source_hash), str(dest_hash))
rel_chain = "relationship"
while rel_chain in edge_attr:
edge_uri = edge_uri + "&{0}={1}".format(rel_chain,edge_attr[rel_chain])
rel_chain = edge_attr[rel_chain]
if "origin" in edge_attr:
edge_uri += "&{0}={1}".format("origin", edge_attr["origin"])
edge_attr["uri"] = edge_uri
g.add_edge(domain_uri, tld_extract_uri, edge_uri, edge_attr)
return g
|
{
"content_hash": "94ce3b5e98b85bb663362f4b9beee15d",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 169,
"avg_line_length": 37.932330827067666,
"alnum_prop": 0.5879088206144698,
"repo_name": "vz-risk/Verum",
"id": "b19ce53e839eed80387eecc14d73fc1d854c289b",
"size": "10112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/tld.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "278301"
}
],
"symlink_target": ""
}
|
import time
import shlex
from hurry.filesize import size
from texttable import Texttable
from ussclicore.argumentParser import ArgumentParser, ArgumentParserError
from ussclicore.cmd import Cmd, CoreGlobal
from progressbar import AnimatedMarker, Bar, BouncingBar, Counter, ETA, \
FileTransferSpeed, FormatLabel, Percentage, \
ProgressBar, ReverseBar, RotatingMarker, \
SimpleProgress, Timer, UnknownLength
from ussclicore.utils import generics_utils, printer, progressbar_widget, download_utils
from hammr.utils import *
from hammr.utils.hammr_utils import *
from hammr.utils.deployment_utils import *
from hammr.utils.publish_utils import *
from hammr.utils.publish_builders import *
#This import and configuration avoid pyxb warnings about xmls
import logging
logging.basicConfig()
logging.getLogger("pyxb").setLevel(logging.ERROR)
class Image(Cmd, CoreGlobal):
"""List, download or delete existing machine images. Publish new machine image to cloud account from configuration file"""
cmd_name = "image"
pbar = None
def __init__(self):
super(Image, self).__init__()
def arg_list(self):
doParser = ArgumentParser(prog=self.cmd_name + " list", add_help=True,
description="Displays all the machine images built and publish information of those machine images to their respective target platforms")
return doParser
def do_list(self, args):
try:
# call UForge API
# get images
printer.out("Getting all images and publications for [" + self.login + "] ...", printer.INFO)
images = self.get_all_images()
if len(images) == 0 :
printer.out("No image available", printer.INFO)
else :
printer.out("Images:")
table = self.initialize_text_table(800)
table.set_cols_dtype(["t", "t", "t", "t", "t", "t", "t", "t", "t"])
table.header(
["Id", "Name", "Version", "Rev.", "Format", "Created", "Size", "Compressed", "Generation Status"])
images = generics_utils.order_list_object_by(images, "name")
for image in images:
imgStatus = self.get_image_status(image.status)
table.add_row([image.dbId, image.name, image.version, image.revision, image.targetFormat.name,
image.created.strftime("%Y-%m-%d %H:%M:%S"), size(image.fileSize),
"X" if image.compress else "", imgStatus])
print table.draw() + "\n"
printer.out("Found " + str(len(images)) + " images", printer.INFO)
# get publications
publish_images = self.api.Users(self.login).Pimages.Getall()
publish_images = publish_images.publishImages.publishImage
if publish_images is None or len(publish_images) == 0:
printer.out("No publication available", printer.INFO)
return 0
printer.out("Publications:")
table = self.initialize_text_table(800)
table.set_cols_dtype(["t", "t", "t", "t", "t", "t", "t"])
table.header(["Template name", "Image ID", "Publish ID", "Account name", "Format", "Cloud ID", "Status"])
publish_images = generics_utils.order_list_object_by(publish_images, "name")
for publish_image in publish_images:
pubStatus = get_publish_status(publish_image.status)
table.add_row([publish_image.name,
generics_utils.extract_id(publish_image.imageUri),
publish_image.dbId,
publish_image.credAccount.name if publish_image.credAccount is not None else "-",
publish_image.credAccount.targetPlatform.name,
publish_image.cloudId if publish_image.cloudId is not None else "-", pubStatus])
print table.draw() + "\n"
printer.out("Found " + str(len(publish_images)) + " publications", printer.INFO)
return 0
except ArgumentParserError as e:
printer.out("In Arguments: " + str(e), printer.ERROR)
self.help_list()
except Exception as e:
return handle_uforge_exception(e)
def help_list(self):
doParser = self.arg_list()
doParser.print_help()
def arg_publish(self):
do_parser = ArgumentParser(prog=self.cmd_name + " publish", add_help=True,
description="Publish (upload and register) a built machine image to a target environment")
mandatory = do_parser.add_argument_group("mandatory arguments")
mandatory.add_argument('--file', dest='file', required=True,
help="yaml/json file providing the cloud account parameters required for upload and registration")
optional = do_parser.add_argument_group("optional arguments")
optional.add_argument('--id', dest='id', required=False, help="id of the image to publish")
return do_parser
def do_publish(self, args):
try:
do_args = self.parse_args(args)
template = retrieve_template_from_file(do_args.file)
if do_args.id:
self.do_publish_with_id(do_args.id, template)
else:
self.do_publish_without_id(template)
except KeyError as e:
printer.out("unknown error template file, key: " + str(e), printer.ERROR)
return 2
except ArgumentParserError as e:
printer.out("In Arguments: " + str(e), printer.ERROR)
self.help_publish()
return 2
except ValueError as e:
printer.out(str(e), printer.ERROR)
return 2
except KeyboardInterrupt:
return 2
except Exception as e:
return handle_uforge_exception(e)
def help_publish(self):
doParser = self.arg_publish()
doParser.print_help()
def arg_deploy(self):
doParser = ArgumentParser(prog=self.cmd_name + " deploy", add_help=True,
description="Deploy an instance of a published image on the targeted cloud.")
mandatory = doParser.add_argument_group("mandatory arguments")
mandatory.add_argument('--file', dest='file', required=True,
help="yaml/json file providing the instance parameters required for deployment on targeted cloud")
mandatory.add_argument('--publish-id', dest='pid', required=True,
help="the ID of the published image to deploy")
return doParser
def do_deploy(self, args):
try:
# add arguments
do_parser = self.arg_deploy()
do_args = do_parser.parse_args(shlex.split(args))
# if the help command is called, parse_args returns None object
if not do_args:
return 2
publish_image = self.get_publish_image_from_publish_id(do_args.pid)
if not self.is_publish_image_ready_to_deploy(publish_image):
raise ValueError("Published image with id '" + do_args.pid + "' is not ready to be deployed")
deploy_file = generics_utils.get_file(do_args.file)
if deploy_file is None:
raise TypeError("Deploy file not found")
if publish_image.targetFormat is None:
raise TypeError("Publish image target format not found")
target_plateform_name = publish_image.targetFormat.name
if "Amazon" in target_plateform_name:
return self.deploy_aws(deploy_file, publish_image)
elif "OpenStack" in target_plateform_name:
return self.deploy_openstack(deploy_file, publish_image)
elif "Azure" in target_plateform_name:
return self.deploy_azure(deploy_file, publish_image)
else:
printer.out("Hammr only supports deployments for Amazon AWS, OpenStack and Microsoft Azure ARM.",
printer.ERROR)
return 2
except (TypeError, ValueError) as e:
printer.out(str(e), printer.ERROR)
return 2
except ArgumentParserError as e:
printer.out("In Arguments: " + str(e), printer.ERROR)
self.help_deploy()
except KeyboardInterrupt:
printer.out(
"You have exited the command-line, however the deployment may still be in progress. Please go to the cloud's console for more information",
printer.WARNING)
pass
except Exception as e:
return handle_uforge_exception(e)
def help_deploy(self):
doParser = self.arg_deploy()
doParser.print_help()
def arg_delete(self):
do_parser = ArgumentParser(prog=self.cmd_name + " delete", add_help=True,
description="Deletes a machine image or publish information")
mandatory = do_parser.add_argument_group("mandatory arguments")
mandatory.add_argument('--id', dest='id', required=True, help="the ID of the machine image to delete")
return do_parser
def do_delete(self, args):
try:
# add arguments
do_parser = self.arg_delete()
try:
do_args = do_parser.parse_args(shlex.split(args))
except SystemExit as e:
return
# call UForge API
printer.out("Searching image with id [" + do_args.id + "] ...", printer.INFO)
images = self.get_all_images()
if len(images) == 0 :
raise ValueError("No image found")
table = self.initialize_text_table(800)
table.set_cols_dtype(["t", "t", "t", "t", "t", "t", "t", "t", "t"])
table.header(["Id", "Name", "Version", "Rev.", "Format", "Created", "Size", "Compressed", "Status"])
delete_image = None
for image in images:
if str(image.dbId) == str(do_args.id):
img_status = self.get_image_status(image.status)
table.add_row([image.dbId, image.name, image.version, image.revision, image.targetFormat.name,
image.created.strftime("%Y-%m-%d %H:%M:%S"), size(image.fileSize),
"X" if image.compress else "", img_status])
delete_image = image
if delete_image is not None:
print table.draw() + "\n"
if generics_utils.query_yes_no(
"Do you really want to delete image with id " + str(delete_image.dbId)):
self.delete_image(delete_image)
printer.out("Image deleted", printer.OK)
return 0
else:
printer.out("Image not found", printer.ERROR)
except ArgumentParserError as e:
printer.out("In Arguments: " + str(e), printer.ERROR)
self.help_delete()
return 2
except ValueError as e:
printer.out(str(e), printer.ERROR)
return 2
except Exception as e:
return handle_uforge_exception(e)
def help_delete(self):
do_parser = self.arg_delete()
do_parser.print_help()
def arg_cancel(self):
do_parser = ArgumentParser(prog=self.cmd_name + " cancel", add_help=True,
description="Cancels a machine image build or publish")
mandatory = do_parser.add_argument_group("mandatory arguments")
mandatory.add_argument('--id', dest='id', required=True, help="the ID of the machine image to cancel")
return do_parser
def do_cancel(self, args):
try:
# add arguments
do_parser = self.arg_cancel()
try:
do_args = do_parser.parse_args(shlex.split(args))
except SystemExit as e:
return 2
# call UForge API
printer.out("Searching image with id [" + do_args.id + "] ...", printer.INFO)
images = self.get_all_images()
if len(images) == 0 :
raise ValueError("No image found")
table = self.initialize_text_table(800)
table.set_cols_dtype(["t", "t", "t", "t", "t", "t", "t", "t", "t"])
table.header(["Id", "Name", "Version", "Rev.", "Format", "Created", "Size", "Compressed", "Status"])
cancel_image = None
for image in images:
if str(image.dbId) == str(do_args.id):
img_status = self.get_image_status(image.status)
table.add_row([image.dbId, image.name, image.version, image.revision, image.targetFormat.name,
image.created.strftime("%Y-%m-%d %H:%M:%S"), size(image.fileSize),
"X" if image.compress else "", img_status])
print table.draw() + "\n"
cancel_image = image
if cancel_image is None or cancel_image.status.complete or cancel_image.status.cancelled:
raise ValueError("Image is not being generated, impossible to cancel")
if cancel_image is not None:
if generics_utils.query_yes_no(
"Do you really want to cancel image with id " + str(cancel_image.dbId)):
self.cancel_image(cancel_image)
else:
printer.out("Image not found", printer.ERROR)
except ArgumentParserError as e:
printer.out("In Arguments: " + str(e), printer.ERROR)
self.help_delete()
return 2
except ValueError as e:
printer.out(str(e), printer.ERROR)
return 2
except Exception as e:
return handle_uforge_exception(e)
def help_cancel(self):
do_parser = self.arg_cancel()
do_parser.print_help()
def arg_download(self):
doParser = ArgumentParser(prog=self.cmd_name + " download", add_help=True,
description="Downloads a machine image to the local filesystem")
mandatory = doParser.add_argument_group("mandatory arguments")
mandatory.add_argument('--id', dest='id', required=True, help="the ID of the machine image to download")
mandatory.add_argument('--file', dest='file', required=True,
help="the pathname where to store the machine image")
return doParser
def do_download(self, args):
try:
# add arguments
doParser = self.arg_download()
try:
doArgs = doParser.parse_args(shlex.split(args))
except SystemExit as e:
return
# call UForge API
printer.out("Searching image with id [" + doArgs.id + "] ...", printer.INFO)
images = self.get_all_images()
if len(images) == 0:
raise ValueError("No image available")
dlImage = None
for image in images:
if str(image.dbId) == str(doArgs.id):
dlImage = image
if dlImage is not None and dlImage.status.complete and not dlImage.status.error and dlImage.compress:
download_url = self.api.getUrl() + "/" + dlImage.downloadUri
dlUtils = download_utils.Download(download_url, doArgs.file, not self.api.getDisableSslCertificateValidation())
try:
dlUtils.start()
except Exception, e:
return
printer.out("Image downloaded", printer.OK)
elif dlImage is None:
printer.out("Unable to find the image to download in your library", printer.ERROR)
elif not dlImage.status.complete:
printer.out("The image is being generated. Unable to download. Please retry later", printer.ERROR)
elif not dlImage.compress:
printer.out("The image has been prepared to be published (not compressed). Cannot download.",
printer.ERROR)
else:
printer.out("Cannot download this image", printer.ERROR)
except ArgumentParserError as e:
printer.out("In Arguments: " + str(e), printer.ERROR)
self.help_download()
return 2
except ValueError as e:
printer.out(str(e), printer.ERROR)
return 2
except Exception as e:
return handle_uforge_exception(e)
def help_download(self):
doParser = self.arg_download()
doParser.print_help()
def parse_args(self, args):
do_parser = self.arg_publish()
do_args = do_parser.parse_args(shlex.split(args))
if not do_args:
raise ArgumentParserError("No arguments")
return do_args
def get_image_status(self, status):
if (status.complete and not status.error):
imgStatus = "Done"
elif status.error:
imgStatus = "Error"
elif status.cancelled:
imgStatus = "Canceled"
else:
imgStatus = "In progress (" + str(status.percentage) + "%)"
return imgStatus
def do_publish_with_id(self, id, template):
images = self.get_all_images()
image = self.get_image(images, str(id))
if image is None:
raise ValueError("Image not found")
if not is_image_ready_to_publish(image, None):
raise ValueError("Image with name '" + image.name + "' can not be published")
source = retrieve_source_from_image(self, image)
builder = self.find_builder(image, template)
publish_image_from_builder(self, builder, template, source, 1, image)
def do_publish_without_id(self, template):
if template.has_key("stack") and template["stack"].has_key("name") and template["stack"].has_key("version"):
query_string = "name=='" + template["stack"]["name"] + "';version=='" + template["stack"]["version"] + "'"
appliance = self.retrieve_appliances_from_name_and_version(query_string)
appliance = appliance[0]
publish_all_builders(self, template, appliance)
def build_publish_image(self, image, builder, cred_account):
format_type = image.targetFormat.format.name
publish_method = getattr(publish_builders, "publish_" + generics_utils.remove_special_chars(format_type), None)
if publish_method:
publish_image = publish_method(builder, cred_account)
if publish_image is None:
raise ValueError("Could not find the builder")
else:
raise ValueError("Builder type unknown: " + format_type)
publish_image.credAccount = cred_account
return publish_image
def retrieve_appliances_from_name_and_version(self, query_string):
appliances = self.api.Users(self.login).Appliances().Getall(Query=query_string)
appliance = appliances.appliances.appliance
if appliance is None or len(appliance) != 1:
raise ValueError("No template found on the platform")
return appliance
def is_publish_image_ready_to_deploy(self, publishImage):
if not publishImage.status.complete or publishImage.status.error or publishImage.status.cancelled:
return False
return True
def find_builder(self, image, template):
for builder in template["builders"]:
if image.targetFormat.name == builder["type"]:
return builder
raise ValueError("No builder part found for image with this format type")
def get_account_name_from_template(self, template, builder):
account_name = ""
if not template.has_key("accounts"):
return ""
for account in template["accounts"]:
if account.has_key("type") and account["type"] == builder["type"] and account.has_key("name"):
account_name = account["name"]
return account_name
def get_publish_image_from_publish_id(self, publish_id):
publish_images = self.api.Users(self.login).Pimages.Getall()
publish_images = publish_images.publishImages.publishImage
if publish_images is None or len(publish_images) == 0:
raise TypeError("No published images available")
publish_image = None
for p in publish_images:
if str(p.dbId) == str(publish_id):
publish_image = p
if publish_image is None:
raise TypeError("Published image not found")
else:
return publish_image
def deploy_aws(self, deploy_file, publish_image):
attributes = check_and_get_attributes_from_file(deploy_file, ["name"])
deployment = build_deployment_aws(attributes)
deployed_instance = call_deploy(self, publish_image, deployment)
deployed_instance_id = deployed_instance.applicationId
status = show_deploy_progress_without_percentage(self, deployed_instance_id)
return print_deploy_info(self, status, deployed_instance_id)
def deploy_openstack(self, deploy_file, publish_image):
attributes = check_and_get_attributes_from_file(deploy_file, ["name", "region", "network", "flavor"])
bar_status = OpStatus()
progress = create_progress_bar_openstack(bar_status)
self.api.setTimeout(300)
cred_account = retrieve_credaccount(self, publish_image.dbId, publish_image)
self.api.setTimeout(constants.HTTP_TIMEOUT)
deployment = build_deployment_openstack(attributes, publish_image, cred_account)
bar_status.message = "Deploying instance"
bar_status.percentage = 50
progress.update(bar_status.percentage)
deployed_instance = call_deploy(self, publish_image, deployment)
deployed_instance_id = deployed_instance.applicationId
status = show_deploy_progress_with_percentage(self, deployed_instance_id, bar_status, progress)
return print_deploy_info(self, status, deployed_instance_id)
def deploy_azure(self, deploy_file, publish_image):
attributes = check_and_get_attributes_from_file(deploy_file, ["name", "userName"])
deployment = build_deployment_azure(attributes)
deployed_instance = call_deploy(self, publish_image, deployment)
deployed_instance_id = deployed_instance.applicationId
status = show_deploy_progress_without_percentage(self, deployed_instance_id)
return print_deploy_info(self, status, deployed_instance_id)
def get_all_images(self):
images = self.api.Users(self.login).Images.Getall()
images = images.images.image
if images is None or len(images) == 0:
return []
else :
return images
def get_image(self, images, image_id):
if images is None:
return None
for iimage in images:
if str(iimage.dbId) == str(image_id):
image = iimage
return image
return None
def delete_image(self, image):
if is_uri_based_on_appliance(image.uri):
appliance_id = extract_appliance_id(image.uri)
self.api.Users(self.login).Appliances(appliance_id).Images(image.dbId).Delete()
elif is_uri_based_on_scan(image.uri):
scanned_instance_id = extract_scannedinstance_id(image.uri)
scan_id = extract_scan_id(image.uri)
self.api.Users(self.login).Scannedinstances(scanned_instance_id).Scans(scan_id).Images(None, image.dbId).Delete()
else:
raise ValueError("Internal error: image cannot be deleted.")
def cancel_image(self, image):
if is_uri_based_on_appliance(image.uri):
appliance_id = extract_appliance_id(image.uri)
self.api.Users(self.login).Appliances(appliance_id).Images(image.dbId).Status.Cancel()
elif is_uri_based_on_scan(image.uri):
scanned_instance_id = extract_scannedinstance_id(image.uri)
scan_id = extract_scan_id(image.uri)
self.api.Users(self.login).Scannedinstances(scanned_instance_id).Scans(scan_id). \
Images(None, image.dbId).Status.Cancel()
printer.out("Image Canceled", printer.OK)
def initialize_text_table(self, width):
table = Texttable(width)
return table
|
{
"content_hash": "445dbff65b9efd3ba5a0cface4f22fe8",
"timestamp": "",
"source": "github",
"line_count": 557,
"max_line_length": 171,
"avg_line_length": 44.44524236983842,
"alnum_prop": 0.5891904992729036,
"repo_name": "emuus/hammr",
"id": "372b293724ef7ffc0b661e7311b17844f3529f58",
"size": "25391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hammr/commands/image/image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "201"
},
{
"name": "Dockerfile",
"bytes": "399"
},
{
"name": "Python",
"bytes": "465439"
},
{
"name": "Shell",
"bytes": "12081"
}
],
"symlink_target": ""
}
|
import time
from database.dict_converters.converter_base import ConverterBase
class MatchConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 5,
}
@classmethod
def _convert(cls, matches, dict_version):
CONVERTERS = {
3: cls.matchesConverter_v3,
}
return CONVERTERS[dict_version](matches)
@classmethod
def matchesConverter_v3(cls, matches):
return map(cls.matchConverter_v3, matches)
@classmethod
def matchConverter_v3(cls, match):
for alliance in ['red', 'blue']:
match.alliances[alliance]['team_keys'] = match.alliances[alliance].pop('teams')
match.alliances[alliance]['surrogate_team_keys'] = match.alliances[alliance].pop('surrogates')
match_dict = {
'key': match.key.id(),
'event_key': match.event.id(),
'comp_level': match.comp_level,
'set_number': match.set_number,
'match_number': match.match_number,
'alliances': match.alliances,
'winning_alliance': match.winning_alliance,
'score_breakdown': match.score_breakdown,
'videos': match.videos,
}
if match.time is not None:
match_dict['time'] = int(time.mktime(match.time.timetuple()))
else:
match_dict['time'] = None
if match.actual_time is not None:
match_dict['actual_time'] = int(time.mktime(match.actual_time.timetuple()))
else:
match_dict['actual_time'] = None
if match.predicted_time is not None:
match_dict['predicted_time'] = int(time.mktime(match.predicted_time.timetuple()))
else:
match_dict['predicted_time'] = None
if match.post_result_time is not None:
match_dict['post_result_time'] = int(time.mktime(match.post_result_time.timetuple()))
else:
match_dict['post_result_time'] = None
return match_dict
|
{
"content_hash": "2878d85222bf1a57b3211de396b06ac5",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 106,
"avg_line_length": 36.872727272727275,
"alnum_prop": 0.5956607495069034,
"repo_name": "nwalters512/the-blue-alliance",
"id": "0299081d73247b01a1a776625dd482db6a289a51",
"size": "2028",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "database/dict_converters/match_converter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "101"
},
{
"name": "CSS",
"bytes": "374878"
},
{
"name": "HTML",
"bytes": "715987"
},
{
"name": "JavaScript",
"bytes": "402170"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Python",
"bytes": "2080239"
},
{
"name": "Ruby",
"bytes": "3494"
},
{
"name": "Shell",
"bytes": "45"
}
],
"symlink_target": ""
}
|
"""This example illustrates how to get all campaigns and log details to
Google Cloud Logging using a custom logging interceptor.
"""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
from cloud_logging_interceptor import CloudLoggingInterceptor
_API_VERSION = "v12"
def main(client, customer_id):
# Instantiate the GoogleAdsService object with a custom interceptor.
ga_service = client.get_service(
"GoogleAdsService",
interceptors=[CloudLoggingInterceptor(api_version=_API_VERSION)],
)
query = """
SELECT
campaign.id,
campaign.name
FROM campaign
ORDER BY campaign.id
LIMIT 10"""
# Issues a search request using streaming.
stream = ga_service.search_stream(customer_id=customer_id, query=query)
for batch in stream:
for row in batch.results:
print(
f"Campaign with ID {row.campaign.id} and name "
f'"{row.campaign.name}" was found.'
)
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version=_API_VERSION)
parser = argparse.ArgumentParser(
description="Lists all campaigns for specified customer."
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
args = parser.parse_args()
try:
main(googleads_client, args.customer_id)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
|
{
"content_hash": "53edfe3c1fde375f575fd25fda7110cf",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 31.458333333333332,
"alnum_prop": 0.6388520971302428,
"repo_name": "googleads/google-ads-python",
"id": "24849b13241572696fbd2cd0df6c98cac7d04d0d",
"size": "2862",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/custom_logging_interceptor/get_campaigns.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
"""
Contains the logic for:
finding the 'ini' file, generating a configuration object
setting up a logging object
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import ast
import sys
import logging
import logging.handlers # RotatingFileHandler lives in the handlers submodule
import os.path
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
FILE_PATH = os.path.join('alarmer', 'alarmer.ini')
def find_config_file():
"""
Python normally stores 'datafiles' under the directory defined
by sys.prefix, but some operating systems (like Ubuntu) break
with this convention.
-Raises- IOError when unable to find file
-Returns- String
"""
first_guess = os.path.join(sys.prefix, FILE_PATH)
second_guess = os.path.join(sys.prefix, 'share', FILE_PATH)
if os.path.isfile(first_guess):
return first_guess
elif os.path.isfile(second_guess):
return second_guess
else:
raise IOError('Unable to find {0}'.format(FILE_PATH))
def get_config(section):
"""
A helper function for finding and constructing the configuration
object.
-Returns- Instantiated ConfigReader object
@param section
The default section to perform look ups in.
"""
config_file = find_config_file()
config = ConfigReader(config_file, section)
return config
def get_logger(name='alarmer.log', level=None, location=None, max_size=None, rollover_count=None):
"""
A factory function for making logging objects
@param name
The name of the new log file.
Default is 'alarmer.log'
@param level
How verbose the logging is. Valid Values are CRITICAL, ERROR, WARNING,
INFO, and DEBUG
@param location
The directory to save the log file under
@param max_size
The set limit of a log file size in MB. Upon exceeding this limit, the
log file rolls over.
@param rollover_count
How many historic log sets to keep
"""
if not (level and location and max_size and rollover_count):
msg = 'Missing required param(s): level {0}, location {1}, max_size {2}, rollover_count {3}'.format(
level, location, max_size, rollover_count)
raise ValueError(msg)
# build base logger
logger = logging.getLogger()
logger.setLevel(level.upper())
# formatter defined
# Looks like -> 2015-12-12 15:10:15,342 - INFO [<someModule>:89] Hello World!
formatter = logging.Formatter('%(asctime)s - %(levelname)s [%(module)s:%(lineno)d] %(message)s')
# file handler construction
filename = os.path.join(location, name)
max_size = max_size * 1000 * 1000
handler = logging.handlers.RotatingFileHandler(filename,
maxBytes=max_size,
backupCount=rollover_count)
handler.setFormatter(formatter)
# finish building logger object
logger.addHandler(handler)
return logger
class ConfigReader(object):
"""
Allows for strings to be cast into other data types from
the supplied ini file.
@param config_file
The absolute file path to the configuration file
@param default_section
The area to look at in the config file when performing a look up.
Reduces boiler plate as most objects will look in the same section
for different bits of information.
"""
def __init__(self, config_file, default_section):
self._config = ConfigParser.SafeConfigParser()
self._config.read(config_file)
self.default_section = default_section
def grab(self, item, section=None, cast=True):
"""
Obtain a value from the configuration file
-Raises- ConfigParsingError when unable to find the requested value
@param item
The object in the config file to look for
@param section
Which area to look at within the config file.
Default = The supplied default_section when instantiating the ConfigReader
@param cast
ConfigReader values are always strings. When cast is True, it'll attempt
to convert the found item into a Python data type, like a float or boolean.
Casting to a function or class is not supported by design.
Default = True
"""
if section is None:
area = self.default_section
else:
area = section
try:
value = self._config.get(area, item)
except ConfigParser.NoOptionError as doh:
raise ConfigParsingError(doh)
except ConfigParser.NoSectionError as doh:
raise ConfigParsingError(doh)
if cast:
if value.title() in ('True', 'False'):
value = value.title()
try:
answer = ast.literal_eval(value)
except (SyntaxError, ValueError):
answer = value
else:
answer = value
return answer
def grab_many(self, section=None):
"""
Returns an entire section of an ini file in a dictionary.
Section values are split on commas, as a result, the dictionary values
are key -> string : value -> list of strings
-Returns-
{ 'value_name' : ['value1', 'value2']}
-Raises-
ConfigParsingError when unable to find the requested section
@param section
The section within the ini to return all items for. If not supplied
use the default section defined when instantiating the ConfigReader
object.
Default is None
"""
if section is None:
area = self.default_section
else:
area = section
try:
base = dict(self._config.items(area))
except ConfigParser.NoSectionError as doh:
raise ConfigParsingError(doh)
else:
final = {k:base[k].split(',') for k in base}
return final
class ConfigParsingError(Exception):
"""
A generic error for failures in parsing the configuration file.
"""
pass
if __name__ == '__main__':
print(find_config_file())
|
{
"content_hash": "009af665d544b30efb08fbc626802ea4",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 123,
"avg_line_length": 31.684466019417474,
"alnum_prop": 0.5929217098207445,
"repo_name": "willnx/alarmer",
"id": "8418d7b3bc515faa17e15ff5d0ae6818c666169c",
"size": "6551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alarmer/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "526"
},
{
"name": "Python",
"bytes": "30273"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/reactor/shared_rct_kessel_imperial_sds_secret_ops.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","rct_kessel_imperial_sds_secret_ops_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "dcc2681921e7e3f113132de20762d250",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 106,
"avg_line_length": 28.384615384615383,
"alnum_prop": 0.7208672086720868,
"repo_name": "obi-two/Rebelion",
"id": "ae723522e515d39cba43008d5c41c588f15c5e75",
"size": "514",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/ship/components/reactor/shared_rct_kessel_imperial_sds_secret_ops.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from .dataops import *
__all__ = ['dataops']
|
{
"content_hash": "ed3a24a4c9f606e05d3341a8f61f0383",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 22,
"avg_line_length": 15.333333333333334,
"alnum_prop": 0.5869565217391305,
"repo_name": "get9/ml-test",
"id": "476add6cdb523bde8155e9c29c9298c9a8194a2c",
"size": "46",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simpleml/util/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23713"
}
],
"symlink_target": ""
}
|
"""Command for restarting instances of managed instance group."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.instance_groups import flags as instance_groups_flags
from googlecloudsdk.command_lib.compute.instance_groups.managed import flags as instance_groups_managed_flags
from googlecloudsdk.command_lib.compute.instance_groups.managed import rolling_action
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class StartUpdate(base.Command):
"""Starts a rolling restart of instances in a managed instance group."""
@staticmethod
def Args(parser):
instance_groups_managed_flags.AddMaxUnavailableArg(parser)
instance_groups_managed_flags.AddMinReadyArg(parser)
instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGER_ARG.AddArgument(
parser)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
resources = holder.resources
cleared_fields = []
with client.apitools_client.IncludeFields(cleared_fields):
minimal_action = (client.messages.InstanceGroupManagerUpdatePolicy.
MinimalActionValueValuesEnum.RESTART)
return client.MakeRequests([
rolling_action.CreateRequest(args, cleared_fields, client, resources,
minimal_action)
])
StartUpdate.detailed_help = {
'brief':
'Restarts instances in a managed instance group',
'DESCRIPTION':
"""\
*{command}* restarts instances in a managed instance group."""
}
|
{
"content_hash": "121ba43c388aa791e934b62342691424",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 109,
"avg_line_length": 37.27906976744186,
"alnum_prop": 0.7292576419213974,
"repo_name": "Sorsly/subtle",
"id": "0bb7a0c6a50839273027bab2a46260549d2a1338",
"size": "2198",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/surface/compute/instance_groups/managed/rolling_action/restart.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
}
|
'''
Candlestick pattern functions, implemented at module level rather than wrapped in a class.
'''
import numpy as np
import pandas as pd
import json
import pandas.io.data as web
from datetime import date, datetime, timedelta
from collections import defaultdict
start = datetime(2010, 1, 1)
end = date.today()
df1 = pd.read_csv('data/companylist.csv')
df2 = pd.read_csv('data/companylist1.csv')
df3 = pd.read_csv('data/companylist2.csv')
data = web.DataReader("F", 'yahoo', start, end)
symbols = np.append(df1.Symbol.values, df2.Symbol.values)
symbols = np.append(symbols, df3.Symbol.values)
symbol = 'AAPL'
c = web.DataReader(symbol, 'yahoo', start, end)
def doji(data_pt):
if float(max(data_pt['Close'], data_pt['Open']))/float(min(data_pt['Close'], data_pt['Open'])) < 1.001:
return True
else:
return False
def dragonfly_doji(data_pt):
'''
Look for a long lower shadow with a small body
(open and close are within pennies of each other).
'''
a = doji(data_pt)
b = ((data_pt['Close']-data_pt['Low'])/data_pt['Close']) > 0.03
c = similar_price(data_pt['Open'], data_pt['High'])
if a and b and c:
return True
else:
return False
def gravestone_doji(data_pt):
'''
Look for a candle with a tall upper shadow and little or no lower one.
The opening and closing prices should be within pennies of each other.
'''
a = doji(data_pt)
b = ((data_pt['High']-data_pt['Open'])/data_pt['Open']) > 0.03
c = similar_price(data_pt['Open'], data_pt['Low'])
if a and b and c:
return True
else:
return False
def long_legged_doji(data_pt):
'''
Look for a doji (opening and closing prices are within a few pennies of each other) accompanied by long shadows.
'''
a = doji(data_pt)
b = ((data_pt['High']-data_pt['Open'])/data_pt['Open']) > 0.03
c = ((data_pt['Close']-data_pt['Low'])/data_pt['Close']) > 0.03
if a and b and c:
return True
else:
return False
def body_candle(data_pt):
return abs(data_pt['Close'] - data_pt['Open'])
def black_candle(data_pt):
if (data_pt['Close'] > data_pt['Open']) and (not doji(data_pt)):
return False
else:
return True
def tall_black_candle(data_pt):
if black_candle(data_pt) and float(data_pt['Open'])/(data_pt['Close']) > 1.02:
return True
else:
return False
def small_black_candle(data_pt):
if black_candle(data_pt) and (not tall_black_candle(data_pt)):
return True
else:
return False
def white_candle(data_pt):
if (data_pt['Close'] > data_pt['Open']) and (not doji(data_pt)):
return True
else:
return False
def tall_white_candle(data_pt):
if white_candle(data_pt) and float(data_pt['Close'])/(data_pt['Open']) > 1.02:
return True
else:
return False
def small_white_candle(data_pt):
if white_candle(data_pt) and not tall_white_candle(data_pt):
return True
else:
return False
def white_marubozu_candle(data_pt):
if white_candle(data_pt) and (data_pt['Open'] == data_pt['Low']) and (data_pt['Close'] == data_pt['High']):
return True
else:
return False
def black_marubozu_candle(data_pt):
if black_candle(data_pt) and (data_pt['Open'] == data_pt['High']) and (data_pt['Close'] == data_pt['Low']):
return True
else:
return False
def closing_black_marubozu_candle(data_pt):
'''
Look for a tall black candle with an upper shadow but no lower one.
'''
if tall_black_candle(data_pt) and (data_pt['Open'] != data_pt['High']) and (data_pt['Close'] == data_pt['Low']):
return True
else:
return False
def closing_white_marubozu_candle(data_pt):
'''
Look for a tall white candle with an lower shadow but no upper one.
'''
if tall_white_candle(data_pt) and (data_pt['Open'] != data_pt['Low']) and (data_pt['Close'] == data_pt['High']):
return True
else:
return False
def black_spinning_top_candle(data_pt):
'''
Look for a small black body with shadows taller than the body.
'''
a = small_black_candle(data_pt)
b = (data_pt['Close'] - data_pt['Low']) > 2 * body_candle(data_pt)
c = (data_pt['High'] - data_pt['Open']) > 2 * body_candle(data_pt)
if a and b and c:
return True
else:
return False
def white_spinning_top_candle(data_pt):
'''
Look for a small white bodied candle with tall shadows.
'''
a = small_white_candle(data_pt)
b = (data_pt['Close'] - data_pt['Low']) > 2 * body_candle(data_pt)
c = (data_pt['High'] - data_pt['Open']) > 2 * body_candle(data_pt)
if a and b and c:
return True
else:
return False
def up_price_trend(data_pt, data_pt1, data_pt2):
'''
data_pt: the first day for the pattern
data_pt1: the day before the pattern, last day for the upward trend
data_pt2: the first day to compare as upward trend
'''
if ((data_pt1['Close'] /float(data_pt2['Open'])) > 1.03):
return True
else:
return False
def down_price_trend(data_pt, data_pt1, data_pt2):
'''
data_pt: the first day for the pattern
    data_pt1: the day before the pattern, last day for the downward trend
    data_pt2: the first day to compare as downward trend
'''
if ((float(data_pt2['Open']/data_pt1['Close'])) > 1.03):
return True
else:
return False
def similar_price(data_pt1,data_pt2, percent = 0.001):
a = (abs(data_pt1 - data_pt2)/(data_pt2)) < percent
if a :
return True
else:
return False
def eight_new_price(data):
for i in xrange(1,9):
if not (data.iloc[-i]['High'] > data.iloc[-i-1]['High']):
return False
if data.iloc[-9]['High'] < data.iloc[-10]['High']:
return True
else:
return False
def ten_new_price(data):
for i in xrange(1,11):
if not (data.iloc[-i]['High'] > data.iloc[-i-1]['High']):
return False
if data.iloc[-11]['High'] < data.iloc[-12]['High']:
return True
else:
return False
def twelve_new_price(data):
for i in xrange(1,13):
if not (data.iloc[-i]['High'] > data.iloc[-i-1]['High']):
return False
if data.iloc[-13]['High'] < data.iloc[-14]['High']:
return True
else:
return False
def thirteen_new_price(data):
for i in xrange(1,14):
if not (data.iloc[-i]['High'] > data.iloc[-i-1]['High']):
return False
if data.iloc[-14]['High'] < data.iloc[-15]['High']:
return True
else:
return False
def bearish_abandoned_baby(data):
a = data.iloc[-1]['Close'] < data.iloc[-1]['Open']
b = float(data.iloc[-1]['Open'])/(data.iloc[-1]['Close']) > 1.02
c = data.iloc[-1]['High'] < data.iloc[-2]['Low']
d = float(max(data.iloc[-2]['Close'], data.iloc[-2]['Open']))/float(min(data.iloc[-2]['Close'], data.iloc[-2]['Open'])) < 1.001
e = data.iloc[-2]['Low'] > data.iloc[-3]['High']
f = float(data.iloc[-3]['Close'])/(data.iloc[-3]['Open']) > 1.02
g = up_price_trend(data.iloc[-3],data.iloc[-4], data.iloc[-6])
if a and b and c and d and e and f and g:
return True
else:
return False
# if data.iloc[-1]['Close'] < data.iloc[-1]['Open']:
# if float(data.iloc[-1]['Open'])/(data.iloc[-1]['Close']) > 1.03:
# if data.iloc[-1]['High'] < data.iloc[-2]['Low']:
# if float(max(data.iloc[-2]['Close'], data.iloc[-2]['Open']))/float(min(data.iloc[-2]['Close'], data.iloc[-2]['Open'])) < 1.01:
# if data.iloc[-2]['Low'] > data.iloc[-3]['High']:
# if float(data.iloc[-3]['Close'])/(data.iloc[-3]['Open']) > 1.03:
def bullish_abandoned_baby(data):
a = data.iloc[-1]['Close'] > data.iloc[-1]['Open']
b = float(data.iloc[-1]['Close'])/(data.iloc[-1]['Open']) > 1.02
c = data.iloc[-1]['Low'] > data.iloc[-2]['High']
d = float(max(data.iloc[-2]['Close'], data.iloc[-2]['Open']))/float(min(data.iloc[-2]['Close'], data.iloc[-2]['Open'])) < 1.001
e = data.iloc[-2]['High'] < data.iloc[-3]['Low']
f = float(data.iloc[-3]['Open'])/(data.iloc[-3]['Close']) > 1.02
g = down_price_trend(data.iloc[-3],data.iloc[-4], data.iloc[-6])
if a and b and c and d and e and f and g:
return True
else:
return False
def above_stomach(data):
a = data.iloc[-2]['Close'] < data.iloc[-2]['Open']
b = data.iloc[-2]['Open']/float(data.iloc[-2]['Close']) > 1.02
c = (data.iloc[-1]['Close'] > data.iloc[-1]['Open']) and (data.iloc[-1]['Close'] > data.iloc[-2]['Open'])
d = data.iloc[-1]['Close']/float(data.iloc[-1]['Open']) > 1.02
e = data.iloc[-1]['Open'] > ((float(data.iloc[-2]['Open'])+data.iloc[-2]['Close'])/2)
f = data.iloc[-2]['Open'] > data.iloc[-1]['Open']
g = up_price_trend(data.iloc[-2],data.iloc[-3], data.iloc[-5])
    if a and b and c and d and e and f and g:
return True
else:
return False
def advance_block(data):
a = white_candle(data.iloc[-1])
b = white_candle(data.iloc[-2])
c = white_candle(data.iloc[-3])
day1_body = data.iloc[-3]['Close']/float(data.iloc[-3]['Open'])
day2_body = data.iloc[-2]['Close']/float(data.iloc[-2]['Open'])
day3_body = data.iloc[-1]['Close']/float(data.iloc[-1]['Open'])
d = day1_body > 1.03
e = (day2_body > 1.005) and ( day2_body < day1_body)
f = (day3_body > 1.005) and ( day3_body < day1_body)
g = (data.iloc[-1]['Open'] < data.iloc[-2]['Close']) and (data.iloc[-1]['Open'] > data.iloc[-2]['Open'])
h = (data.iloc[-2]['Open'] < data.iloc[-3]['Close']) and (data.iloc[-2]['Open'] > data.iloc[-3]['Open'])
j = (data.iloc[-1]['High'] - data.iloc[-1]['Close']) > (data.iloc[-1]['Close'] - data.iloc[-1]['Open'])
k = (data.iloc[-2]['High'] - data.iloc[-2]['Close']) > (data.iloc[-2]['Close'] - data.iloc[-2]['Open'])
l = up_price_trend(data.iloc[-3],data.iloc[-4], data.iloc[-6])
if a and b and c and d and e and f and g and h and j and k and l:
return True
else:
return False
def below_stomach(data):
'''
Look for a tall white candle followed by a candle that has a body below the middle of the white candle.
    The second candle is usually black, but the guidelines I saw did not mention this as a requirement.
'''
a = black_candle(data.iloc[-1])
b = white_candle(data.iloc[-2])
c = data.iloc[-1]['Open']/float(data.iloc[-1]['Close']) > 1.02
d = data.iloc[-2]['Close']/float(data.iloc[-2]['Open']) > 1.02
    e = (data.iloc[-1]['Open'] > data.iloc[-2]['Open']) and (data.iloc[-1]['Open'] < (float(data.iloc[-2]['Open']) + data.iloc[-2]['Close'])/2)
f = data.iloc[-1]['Close'] < data.iloc[-2]['Open']
g = up_price_trend(data.iloc[-2],data.iloc[-3], data.iloc[-5])
if a and b and c and d and e and f and g:
return True
else:
return False
def bearish_belt_hold(data):
'''
Price opens at the high for the day and closes near the low, forming a tall black candle, often with a small lower shadow.
'''
a = tall_black_candle(data.iloc[-1])
    b = data.iloc[-1]['Close']/float(data.iloc[-1]['Low']) < 1.01  # close near (at or just above) the day's low
c = (data.iloc[-1]['Open'] == data.iloc[-1]['High'])
d = white_candle(data.iloc[-2])
e = up_price_trend(data.iloc[-1],data.iloc[-2], data.iloc[-4])
if a and b and c and d and e:
return True
else:
return False
def bearish_breakaway(data):
'''
Look for 5 candle lines in an upward price trend with the first candle being a tall white one.
The second day should be a white candle with a gap between the two bodies, but the shadows can overlap.
Day three should have a higher close and the candle can be any color.
Day 4 shows a white candle with a higher close.
The last day is a tall black candle with a close within the gap between the bodies of the first two candles.
'''
a = tall_white_candle(data.iloc[-5])
b = white_candle(data.iloc[-4])
c = data.iloc[-4]['Open'] > data.iloc[-5]['Close']
d = data.iloc[-3]['Close'] > data.iloc[-4]['Close']
e = data.iloc[-2]['Close'] > data.iloc[-3]['Close']
f = white_candle(data.iloc[-2])
g = tall_black_candle(data.iloc[-1])
h = (data.iloc[-1]['Close'] < data.iloc[-4]['Open']) and (data.iloc[-1]['Close'] > data.iloc[-5]['Close'])
i = up_price_trend(data.iloc[-5],data.iloc[-6], data.iloc[-8])
if a and b and c and d and e and f and g and h and i:
return True
else:
return False
def bearish_doji_star(data):
'''
Look for a two-candle pattern in an uptrend.
The first candle is a long white one.
The next day, price gaps higher and the body remains above the prior body.
A doji forms with the opening and closing prices within pennies of each other.
The shadows on the doji should be comparatively short.
'''
a = tall_white_candle(data.iloc[-2])
b = (data.iloc[-1]['Open'] > data.iloc[-2]['Close']) and (data.iloc[-1]['Close'] > data.iloc[-2]['Close'])
c = doji(data.iloc[-1])
d = (data.iloc[-1]['High'] - data.iloc[-1]['Low']) < body_candle(data.iloc[-2])
e = up_price_trend(data.iloc[-2],data.iloc[-3], data.iloc[-5])
if a and b and c and d and e:
return True
else:
return False
def bearish_engulfing(data):
'''
Look for a two candle pattern in an upward price trend.
The first candle is white and the second is black.
The body of the black candle is taller and overlaps the candle of the white body.
Shadows are unimportant.
'''
a = white_candle(data.iloc[-2])
b = black_candle(data.iloc[-1])
c = (data.iloc[-1]['Close'] < data.iloc[-2]['Open']) and (data.iloc[-1]['Open'] > data.iloc[-2]['Close'])
d = up_price_trend(data.iloc[-2],data.iloc[-3], data.iloc[-5])
if a and b and c and d:
return True
else:
return False
def bearish_harami(data):
'''
Look for a tall white candle followed by a small black one.
The opening and closing prices must be within the body of the white candle.
Ignore the shadows.
Either the tops of the bodies or the bottoms (or both) must be a different price.
'''
a = tall_white_candle(data.iloc[-2])
b = (black_candle(data.iloc[-1])) and (not tall_black_candle(data.iloc[-1]))
c = (data.iloc[-1]['Open'] < data.iloc[-2]['Close']) and (data.iloc[-1]['Close'] > data.iloc[-2]['Open'])
d = (data.iloc[-1]['High'] != data.iloc[-2]['High']) or (data.iloc[-1]['Low'] != data.iloc[-2]['Low'])
e = up_price_trend(data.iloc[-2],data.iloc[-3], data.iloc[-5])
    if a and b and c and d and e:
return True
else:
return False
def bearish_harami_cross(data):
'''
Look for a tall white candle in an upward price trend.
The next day, a doji appears that is inside (including the shadows) the trading range of the white candle.
'''
a = tall_white_candle(data.iloc[-2])
b = doji(data.iloc[-1])
c = (data.iloc[-1]['High'] < data.iloc[-2]['High']) and (data.iloc[-1]['Low'] > data.iloc[-2]['Low'])
d = up_price_trend(data.iloc[-2],data.iloc[-3], data.iloc[-5])
if a and b and c and d:
return True
else:
return False
def bearish_kicking(data):
'''
    The first day is a white marubozu candle followed by a black marubozu. There must be a gap between the two candles.
'''
a = white_marubozu_candle(data.iloc[-2])
b = black_marubozu_candle(data.iloc[-1])
c = data.iloc[-1]['Open'] < data.iloc[-2]['Close']
if a and b and c:
return True
else:
return False
def bearish_meeting_lines(data):
'''
Look for a tall white candle in an upward price trend.
Following that, the next candle should be a tall black one.
The closes of the two candles should be "near" one another, whatever that means.
'''
a = up_price_trend(data.iloc[-2],data.iloc[-3], data.iloc[-5])
b = tall_white_candle(data.iloc[-2])
c = tall_black_candle(data.iloc[-1])
d = (abs(data.iloc[-1]['Close'] - data.iloc[-2]['Close'])/(data.iloc[-1]['Close'])) < 0.001
if a and b and c and d:
return True
else:
return False
def bearish_separating_lines(data):
'''
Look for a tall white candle in a downward price trend followed by a tall black candle.
The opening price of the two candles should be similar.
'''
a = down_price_trend(data.iloc[-2],data.iloc[-3], data.iloc[-5])
b = tall_white_candle(data.iloc[-2])
c = tall_black_candle(data.iloc[-1])
d = (abs(data.iloc[-1]['Open'] - data.iloc[-2]['Open'])/(data.iloc[-1]['Open'])) < 0.001
if a and b and c and d:
return True
else:
return False
def bearish_side_by_side_white_lines(data):
'''
Look for a black candle in a downward price trend.
Following that, find two white candles with bodies about the same size and similar opening prices.
The closing prices of both white candles must remain below the body of the black candle.
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = black_candle(data.iloc[-3])
c = white_candle(data.iloc[-2])
d = white_candle(data.iloc[-1])
e = similar_price(data.iloc[-2]['Close'],data.iloc[-1]['Close'])
f = similar_price(data.iloc[-2]['Open'],data.iloc[-1]['Open'])
g = data.iloc[-2]['Close'] < data.iloc[-3]['Close']
if a and b and c and d and e and f and g:
return True
else:
return False
def bearish_three_line_strike(data):
'''
Look for three black candles forming lower lows followed by a tall white candle that
opens below the prior close and closes above the first day's open.
In other words, the last candle spans most of the price action of the prior three days.
'''
a = down_price_trend(data.iloc[-4], data.iloc[-5], data.iloc[-7])
b = black_candle(data.iloc[-2])
c = black_candle(data.iloc[-3])
d = black_candle(data.iloc[-4])
e = (data.iloc[-2]['Low'] < data.iloc[-3]['Low']) and (data.iloc[-2]['Close'] < data.iloc[-3]['Close'])
f = (data.iloc[-3]['Low'] < data.iloc[-4]['Low']) and (data.iloc[-3]['Close'] < data.iloc[-4]['Close'])
g = tall_white_candle(data.iloc[-1])
h = (data.iloc[-1]['Open'] < data.iloc[-2]['Close']) and (data.iloc[-1]['Close'] > data.iloc[-4]['Open'])
if a and b and c and d and e and f and g and h:
return True
else:
return False
def bearish_tri_star(data):
'''
Look for three doji candles, the middle one has a body above the other two.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = doji(data.iloc[-3])
c = doji(data.iloc[-2])
d = doji(data.iloc[-1])
e = min(data.iloc[-2]['Close'], data.iloc[-2]['Open']) > max(data.iloc[-1]['Close'], data.iloc[-1]['Open'])
f = min(data.iloc[-2]['Close'], data.iloc[-2]['Open']) > max(data.iloc[-3]['Close'], data.iloc[-3]['Open'])
if a and b and c and d and e and f:
return True
else:
return False
def bullish_belt_hold(data):
'''
Look for a white candle with no lower shadow, but closing near the high.
'''
a = down_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = white_candle(data.iloc[-1])
c = data.iloc[-1]['Low'] == data.iloc[-1]['Open']
d = similar_price(data.iloc[-1]['High'], data.iloc[-1]['Close'])
if a and b and c and d:
return True
else:
return False
def bullish_breakaway(data):
'''
Look for a series of five candles in a downtrend.
The first candle is tall and black followed by another black one that opens lower,
leaving a gap between the two bodies (but shadows can overlap).
The third day is a candle of any color but it should have a lower close.
Day four is a black candle with a lower close.
The final day is a tall white candle that closes within the body gap of the first two candles.
'''
a = down_price_trend(data.iloc[-5],data.iloc[-6], data.iloc[-8])
b = tall_black_candle(data.iloc[-5])
c = (black_candle(data.iloc[-4])) and (data.iloc[-4]['Open'] < data.iloc[-5]['Close'])
d = data.iloc[-3]['Close'] < data.iloc[-4]['Close']
e = (black_candle(data.iloc[-2])) and (data.iloc[-2]['Close'] < data.iloc[-3]['Close'])
f = tall_white_candle(data.iloc[-1])
g = (data.iloc[-1]['Close'] > data.iloc[-4]['Open']) and (data.iloc[-1]['Close'] < data.iloc[-5]['Close'])
if a and b and c and d and e and f and g:
return True
else:
return False
def bullish_doji_star(data):
'''
Look for a tall black candle on the first day followed by a doji
(where the opening and closing prices are within pennies of each other)
that gaps below the prior candle's body.
The shadows can overlap, but the doji's shadows should not be unusually long, whatever that means.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = (tall_black_candle(data.iloc[-2])) and doji(data.iloc[-1])
c = max(data.iloc[-1]['Close'], data.iloc[-1]['Open']) < data.iloc[-2]['Close']
d = (data.iloc[-1]['High']-data.iloc[-1]['Low']) < body_candle(data.iloc[-2])
if a and b and c and d:
return True
else:
return False
def bullish_engulfing(data):
'''
Look for two candles in a downward price trend.
The first is a black candle followed by a taller white one.
The white candle should have a close above the prior open and an open below the prior close.
In other words, the body of the white candle should engulf or overlap the body of the black candle.
Ignore the shadows.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = black_candle(data.iloc[-2])
c = tall_white_candle(data.iloc[-1])
d = (data.iloc[-1]['Close'] > data.iloc[-2]['Open']) and (data.iloc[-1]['Open'] < data.iloc[-2]['Close'])
if a and b and c and d:
return True
else:
return False
def bullish_harami(data):
'''
Look for a tall black candle in a downward price trend.
The next day a white candle should be nestled within the body of the prior candle.
Ignore the shadows. The tops or bottoms of the bodies can be the same price, but not both.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_black_candle(data.iloc[-2])
    c = white_candle(data.iloc[-1])
d = (data.iloc[-1]['Close'] < data.iloc[-2]['Open']) and (data.iloc[-1]['Open'] > data.iloc[-2]['Close'])
if a and b and c and d:
return True
else:
return False
def bullish_harami_cross(data):
'''
Look for a two candle pattern in a downward price trend.
The first line is a tall black candle followed by a doji that fits within the high-low price range of the prior day.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_black_candle(data.iloc[-2])
c = doji(data.iloc[-1])
    d = (data.iloc[-1]['High'] < data.iloc[-2]['High']) and (data.iloc[-1]['Low'] > data.iloc[-2]['Low'])
if a and b and c and d:
return True
else:
return False
def bullish_kicking(data):
'''
Look for a tall black marubozu candle followed by an upward gap then a tall white marubozu candle.
'''
a = tall_black_candle(data.iloc[-2])
b = black_marubozu_candle(data.iloc[-2])
c = tall_white_candle(data.iloc[-1])
d = white_marubozu_candle(data.iloc[-1])
e = data.iloc[-1]['Low'] > data.iloc[-2]['High']
if a and b and c and d and e:
return True
else:
return False
def bullish_meeting_lines(data):
'''
    Look for a tall black candle followed by a tall white candle in a downward price trend.
The two closes should be near one another.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_black_candle(data.iloc[-2])
c = tall_white_candle(data.iloc[-1])
d = similar_price(data.iloc[-1]['Close'], data.iloc[-2]['Close'])
if a and b and c and d:
return True
else:
return False
def bullish_separating_lines(data):
'''
Look for a tall black candle in an upward price trend followed by a tall white candle.
The two candles share a common opening price.
'''
a = up_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_black_candle(data.iloc[-2])
c = tall_white_candle(data.iloc[-1])
d = similar_price(data.iloc[-1]['Open'], data.iloc[-2]['Open'])
if a and b and c and d:
return True
else:
return False
def bullish_side_by_side_white_lines(data):
'''
Look for three white candles in an upward price trend.
The last two candles should have bodies of similar size,
open near the same price and above the top of the body of the first white candle.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = white_candle(data.iloc[-1]) and white_candle(data.iloc[-2]) and white_candle(data.iloc[-3])
c = (similar_price(data.iloc[-1]['Open'], data.iloc[-2]['Open'])) and (similar_price(data.iloc[-1]['Close'], data.iloc[-2]['Close']))
d = (data.iloc[-1]['Open'] > data.iloc[-3]['Close']) and (data.iloc[-2]['Open'] > data.iloc[-3]['Close'])
if a and b and c and d:
return True
else:
return False
def bullish_three_line_strike(data):
'''
Look for three white candles each with a higher close.
A tall black candle should open higher, but close below the open of the first candle.
'''
a = up_price_trend(data.iloc[-4], data.iloc[-5], data.iloc[-7])
b = (white_candle(data.iloc[-4])) and (white_candle(data.iloc[-3])) and (white_candle(data.iloc[-2]))
c = (data.iloc[-4]['Close'] < data.iloc[-3]['Close']) and (data.iloc[-2]['Close'] > data.iloc[-3]['Close'])
d = tall_black_candle(data.iloc[-1])
e = (data.iloc[-1]['Open'] > data.iloc[-2]['Close']) and (data.iloc[-1]['Close'] < data.iloc[-4]['Open'])
if a and b and c and d and e:
return True
else:
return False
def bullish_tri_star(data):
'''
Look for three doji after a downward price trend.
The middle doji has a body below the other two.
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = (doji(data.iloc[-3])) and (doji(data.iloc[-2])) and (doji(data.iloc[-1]))
c = max(data.iloc[-2]['Close'], data.iloc[-2]['Open']) < min(data.iloc[-1]['Close'], data.iloc[-1]['Open'])
d = max(data.iloc[-2]['Close'], data.iloc[-2]['Open']) < min(data.iloc[-3]['Close'], data.iloc[-3]['Open'])
if a and b and c and d:
return True
else:
return False
def collapsing_doji_star(data):
'''
Look for a white candle in an upward price trend.
Following that, find a doji that gaps below yesterday's low.
The last day is a black candle that also gaps below the doji.
None of the shadows on the three candles should overlap, so there should be gaps surrounding the doji.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = white_candle(data.iloc[-3])
c = (doji(data.iloc[-2])) and (data.iloc[-2]['High'] < data.iloc[-3]['Low'])
d = (black_candle(data.iloc[-1])) and (data.iloc[-1]['High'] < data.iloc[-2]['Low'])
if a and b and c and d:
return True
else:
return False
def conceling_baby_swallow(data):
'''
Look for four black candles.
The first two are long black marubozu candles followed the next day by a candle with a tall upper shadow.
The candle gaps open downward but price trades into the body of the prior day.
The last candle engulfs the prior day, including the shadows (a higher high and lower low than the prior day).
'''
a = down_price_trend(data.iloc[-4], data.iloc[-5], data.iloc[-7])
b = (tall_black_candle(data.iloc[-4])) and (black_marubozu_candle(data.iloc[-4]))
c = (tall_black_candle(data.iloc[-3])) and (black_marubozu_candle(data.iloc[-3]))
d = black_candle(data.iloc[-2]) and ((data.iloc[-2]['High'] - data.iloc[-2]['Open']) > body_candle(data.iloc[-2]))
e = (data.iloc[-2]['Open'] < data.iloc[-3]['Close']) and (data.iloc[-2]['High'] > data.iloc[-3]['Close'])
    f = (data.iloc[-1]['High'] > data.iloc[-2]['High']) and (data.iloc[-1]['Low'] < data.iloc[-2]['Low'])
if a and b and c and d and e and f:
return True
else:
return False
def dark_cloud_cover(data):
'''
Look for two candles in an upward price trend.
The first candle is a tall white one followed by a black candle with an opening price above the top of the white candle
(an opening price above the prior high), but a close below the mid point of the white body.
'''
a = up_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_white_candle(data.iloc[-2])
c = (black_candle(data.iloc[-1])) and (data.iloc[-1]['Open'] > data.iloc[-2]['High'])
    d = data.iloc[-1]['Close'] < (data.iloc[-2]['Open'] + data.iloc[-2]['Close'])/2.
if a and b and c and d:
return True
else:
return False
def deliberation(data):
'''
Look for three white candlesticks in an upward price trend.
The first two are tall bodied candles but the third has a small body that opens near the second day's close.
Each candle opens and closes higher than the previous one.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_white_candle(data.iloc[-3]) and tall_white_candle(data.iloc[-2])
c = white_candle(data.iloc[-1]) and (not tall_white_candle(data.iloc[-1]))
d = similar_price(data.iloc[-1]['Open'], data.iloc[-2]['Close'])
e = (data.iloc[-1]['Open'] > data.iloc[-2]['Open']) and (data.iloc[-2]['Open'] > data.iloc[-3]['Open'])
f = (data.iloc[-1]['Close'] > data.iloc[-2]['Close']) and (data.iloc[-2]['Close'] > data.iloc[-3]['Close'])
if a and b and c and d and e and f:
return True
else:
return False
def gapping_down_doji(data):
'''
In a downtrend, price gaps lower and forms a doji
(a candle in which the opening and closing prices are no more than a few pennies apart).
'''
a = down_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = doji(data.iloc[-1])
c = data.iloc[-1]['High'] < data.iloc[-2]['Low']
if a and b and c:
return True
else:
return False
def gapping_up_doji(data):
'''
Price gaps higher, including the shadows, in an uptrend and forms a doji candle.
A doji is one in which the opening and closing prices are within pennies of each other.
'''
a = up_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = doji(data.iloc[-1])
c = data.iloc[-1]['Low'] > data.iloc[-2]['High']
if a and b and c:
return True
else:
return False
def northern_doji(data):
'''
Look for a candle in which the opening and closing prices are within pennies of each other (a doji) in an up trend.
'''
a = up_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = doji(data.iloc[-1])
if a and b:
return True
else:
return False
def southern_doji(data):
'''
Look for a doji candlestick (one in which the opening and closing prices are a few pennies from each other) in a downward price trend.
'''
a = down_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = doji(data.iloc[-1])
if a and b:
return True
else:
return False
def bearish_doji_star(data):
'''
Look for a two-candle pattern in an uptrend.
The first candle is a long white one.
The next day, price gaps higher and the body remains above the prior body.
A doji forms with the opening and closing prices within pennies of each other.
The shadows on the doji should be comparatively short.
'''
a = up_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_white_candle(data.iloc[-2])
c = doji(data.iloc[-1]) and (not dragonfly_doji(data.iloc[-1])) and (not gravestone_doji(data.iloc[-1])) and (not long_legged_doji(data.iloc[-1]))
    d = min(data.iloc[-1]['Open'], data.iloc[-1]['Close']) > data.iloc[-2]['Close']
if a and b and c and d:
return True
else:
return False
def bullish_doji_star(data):
'''
Look for a tall black candle on the first day followed by a doji
(where the opening and closing prices are within pennies of each other)
that gaps below the prior candle's body.
The shadows can overlap, but the doji's shadows should not be unusually long, whatever that means.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_black_candle(data.iloc[-2])
c = doji(data.iloc[-1]) and (not dragonfly_doji(data.iloc[-1])) and (not gravestone_doji(data.iloc[-1])) and (not long_legged_doji(data.iloc[-1]))
    d = max(data.iloc[-1]['Open'], data.iloc[-1]['Close']) < data.iloc[-2]['Close']
if a and b and c and d:
return True
else:
return False
def evening_doji(data):
'''
Look for a tall white candle in an upward price trend followed by a doji whose body gaps above the two surrounding days.
Ignore the shadows. The last day is a tall black candle that closes at or below the mid point of the first day.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_white_candle(data.iloc[-3])
c = doji(data.iloc[-2])
d = (min(data.iloc[-2]['Open'],data.iloc[-2]['Close']) > data.iloc[-3]['Close']) and (min(data.iloc[-2]['Open'],data.iloc[-2]['Close']) > data.iloc[-1]['Open'])
e = tall_black_candle(data.iloc[-1])
f = data.iloc[-1]['Close'] <= (data.iloc[-3]['Close'] + data.iloc[-3]['Open'])/2.
if a and b and c and d and e and f:
return True
else:
return False
def downside_gap_three_methods(data):
'''
Look for two long black bodied candles in a downward price trend.
The second candle should have a gap between them (shadows do not overlap).
The last day is a white candle that opens within the body of the prior day and
closes within the body of the first day, closing the gap between the two black candles.
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = black_candle(data.iloc[-3]) and black_candle(data.iloc[-2])
c = data.iloc[-3]['Low'] > data.iloc[-2]['High']
d = white_candle(data.iloc[-1])
e = (data.iloc[-1]['Open'] < data.iloc[-2]['Open'])and (data.iloc[-1]['Open'] > data.iloc[-2]['Close'])
f = (data.iloc[-1]['Close'] < data.iloc[-3]['Open'])and (data.iloc[-1]['Close'] > data.iloc[-3]['Close'])
if a and b and c and d and e and f:
return True
else:
return False
def downside_tasuki_gap(data):
'''
Look for a black candle in a downward price trend followed by another black candle,
but this one gaps lower with no shadow overlap between the two candles.
The final day sees a white candle print on the chart,
one that opens within the body of the second candle and closes within the gap between the first and second candles.
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = black_candle(data.iloc[-3]) and black_candle(data.iloc[-2])
c = data.iloc[-3]['Low'] > data.iloc[-2]['High']
d = white_candle(data.iloc[-1])
e = (data.iloc[-1]['Open'] > data.iloc[-2]['Close']) and (data.iloc[-1]['Open'] < data.iloc[-2]['Open'])
f = (data.iloc[-1]['Close'] > data.iloc[-2]['High']) and (data.iloc[-1]['Close'] < data.iloc[-3]['Low'])
if a and b and c and d and e and f:
return True
else:
return False
def falling_three_methods(data):
'''
Look for a series of five candles in a downward price trend.
The first day should be a tall black candle followed by three up trending small white candles
(except the middle of the three, which can be either black or white),
followed by another tall black candle with a close below the first day's close.
The three middle candles should remain within the high-low range of the first candle.
'''
a = down_price_trend(data.iloc[-5], data.iloc[-6], data.iloc[-8])
b = tall_black_candle(data.iloc[-5])
c = small_white_candle(data.iloc[-4]) and small_white_candle(data.iloc[-2]) and (small_black_candle(data.iloc[-3]) or small_white_candle(data.iloc[-3]))
d = tall_black_candle(data.iloc[-1]) and (data.iloc[-1]['Close'] < data.iloc[-5]['Close'])
e = (data.iloc[-4]['High'] < data.iloc[-5]['High']) and (data.iloc[-3]['High'] < data.iloc[-5]['High']) and (data.iloc[-2]['High'] < data.iloc[-5]['High'])
f = (data.iloc[-4]['Low'] > data.iloc[-5]['Low']) and (data.iloc[-3]['Low'] > data.iloc[-5]['Low']) and (data.iloc[-2]['Low'] > data.iloc[-5]['Low'])
if a and b and c and d and e and f:
return True
else:
return False
def falling_window(data):
'''
Find a pattern in which yesterday's low is above today's high.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = data.iloc[-2]['Low'] > data.iloc[-1]['High']
if a and b:
return True
else:
return False
def hammer(data):
'''
Look for the hammer to appear in a downward price trend and
have a long lower shadow at least two or three times the height of the body with little or no upper shadow.
'''
a = down_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = (min(data.iloc[-1]['Open'], data.iloc[-1]['Close']) - data.iloc[-1]['Low']) > 2 * body_candle(data.iloc[-1])
c = similar_price(data.iloc[-1]['High'], max(data.iloc[-1]['Open'], data.iloc[-1]['Close']))
if a and b and c:
return True
else:
return False
def inverted_hammer(data):
'''
Look for a tall black candle with a close near the day's low followed by a short candle with a tall upper shadow and little or no lower shadow.
The second candle cannot be a doji
(opening and closing prices cannot be within pennies of each other) and
the open on the second candle must be below the prior candle's close.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_black_candle(data.iloc[-2]) and similar_price(data.iloc[-2]['Close'], data.iloc[-2]['Low'])
c = (not doji(data.iloc[-1])) and (small_white_candle(data.iloc[-1]) or small_black_candle(data.iloc[-1]))
d = similar_price(data.iloc[-1]['Low'], min(data.iloc[-1]['Open'], data.iloc[-1]['Close']))
e = (data.iloc[-1]['High'] - max(data.iloc[-1]['Open'], data.iloc[-1]['Close'])) > 2 * body_candle(data.iloc[-1])
f = data.iloc[-1]['Open'] < data.iloc[-2]['Close']
if a and b and c and d and e and f:
return True
else:
return False
def hanging_man(data):
'''
Look for a small bodied candle atop a long lower shadow in an uptrend.
'''
a = up_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = small_white_candle(data.iloc[-1]) or small_black_candle(data.iloc[-1])
    # hammer() also requires a downtrend, which would contradict the uptrend above,
    # so check the hammer shape (long lower shadow, little or no upper shadow) directly
    c = (min(data.iloc[-1]['Open'], data.iloc[-1]['Close']) - data.iloc[-1]['Low']) > 2 * body_candle(data.iloc[-1])
    d = similar_price(data.iloc[-1]['High'], max(data.iloc[-1]['Open'], data.iloc[-1]['Close']))
    if a and b and c and d:
return True
else:
return False
def high_wave(data):
'''
Look for tall upper and lower shadows attached to a small body.
    The body is not a doji (meaning that the opening and closing prices must be more than a few pennies apart).
'''
a = small_white_candle(data.iloc[-1]) or small_black_candle(data.iloc[-1])
    b = not doji(data.iloc[-1])
c = (data.iloc[-1]['High'] - max(data.iloc[-1]['Open'], data.iloc[-1]['Close'])) > 2 * body_candle(data.iloc[-1])
d = (min(data.iloc[-1]['Open'], data.iloc[-1]['Close']) - data.iloc[-1]['Low']) > 2 * body_candle(data.iloc[-1])
if a and b and c and d:
return True
else:
return False
def homing_pigeon(data):
'''
Look for a two line candle in a downward price trend.
The first day should be a tall black body followed by a small black body that fits inside the body of the prior day.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_black_candle(data.iloc[-2])
c = small_black_candle(data.iloc[-1])
d = data.iloc[-1]['Close'] > data.iloc[-2]['Close']
e = data.iloc[-1]['Open'] < data.iloc[-2]['Open']
if a and b and c and d and e:
return True
else:
return False
def identical_three_crows(data):
'''
Look for three tall black candles, the last two opening near the prior candle's close.
Some sources require each candle to be similar in size, but this one is rare enough without that restriction.
'''
    a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = (tall_black_candle(data.iloc[-3])) and (tall_black_candle(data.iloc[-2])) and (tall_black_candle(data.iloc[-1]))
c = similar_price(data.iloc[-2]['Open'], data.iloc[-3]['Close']) and similar_price(data.iloc[-1]['Open'], data.iloc[-2]['Close'])
if a and b and c:
return True
else:
return False
def in_neck(data):
'''
Look for a tall black candle in a downward price trend.
The next day, a white candle opens below the black day's low, but closes just into the body of the black candle.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_black_candle(data.iloc[-2])
c = white_candle(data.iloc[-1])
d = (data.iloc[-1]['Open'] < data.iloc[-2]['Low']) and (data.iloc[-1]['Close'] > data.iloc[-2]['Close']) and (data.iloc[-1]['Close'] < (data.iloc[-2]['Close']+data.iloc[-2]['Open'])/2.)
if a and b and c and d:
return True
else:
return False
def ladder_bottom(data):
'''
Look for a series of 5 candles in a downward price trend.
The first three days should be tall black candles, each with a lower open and close.
The 4th day should be a black candle with an upper shadow,
and the last day should be a white candle that gaps open above the body of the prior day.
'''
a = down_price_trend(data.iloc[-5], data.iloc[-6], data.iloc[-8])
b = tall_black_candle(data.iloc[-5]) and tall_black_candle(data.iloc[-4]) and tall_black_candle(data.iloc[-3])
c = (data.iloc[-4]['Close'] < data.iloc[-5]['Close']) and (data.iloc[-3]['Close'] < data.iloc[-4]['Close'])
d = (data.iloc[-4]['Open'] < data.iloc[-5]['Open']) and (data.iloc[-3]['Open'] < data.iloc[-4]['Open'])
e = black_candle(data.iloc[-2]) and (data.iloc[-2]['High'] > data.iloc[-2]['Open'])
f = white_candle(data.iloc[-1]) and (data.iloc[-1]['Open'] > data.iloc[-2]['Open'])
if a and b and c and d and e and f:
return True
else:
return False
def last_engulfing_bottom(data):
'''
Look for a white candle on the first day in a downward price trend followed by a black candle that engulfs the body of the white candle.
    That means the black candle has a body that is above the top and below the bottom of the white candle.
Ignore the shadows.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = white_candle(data.iloc[-2]) and black_candle(data.iloc[-1])
c = (data.iloc[-1]['Open'] > data.iloc[-2]['Close']) and (data.iloc[-1]['Close'] < data.iloc[-2]['Open'])
if a and b and c:
return True
else:
return False
def last_engulfing_top(data):
'''
Look for a black candle followed by a white candle that overlaps the prior black candle's body.
The white candle should have a body above the prior candle's top and below the prior candle's bottom.
'''
a = up_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = white_candle(data.iloc[-1]) and black_candle(data.iloc[-2])
c = (data.iloc[-2]['Low'] > data.iloc[-1]['Open']) and (data.iloc[-2]['High'] < data.iloc[-1]['Close'])
if a and b and c:
return True
else:
return False
def matching_low(data):
'''
Look for a black candle with a tall body.
Following that, find a black body with a close (not the low) that matches the prior close.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_black_candle(data.iloc[-2]) and black_candle(data.iloc[-1])
c = data.iloc[-1]['Close'] == data.iloc[-2]['Close']
if a and b and c:
return True
else:
return False
def mat_hold(data):
'''
Look for a tall white candle to start the pattern.
The next day a small black candle has a higher close.
The third day can be any color but it is also a small candle.
The fourth day is, again, a small black candle and all three candles (days 2 to 4)
show a downward price trend but their bodies remain above the low of the first day.
The last day is another tall white candle with a close above the high of the prior four candles.
'''
a = up_price_trend(data.iloc[-5], data.iloc[-6], data.iloc[-8])
b = tall_white_candle(data.iloc[-5])
c = small_black_candle(data.iloc[-4]) and (data.iloc[-4]['Close'] > data.iloc[-5]['Close'])
d = small_black_candle(data.iloc[-3]) or small_white_candle(data.iloc[-3])
e = small_black_candle(data.iloc[-2]) and down_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
f = (data.iloc[-2]['Close'] > data.iloc[-5]['Low']) and (min(data.iloc[-3]['Close'], data.iloc[-3]['Open'])> data.iloc[-5]['Low']) \
and (data.iloc[-4]['Close'] > data.iloc[-5]['Low'])
g = tall_white_candle(data.iloc[-1]) and data.iloc[-1]['Close'] > max(data.iloc[-2]['High'], data.iloc[-3]['High'], data.iloc[-4]['High'], data.iloc[-5]['High'])
if a and b and c and d and e and f and g:
return True
else:
return False
def morning_doji_star(data):
'''
Look for a tall black candle in a downward price trend.
The next day, a doji appears and its body gaps below the prior candle's body.
The final day is a tall white candle whose body gaps above the doji's.
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_black_candle(data.iloc[-3]) and doji(data.iloc[-2])
c = max(data.iloc[-2]['Close'], data.iloc[-2]['Open']) < data.iloc[-3]['Close']
d = tall_white_candle(data.iloc[-1]) and (data.iloc[-1]['Open'] > max(data.iloc[-2]['Close'], data.iloc[-2]['Open']))
if a and b and c and d:
return True
else:
return False
def morning_star(data):
'''
Look for a tall black candle in a downward price trend.
Following that, a small bodied candle of any color appears, one whose body gaps below the prior body.
The last day is a tall white candle that gaps above the body of the second candle and closes at least midway into the body of the first day.
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_black_candle(data.iloc[-3]) and (small_black_candle(data.iloc[-2]) or small_white_candle(data.iloc[-2]))
c = max(data.iloc[-2]['Open'], data.iloc[-2]['Close']) < data.iloc[-3]['Close']
d = tall_white_candle(data.iloc[-1]) and (data.iloc[-1]['Open'] > max(data.iloc[-2]['Close'], data.iloc[-2]['Open']))
if a and b and c and d:
return True
else:
return False
def on_neck(data):
'''
Look for a tall black candle in a downward price trend. Following that, a white candle has a close that matches (or nearly matches) the prior low.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = tall_black_candle(data.iloc[-2])
    c = white_candle(data.iloc[-1]) and similar_price(data.iloc[-1]['Close'], data.iloc[-2]['Low'])
if a and b and c:
return True
else:
return False
def piercing_pattern(data):
'''
Look for a black candle followed by a white one that opens below the black candle’s low and closes between the midpoint of the black body and opening price.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = black_candle(data.iloc[-2]) and white_candle(data.iloc[-1])
c = data.iloc[-1]['Open'] < data.iloc[-2]['Low']
d = (data.iloc[-1]['Close'] < data.iloc[-2]['Open']) and (data.iloc[-1]['Close'] > (data.iloc[-2]['Open'] - body_candle(data.iloc[-2])/2.))
if a and b and c and d:
return True
else:
return False
def rickshaw_man(data):
'''
Look for the opening and closing prices to be within pennies of each other,
unusually tall upper and lower shadows, and the body to be near the middle of the candlestick.
'''
    a = long_legged_doji(data.iloc[-1])
    b = similar_price(data.iloc[-1]['Open'], (data.iloc[-1]['High'] + data.iloc[-1]['Low'])/2.) or similar_price(data.iloc[-1]['Close'], (data.iloc[-1]['High'] + data.iloc[-1]['Low'])/2.)
if a and b:
return True
else:
return False
def rising_three_methods(data):
'''
Look for a tall white candle followed by three small candles that trend lower but close within the high-low range of the first candle.
Candles 2 and 4 are black, but day 3 can be any color.
The final candle in the pattern is a tall white one that closes above the close of the first day.
'''
a = up_price_trend(data.iloc[-5], data.iloc[-6], data.iloc[-8])
b = tall_white_candle(data.iloc[-5])
c = down_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
d = small_black_candle(data.iloc[-4]) and (data.iloc[-4]['Close'] < data.iloc[-5]['High']) and (data.iloc[-4]['Close'] > data.iloc[-5]['Low'])
e = (small_black_candle(data.iloc[-3]) or small_white_candle(data.iloc[-3])) and (data.iloc[-3]['Close'] < data.iloc[-5]['High']) and (data.iloc[-3]['Close'] > data.iloc[-5]['Low'])
f = small_black_candle(data.iloc[-2]) and (data.iloc[-2]['Close'] < data.iloc[-5]['High']) and (data.iloc[-2]['Close'] > data.iloc[-5]['Low'])
g = tall_white_candle(data.iloc[-1]) and (data.iloc[-1]['Close'] > data.iloc[-5]['Close'])
if a and b and c and d and e and f and g:
return True
else:
return False
def rising_window(data):
'''
Find a pattern in which yesterday's high is below today's low.
'''
a = up_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = data.iloc[-2]['High'] < data.iloc[-1]['Low']
if a and b:
return True
else:
return False
def shooting_star_1(data):
'''
Look for a small bodied candle (but not a doji) with little or no lower shadow and
a tall upper shadow at least twice the height of the body.
'''
a = up_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = small_black_candle(data.iloc[-1]) or small_white_candle(data.iloc[-1])
c = similar_price(data.iloc[-1]['Low'], min(data.iloc[-1]['Close'], data.iloc[-1]['Open']))
    d = (data.iloc[-1]['High'] - max(data.iloc[-1]['Close'], data.iloc[-1]['Open'])) > 2 * body_candle(data.iloc[-1])
if a and b and c and d:
return True
else:
return False
def shooting_star_2(data):
'''
Look for two candles in an upward price trend.
The first candle is white followed by a small bodied candle with an upper shadow at least three times the height of the body.
The candle has no lower shadow or a very small one and there is a gap between the prices of the two bodies.
The second candle can be any color.
'''
a = up_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = white_candle(data.iloc[-2])
c = small_black_candle(data.iloc[-1]) or small_white_candle(data.iloc[-1])
    d = (data.iloc[-1]['High'] - max(data.iloc[-1]['Close'], data.iloc[-1]['Open'])) > 3 * body_candle(data.iloc[-1])
e = similar_price(data.iloc[-1]['Low'], min(data.iloc[-1]['Close'], data.iloc[-1]['Open']))
f = data.iloc[-1]['Low'] > data.iloc[-2]['Close']
if a and b and c and d and e and f:
return True
else:
return False
def stick_sandwich(data):
'''
Look for a black candle in a falling price trend.
The second candle is white and it trades above the close of the prior day.
The last candle is a black one that closes at or near the close of the first day.
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = black_candle(data.iloc[-3]) and white_candle(data.iloc[-2]) and black_candle(data.iloc[-1])
c = (data.iloc[-2]['Low'] > data.iloc[-3]['Close'])
d = similar_price(data.iloc[-1]['Close'], data.iloc[-3]['Close'])
if a and b and c and d:
return True
else:
return False
def takuri_line(data):
'''
A small bodied candle with a lower shadow at least three times the height of the body and little or no upper shadow.
'''
a = down_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = small_black_candle(data.iloc[-1]) or small_white_candle(data.iloc[-1])
c = similar_price(data.iloc[-1]['High'], max(data.iloc[-1]['Close'], data.iloc[-1]['Open']))
d = abs(data.iloc[-1]['Low'] - min(data.iloc[-1]['Close'], data.iloc[-1]['Open'])) > 3 * body_candle(data.iloc[-1])
if a and b and c and d:
return True
else:
return False
def three_black_crows(data):
'''
Look for three tall black candles that appear in an upward price trend.
Candles 2 and 3 of the pattern should open within the body of the prior candle,
and all three should close near their lows, making new lows along the way.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_black_candle(data.iloc[-3]) and tall_black_candle(data.iloc[-2]) and tall_black_candle(data.iloc[-1])
c = (data.iloc[-2]['Open'] > data.iloc[-3]['Close']) and (data.iloc[-2]['Open'] < data.iloc[-3]['Open'])
d = (data.iloc[-1]['Open'] > data.iloc[-2]['Close']) and (data.iloc[-1]['Open'] < data.iloc[-2]['Open'])
e = similar_price(data.iloc[-3]['Low'], data.iloc[-3]['Close']) and similar_price(data.iloc[-2]['Low'], data.iloc[-2]['Close']) and similar_price(data.iloc[-1]['Low'], data.iloc[-1]['Close'])
f = (data.iloc[-3]['Low'] > data.iloc[-2]['Low']) and (data.iloc[-2]['Low'] > data.iloc[-1]['Low'])
if a and b and c and d and e and f:
return True
else:
return False
def three_inside_down(data):
'''
Look for a tall white candle in an upward price trend.
Following that, a small black candle appears with the open and close within the body of the first day.
The tops or bottoms of the two bodies can be the same price, but not both.
The last day must close lower, but can be any color.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_white_candle(data.iloc[-3])
c = small_black_candle(data.iloc[-2])
d = (data.iloc[-2]['Open'] < data.iloc[-3]['Close']) and (data.iloc[-2]['Close'] > data.iloc[-3]['Open'])
e = (data.iloc[-1]['Close'] < data.iloc[-2]['Close'])
if a and b and c and d and e:
return True
else:
return False
def three_inside_up(data):
'''
Look for a tall black candle in a downward price trend.
The next day, a small bodied white candle has a body that is within the body of the prior candle.
The tops or bottoms of the bodies can be the same price, but not both.
The last day is a white candle that closes above the prior close.
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_black_candle(data.iloc[-3])
c = small_white_candle(data.iloc[-2])
d = (data.iloc[-2]['Open'] > data.iloc[-3]['Close']) and (data.iloc[-2]['Close'] < data.iloc[-3]['Open'])
e = white_candle(data.iloc[-1]) and (data.iloc[-1]['Close'] > data.iloc[-2]['Close'])
if a and b and c and d and e:
return True
else:
return False
def three_outside_down(data):
'''
Look for a white candle in an upward price trend.
Following that, a black candle opens higher and closes lower than the prior candle's body.
The last day is a candle with a lower close.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = white_candle(data.iloc[-3])
    c = black_candle(data.iloc[-2]) and (data.iloc[-2]['Open'] > data.iloc[-3]['Close']) and (data.iloc[-2]['Close'] < data.iloc[-3]['Open'])
d = data.iloc[-1]['Close'] < data.iloc[-2]['Close']
if a and b and c and d:
return True
else:
return False
def three_outside_up(data):
'''
Look for a black candle in a downward price trend.
Following that, a white candle opens below the prior body and closes above it, too.
The last day is a candle in which price closes higher, according to Morris who developed the candle.
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = black_candle(data.iloc[-3])
    c = white_candle(data.iloc[-2]) and (data.iloc[-2]['Open'] < data.iloc[-3]['Close']) and (data.iloc[-2]['Close'] > data.iloc[-3]['Open'])
d = data.iloc[-1]['Close'] > data.iloc[-2]['Close']
if a and b and c and d:
return True
else:
return False
def three_stars_in_south(data):
'''
Look for a tall black candle with a long lower shadow to appear in a downward price trend.
The second day should be similar to the first day, but smaller and with a higher low.
The last day is a black marubozu that squeezes inside the high-low range of the prior day.
Good luck finding one.
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_black_candle(data.iloc[-3]) and ((data.iloc[-3]['Close']-data.iloc[-3]['Low']) > body_candle(data.iloc[-3]))
c = tall_black_candle(data.iloc[-2]) and ((data.iloc[-2]['Close']-data.iloc[-2]['Low']) > body_candle(data.iloc[-2]))
d = data.iloc[-2]['Low'] > data.iloc[-3]['Low']
e = black_marubozu_candle(data.iloc[-1]) and (data.iloc[-1]['High'] < data.iloc[-2]['High']) and (data.iloc[-1]['Low'] > data.iloc[-2]['Low'])
if a and b and c and d and e:
return True
else:
return False
def three_white_soldiers(data):
'''
Look for three tall white candles, each with a close near the high, higher closes, and
    bodies that overlap (an opening price within the prior candle's body).
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_white_candle(data.iloc[-3]) and tall_white_candle(data.iloc[-2]) and tall_white_candle(data.iloc[-1])
c = similar_price(data.iloc[-3]['High'], data.iloc[-3]['Close']) and similar_price(data.iloc[-2]['High'], data.iloc[-2]['Close']) and similar_price(data.iloc[-1]['High'], data.iloc[-1]['Close'])
d = (data.iloc[-3]['High'] < data.iloc[-2]['High']) and (data.iloc[-2]['High'] < data.iloc[-1]['High'])
e = (data.iloc[-2]['Open'] > data.iloc[-3]['Open']) and (data.iloc[-2]['Open'] < data.iloc[-3]['Close'])
f = (data.iloc[-1]['Open'] > data.iloc[-2]['Open']) and (data.iloc[-1]['Open'] < data.iloc[-2]['Close'])
if a and b and c and d and e and f:
return True
else:
return False
def thrusting(data):
'''
Look for a black candle in a downward price trend followed by a white candle that
opens below the prior low but closes near but below the midpoint of the black candle's body.
'''
a = down_price_trend(data.iloc[-2], data.iloc[-3], data.iloc[-5])
b = black_candle(data.iloc[-2]) and white_candle(data.iloc[-1])
    c = (data.iloc[-1]['Open'] < data.iloc[-2]['Low']) and (data.iloc[-1]['Close'] < (data.iloc[-2]['Open'] - body_candle(data.iloc[-2])/2.)) and \
        (data.iloc[-1]['Close'] > (data.iloc[-2]['Close'] + body_candle(data.iloc[-2])/4.))
if a and b and c:
return True
else:
return False
def tweezers_bottom(data):
'''
Look for two candles sharing the same low price.
'''
a = down_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = data.iloc[-1]['Low'] == data.iloc[-2]['Low']
if a and b:
return True
else:
return False
def tweezers_top(data):
'''
Look for two adjacent candlesticks with the same (or nearly the same) high price in an uptrend.
'''
a = up_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = similar_price(data.iloc[-1]['High'], data.iloc[-2]['High'])
if a and b:
return True
else:
return False
def two_black_gapping(data):
'''
Look for a price gap followed by two black candles.
The second black candle should have a high below the prior candle's high.
'''
a = down_price_trend(data.iloc[-1], data.iloc[-2], data.iloc[-4])
b = black_candle(data.iloc[-2]) and black_candle(data.iloc[-1])
c = data.iloc[-2]['High'] < data.iloc[-3]['Low']
d = data.iloc[-1]['High'] < data.iloc[-2]['High']
if a and b and c and d:
return True
else:
return False
def two_crows(data):
'''
Look for a tall white candle in an upward price trend.
Following that, a black candle has a body that gaps above the prior candle's body.
The last day is another black candle, but this one opens within the prior candle's body and closes within the body of the first candle in the pattern.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_white_candle(data.iloc[-3])
c = black_candle(data.iloc[-2]) and (data.iloc[-2]['Close'] > data.iloc[-3]['Close'])
d = black_candle(data.iloc[-1]) and (data.iloc[-1]['Open'] < data.iloc[-2]['Open']) and (data.iloc[-1]['Open'] > data.iloc[-2]['Close'])
    e = (data.iloc[-1]['Close'] > data.iloc[-3]['Open']) and (data.iloc[-1]['Close'] < data.iloc[-3]['Close'])
if a and b and c and d and e:
return True
else:
return False
def unique_three_river_bottom(data):
'''
Look for a tall bodied black candle in a downward price trend.
Following that, another black body rests inside the prior body, but the lower shadow is below the prior day's low.
The last day is a short bodied white candle that remains below the body of the prior candle.
'''
a = down_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_black_candle(data.iloc[-3])
c = black_candle(data.iloc[-2]) and (data.iloc[-2]['Low'] < data.iloc[-3]['Low'])
d = (data.iloc[-2]['Open'] < data.iloc[-3]['Open']) and (data.iloc[-2]['Close'] < data.iloc[-3]['Close'])
e = small_white_candle(data.iloc[-1]) and (data.iloc[-1]['Close'] < data.iloc[-2]['Close'])
if a and b and c and d and e:
return True
else:
return False
def upside_gap_three_methods(data):
'''
Look for two tall white candles in an upward price trend.
There should be a gap between them, including between the shadows.
The last day is a black candle that fills the gap created by the first two days.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_white_candle(data.iloc[-3]) and tall_white_candle(data.iloc[-2])
c = data.iloc[-3]['High'] < data.iloc[-2]['Low']
d = black_candle(data.iloc[-1]) and (data.iloc[-1]['Close'] < data.iloc[-3]['Close']) and (data.iloc[-1]['Close'] > data.iloc[-3]['Open'])
e = (data.iloc[-1]['Open'] < data.iloc[-2]['Close']) and (data.iloc[-1]['Open'] > data.iloc[-2]['Open'])
if a and b and c and d and e:
return True
else:
return False
def upside_gap_two_crows(data):
'''
Look for a tall white candle in an upward price trend.
Then find a black candle with a body gapping above the prior candle's body.
The last day is another black candle that engulfs the body of the middle day with a close that
remains above the close of the first candle.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = tall_white_candle(data.iloc[-3])
c = black_candle(data.iloc[-2]) and (data.iloc[-3]['Close'] < data.iloc[-2]['Close'])
d = black_candle(data.iloc[-1]) and (data.iloc[-1]['Close'] < data.iloc[-2]['Close']) and (data.iloc[-1]['Open'] > data.iloc[-2]['Open'])
    e = data.iloc[-1]['Close'] > data.iloc[-3]['Close']
if a and b and c and d and e:
return True
else:
return False
def upside_tasuki_gap(data):
'''
Look for a white candle in an upward price trend.
Following that, find another white candle, but this one gaps higher and that includes a gap between the shadows of the two candles.
The last day is a black candle that opens in the body of the prior candle and closes within the gap created between the first two candles.
'''
a = up_price_trend(data.iloc[-3], data.iloc[-4], data.iloc[-6])
b = white_candle(data.iloc[-3])
c = white_candle(data.iloc[-2]) and (data.iloc[-2]['Low'] > data.iloc[-3]['High'])
d = black_candle(data.iloc[-1]) and (data.iloc[-1]['Open'] > data.iloc[-2]['Open']) and (data.iloc[-1]['Open'] < data.iloc[-2]['Close'])
e = (data.iloc[-1]['Close'] > data.iloc[-3]['Close']) and (data.iloc[-1]['Close'] < data.iloc[-2]['Open'])
if a and b and c and d and e:
return True
else:
return False
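# --- Usage sketch (added, illustrative only) ---
# A minimal driver for the functions above; the window size and the particular
# patterns scanned here are arbitrary choices, not part of the original
# pattern definitions.
def scan_latest_patterns(ohlc, min_history=15):
    '''Return the names of patterns that match the most recent rows of `ohlc`,
    a DataFrame with Open/High/Low/Close columns.'''
    if len(ohlc) < min_history:
        return []
    checks = [
        ('doji', lambda d: doji(d.iloc[-1])),    # single-candle checks take one row
        ('hammer', hammer),                      # multi-candle checks take the frame
        ('bullish_engulfing', bullish_engulfing),
        ('bearish_engulfing', bearish_engulfing),
        ('morning_star', morning_star),
        ('three_black_crows', three_black_crows),
    ]
    return [name for name, test in checks if test(ohlc)]
# Example (hypothetical): scan_latest_patterns(c) might return ['doji', 'hammer']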
|
{
"content_hash": "328ead57ebed41218dfb8d13414d9ae2",
"timestamp": "",
"source": "github",
"line_count": 1494,
"max_line_length": 198,
"avg_line_length": 43.74029451137885,
"alnum_prop": 0.6082665115994369,
"repo_name": "kennethcc2005/yahoo_finance_stocks",
"id": "29d3060001fd6912828a6382e64bdde4391e0eb0",
"size": "65350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "candlestick_pattern.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "197797"
},
{
"name": "Python",
"bytes": "232020"
}
],
"symlink_target": ""
}
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class helloApp(CMSApp):
name = _("Hello App") # give your app a name, this is required
urls = ["helloApp.urls"] # link your app to url configuration(s)
apphook_pool.register(helloApp) # register your app
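# Hedged sketch of the url configuration the apphook above points at; the
# module path "helloApp.urls" comes from the registration, while the view and
# pattern names below are illustrative assumptions, not part of this project.
#
#   # helloApp/urls.py
#   from django.conf.urls import url
#   from helloApp import views  # hypothetical views module
#
#   urlpatterns = [
#       url(r'^$', views.index, name='hello-index'),
#   ]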
|
{
"content_hash": "134663953f833da7163ae76306f2e159",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 69,
"avg_line_length": 34.8,
"alnum_prop": 0.735632183908046,
"repo_name": "samirasnoun/django_cms_gallery_image",
"id": "88dd8513d79510a3e6c39bcfce04287065cffa95",
"size": "348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helloApp/cms_app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245718"
},
{
"name": "JavaScript",
"bytes": "1060264"
},
{
"name": "Makefile",
"bytes": "2973"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3309714"
},
{
"name": "Ruby",
"bytes": "1980"
},
{
"name": "XSLT",
"bytes": "10244"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django import forms
from django.utils.translation import ugettext_lazy as _
from sentry.models import AuditLogEntry, AuditLogEntryEvent, Project
BLANK_CHOICE = [("", "")]
class AddProjectForm(forms.ModelForm):
name = forms.CharField(label=_('Name'), max_length=200,
widget=forms.TextInput(attrs={
'placeholder': _('i.e. API, Frontend, My Application Name'),
}),
help_text='Using the repository name generally works well.',
)
class Meta:
fields = ('name',)
model = Project
def save(self, actor, team, ip_address):
project = super(AddProjectForm, self).save(commit=False)
project.team = team
project.organization = team.organization
project.save()
AuditLogEntry.objects.create(
organization=project.organization,
actor=actor,
ip_address=ip_address,
target_object=project.id,
event=AuditLogEntryEvent.PROJECT_ADD,
data=project.get_audit_log_data(),
)
return project
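# Hedged usage sketch (not part of the original module): how a request handler
# might drive this form. `request` and `team` are assumed to be a Django
# HttpRequest and a Sentry Team instance supplied by the caller.
def create_project_from_request(request, team):
    form = AddProjectForm(request.POST or None)
    if not form.is_valid():
        return None
    return form.save(
        actor=request.user,
        team=team,
        ip_address=request.META['REMOTE_ADDR'],
    )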
|
{
"content_hash": "9eaf162702688f6e5264415a9ce6fa0a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 28.53846153846154,
"alnum_prop": 0.6271338724168913,
"repo_name": "daevaorn/sentry",
"id": "a17ca62553d6ed681d1600ddb60d6206f2a803c3",
"size": "1113",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/web/forms/add_project.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "174905"
},
{
"name": "HTML",
"bytes": "200247"
},
{
"name": "JavaScript",
"bytes": "618375"
},
{
"name": "Lua",
"bytes": "21966"
},
{
"name": "Makefile",
"bytes": "5052"
},
{
"name": "Python",
"bytes": "8680827"
},
{
"name": "Shell",
"bytes": "746"
}
],
"symlink_target": ""
}
|
''' Sample usage of function 'inventory_unmounted' to show which devices in the configuration are not mounted.
Print the function's documentation then invoke the function and print the output.
'''
from __future__ import print_function as _print_function
from basics.inventory import inventory_unmounted
from pydoc import render_doc as doc
from pydoc import plain
def main():
print(plain(doc(inventory_unmounted)))
print(inventory_unmounted())
if __name__ == "__main__":
main()
|
{
"content_hash": "c13ea96b963672bbe23bec78d09fd221",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 110,
"avg_line_length": 31,
"alnum_prop": 0.7399193548387096,
"repo_name": "tbarrongh/cosc-learning-labs",
"id": "81bf09aadb40904c8598602377eb9b5d761fa7ae",
"size": "1099",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/learning_lab/01_inventory_unmounted.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "354065"
},
{
"name": "Shell",
"bytes": "2128"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import logging
import os
import mopidy_test
import pygst
pygst.require('0.10')
import gst
import gobject
from mopidy import config, exceptions, ext
__author__ = 'Andrew Jackson'
__email__ = 'andrewderekjackson@gmail.com'
__version__ = '0.1.0'
# If you need to log, use loggers named after the current Python module
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
dist_name = 'Mopidy-Test'
ext_name = 'test'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['username'] = config.String()
schema['password'] = config.Secret()
return schema
def get_command(self):
pass
def validate_environment(self):
pass
def setup(self, registry):
# Register a frontend
from .mopidy_test import TestFrontend
registry.add('frontend', TestFrontend)
pass
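# Hedged sketch of the ext.conf shipped next to this module; the section name
# follows ext_name and the keys follow get_config_schema() above, while the
# concrete defaults are illustrative assumptions.
#
#   [test]
#   enabled = true
#   username =
#   password =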
|
{
"content_hash": "05ac42e61015f8733e73d838d7877b8b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 71,
"avg_line_length": 22.76,
"alnum_prop": 0.6546572934973638,
"repo_name": "andrewderekjackson/mopidy_webradio",
"id": "9c80199597fefa2870e9fcd7e175e08b0d52df9f",
"size": "1163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mopidy_test/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1712"
},
{
"name": "Python",
"bytes": "28286"
}
],
"symlink_target": ""
}
|
"""Utilities for loading datasets for training."""
import functools
from typing import Dict, Any, Tuple
from absl import logging
import jax.numpy as jnp
import ml_collections as collections
import tensorflow as tf
import data as cpdata
def apply_cifar_augmentation(
config: collections.ConfigDict, ds: tf.data.Dataset,
shape: Tuple[int, int, int]) -> tf.data.Dataset:
"""Applies data augmentation for CIFAR dataset.
Args:
config: training configuration
ds: dataset to apply augmentation to
shape: image shape
Returns:
Augmented dataset.
"""
if config.cifar_augmentation == 'standard':
standard_fn = functools.partial(
cpdata.augment_flip_crop,
shape=shape, crop=4, mode='CONSTANT', replace=121)
ds = ds.map(standard_fn)
elif config.cifar_augmentation == 'autoaugment':
autoaugment_fn = functools.partial(
cpdata.augment_autoaugment, shape=shape, replace=121)
ds = ds.map(autoaugment_fn)
elif config.cifar_augmentation == 'standard+cutout':
standard_fn = functools.partial(
cpdata.augment_flip_crop,
shape=shape, crop=4, mode='CONSTANT', replace=121)
cutout_fn = functools.partial(cpdata.augment_cutout, replace=121, pad=8)
ds = ds.map(standard_fn)
ds = ds.map(cutout_fn)
elif config.cifar_augmentation == 'standard+autoaugment':
standard_fn = functools.partial(
cpdata.augment_flip_crop,
shape=shape, crop=4, mode='CONSTANT', replace=121)
autoaugment_fn = functools.partial(
cpdata.augment_autoaugment, shape=shape, replace=121)
ds = ds.map(standard_fn)
ds = ds.map(autoaugment_fn)
elif config.cifar_augmentation == 'standard+autoaugment+cutout':
standard_fn = functools.partial(
cpdata.augment_flip_crop,
shape=shape, crop=4, mode='CONSTANT', replace=121)
autoaugment_fn = functools.partial(
cpdata.augment_autoaugment, shape=shape, replace=121)
cutout_fn = functools.partial(cpdata.augment_cutout, replace=121, pad=8)
ds = ds.map(standard_fn)
ds = ds.map(autoaugment_fn)
ds = ds.map(cutout_fn)
else:
raise ValueError('Invalid augmentation for CIFAR10.')
return ds
def get_data_stats(config: collections.ConfigDict) -> Dict[str, Any]:
"""Get data statistics for selected dataset.
Retrieves data sizes, shapes and whitening statistics based on the
dataset selected in config.dataset.
Args:
config: training configuration
Returns:
Dictionary containing statistics of loaded data split.
"""
data = {}
if config.dataset == 'wine_quality':
data['classes'] = 2
train_examples = int(5000*0.8) - config.val_examples
test_examples = 5000 - config.val_examples - train_examples
data['sizes'] = {
'train': train_examples,
'val': config.val_examples,
'test': test_examples,
}
data['shape'] = (1, 1, 11)
data['means'] = [
10.532083, 0.04565686, 0.33281144, 0.99399555, 6.850714,
35.23343, 3.187603, 6.373672, 0.49019712, 138.01242, 0.27974856
]
data['stds'] = [
1.2350279, 0.022253787, 0.119335935, 0.003012671, 0.85485053,
17.152323, 0.15184218, 5.0720124, 0.11392499, 42.492615, 0.102494776
]
elif config.dataset == 'mnist':
data['classes'] = 10
data['sizes'] = {
'train': 60000 - config.val_examples,
'val': config.val_examples,
'test': 10000,
}
data['shape'] = (28, 28, 1)
data['means'] = [0.5]
data['stds'] = [0.5]
elif config.dataset == 'emnist_byclass':
# For evaluation, we want to keep the number of test examples and validation
    # examples down, because >10-20k test examples slows down evaluation
# considerably, and we run into OOM problems.
data['classes'] = 26 * 2
data['sizes'] = {
'train': 104000 - config.val_examples, # = 52 * 2000
'val': config.val_examples,
'test': 10400, # = 52 * 200
}
data['shape'] = (28, 28, 1)
data['means'] = [0.5]
data['stds'] = [0.5]
elif config.dataset == 'fashion_mnist':
data['classes'] = 10
data['sizes'] = {
'train': 60000 - config.val_examples,
'val': config.val_examples,
'test': 10000,
}
data['shape'] = (28, 28, 1)
data['means'] = [0.5]
data['stds'] = [0.5]
elif config.dataset == 'cifar10':
data['classes'] = 10
data['sizes'] = {
'train': 50000 - config.val_examples,
'val': config.val_examples,
'test': 10000,
}
data['shape'] = (32, 32, 3)
data['means'] = [0.49137254902, 0.482352941176, 0.446666666667]
data['stds'] = [0.247058823529, 0.243529411765, 0.261568627451]
elif config.dataset == 'cifar100':
data['classes'] = 100
data['sizes'] = {
'train': 50000 - config.val_examples,
'val': config.val_examples,
'test': 10000,
}
    data['shape'] = (32, 32, 3)
data['means'] = [0.491399755166, 0.4821585592989, 0.446530913373]
data['stds'] = [0.2470322514179, 0.2434851647, 0.2615878392604]
else:
raise ValueError('Invalid dataset.')
data['means'] = jnp.array(data['means'])
data['stds'] = jnp.array(data['stds'])
return data
def _check_batch_sizes(config: collections.ConfigDict, data: Dict[str, Any]):
"""Helper to check whether dataset sizes are divisible by batch sizes.
Args:
config: training configuration
data: datasets and sizes
"""
for key, batch_size in zip([
'train', 'test', 'val'
], [
config.batch_size, config.test_batch_size, config.test_batch_size,
]):
if data['sizes'][key] % batch_size != 0:
raise ValueError(
'Trying to do conformal training with batch size %d '
'but %s set size %d is not divisible by the batch size '
'(and drop_remainder is False).' % (
batch_size, key, data['sizes'][key],
))
def _batch_sets(
config: collections.ConfigDict, data: Dict[str, Any], drop_remainder: bool):
"""Helper to take care of training set shuffling.
Args:
config: training configuration
data: datasets and sizes
drop_remainder: whether to drop the remaining examples if they
cannot fill a full batch
"""
# For some datasets, we need to drop any batch that is smaller than
# the requested batch size at the end. This is because, for conformal
# training, the batch size is fixed due to the smooth sorting component used.
# So, to be fair, we just drop any batch at the end.
if data['sizes']['train'] % config.batch_size != 0:
drop_remainder = True
logging.warning(
'dropping last batch as %d training examples not divisible '
'by %d batch size!', data['sizes']['train'], config.batch_size)
# Unshuffled and clean versions for computing logits in a
# deterministic way.
data['train_ordered'] = data['train'].batch(
config.batch_size, drop_remainder=drop_remainder)
data['train_clean'] = data['train_clean'].batch(
config.batch_size, drop_remainder=drop_remainder)
  # We allow running cross-validation-like experiments by repeating the
  # training set config.resampling times, shuffling, and then taking the first
  # sizes['train'] examples. This keeps the training set size unchanged but
  # emulates sampling with up to config.resampling replacements.
if config.resampling:
if config.resampling <= 1:
raise ValueError('Cannot resample training set once or less often.')
data['train'] = data['train'].repeat(config.resampling)
data['train'] = data['train'].shuffle(
config.resampling * data['sizes']['train'], seed=config.seed)
data['train'] = data['train'].take(data['sizes']['train'])
else:
data['train'] = data['train'].shuffle(
data['sizes']['train'], seed=config.seed)
data['train'] = data['train'].batch(
config.batch_size, drop_remainder=drop_remainder)
if data['val'] is not None:
data['val'] = data['val'].batch(
config.test_batch_size, drop_remainder=drop_remainder)
data['test'] = data['test'].batch(
config.test_batch_size, drop_remainder=drop_remainder)
if not drop_remainder:
_check_batch_sizes(config, data)
def get_data(config: collections.ConfigDict) -> Dict[str, Any]:
"""Get data for training and testing.
Args:
config: training configuration
Returns:
Dictionary containing training and test datasets, number of classes,
and mean and std per channel for training dataset.
"""
def map_mnist_cifar(batch):
"""Mapping for image int to float on MNIST/CIFAR."""
return {
'image': tf.cast(batch['image'], tf.float32) / 255.,
'label': batch['label'],
}
def map_emnist_byclass_transpose_and_labels(batch):
"""Helper to map labels for EMNIST/byClass."""
return {
'image': tf.cast(
tf.transpose(batch['image'], perm=[1, 0, 2]), tf.float32) / 255.,
'label': batch['label'] - 10,
}
def filter_emnist_byclass(batch):
"""Helper to filter out digits in EMNIST/byClass."""
return batch['label'] >= 10
def map_wine_quality_expand_and_relabel(batch):
"""Helper to expand features to image size for win quality."""
keys = [
'alcohol',
'chlorides',
'citric acid',
'density',
'fixed acidity',
'free sulfur dioxide',
'pH',
'residual sugar',
'sulphates',
'total sulfur dioxide',
'volatile acidity',
]
features = tf.stack(
[tf.cast(batch['features'][k], tf.float32) for k in keys], axis=0)
return {
'image': tf.cast(tf.reshape(features, (1, 1, -1)), tf.float32),
'label': 1 if batch['quality'] >= 6 else 0,
}
data = get_data_stats(config)
drop_remainder = False
if config.dataset == 'wine_quality':
train_examples = data['sizes']['train']
val_examples = data['sizes']['val']
data_split = cpdata.create_data_split(
'wine_quality/white',
train_examples, val_examples, padding_size=5000)
data['train'] = data_split['train'].map(map_wine_quality_expand_and_relabel)
data['val'] = data_split['val']
if data['val'] is not None:
data['val'] = data['val'].map(map_wine_quality_expand_and_relabel)
data['test'] = data_split['test'].map(map_wine_quality_expand_and_relabel)
data['train_clean'] = data['train']
# Adapt data split to avoid check on batch size below.
data_split['sizes'] = data['sizes']
elif config.dataset == 'emnist_byclass':
# The validation example number is a fix for type checking:
# We want data_split['val'] to be None if val_examples=0, otherwise
# type checks below will fail.
    # So we request one validation example if val_examples > 0, and zero otherwise.
train_examples = data['sizes']['train']
val_examples = data['sizes']['val']
test_examples = data['sizes']['test']
data_split = cpdata.load_data_split(
'emnist/byclass', val_examples=min(config.val_examples, 1))
    # The train and validation sets are created from the provided train dataset
# by filtering, mapping and then taking train_examples + val_examples.
data['train'] = data_split['train'].filter(filter_emnist_byclass)
data['train'] = data['train'].map(map_emnist_byclass_transpose_and_labels)
data['train'] = data['train'].take(train_examples + val_examples)
data['val'] = data_split['val']
if data['val'] is not None:
data['val'] = data['train'].skip(train_examples)
# Important to take after defining the validation set!
data['train'] = data['train'].take(train_examples)
data['test'] = data_split['test'].filter(filter_emnist_byclass)
data['test'] = data['test'].map(map_emnist_byclass_transpose_and_labels)
data['test'] = data['test'].take(test_examples)
data['train_clean'] = data['train']
# Adapt data split to avoid check on batch size below.
data_split['sizes'] = data['sizes']
elif config.dataset in ['mnist', 'fashion_mnist', 'cifar10', 'cifar100']:
data_split = cpdata.load_data_split(
config.dataset, val_examples=config.val_examples)
# We need to apply data augmentation before the mapping as the mapping
# divides by 255 (which was before done in load_batches), but
# data augmentation operates on proper images, not floats.
data['train'] = data_split['train']
if config.dataset.find('cifar') >= 0:
logging.info('Adding data augmentation for CIFAR.')
data['train'] = apply_cifar_augmentation(
config, data['train'], data_split['shape'])
data['train'] = data['train'].map(map_mnist_cifar)
# Dataset without data augmentation:
data['train_clean'] = data_split['train'].map(map_mnist_cifar)
data['val'] = data_split['val']
if data['val'] is not None:
data['val'] = data['val'].map(map_mnist_cifar)
data['test'] = data_split['test'].map(map_mnist_cifar)
else:
raise ValueError('Invalid dataset.')
data['sizes'] = data_split['sizes']
data['shape'] = data_split['shape']
# This takes care of shuffling, batching and resampling with replacement
# if requested.
_batch_sets(config, data, drop_remainder)
return data
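# Hedged usage sketch (not part of the original module): a minimal ConfigDict
# exercising get_data(). The attribute names mirror the config fields read
# above; the concrete values are illustrative assumptions.
if __name__ == '__main__':
  _config = collections.ConfigDict()
  _config.dataset = 'cifar10'
  _config.val_examples = 5000
  _config.cifar_augmentation = 'standard'
  _config.batch_size = 100
  _config.test_batch_size = 100
  _config.resampling = 0
  _config.seed = 0
  _data = get_data(_config)
  logging.info('Loaded %d-class data with shape %s.', _data['classes'], _data['shape'])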
|
{
"content_hash": "fe0c1fe361021baf6f6c70a3f021e065",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 80,
"avg_line_length": 36.929577464788736,
"alnum_prop": 0.6425629290617849,
"repo_name": "deepmind/conformal_training",
"id": "7b87a27a1ae8c80c4816fe61bea4ccc38e372f1c",
"size": "13784",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "data_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "318129"
},
{
"name": "Shell",
"bytes": "1012"
}
],
"symlink_target": ""
}
|
import os
def prtcl():
result=os.popen('iptables -L INPUT |grep -iv "chain"|grep -iv "prot"| awk \'{print $2}\'',"r").read()
return result
def src():
result=os.popen(' iptables -L INPUT|grep -iv "chain"|grep -iv "prot" | awk \'{print $4}\'',"r").read()
return result
def des():
result=os.popen('iptables -L INPUT |grep -iv "chain"|grep -iv "prot"| awk \'{print $5}\'',"r").read()
return result
def pol():
result=os.popen('iptables -L INPUT |grep -iv "chain"|grep -iv "prot"| awk \'{print $1}\'',"r").read()
return result
def misc():
result=os.popen('iptables -L INPUT|grep -iv "chain"|grep -iv "prot" | awk \'{print $NF}\'',"r").read()
return result
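# Hedged usage sketch (not part of the original module): each helper above
# shells out to iptables and returns one newline-separated column of the INPUT
# chain, so whole rules can be rebuilt by zipping the split columns. Running it
# needs the iptables binary and sufficient privileges.
def rules():
    columns = [pol(), prtcl(), src(), des(), misc()]
    return zip(*[column.splitlines() for column in columns])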
|
{
"content_hash": "57b13c60c20dd23ed5fb8375db0202d0",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 103,
"avg_line_length": 27.916666666666668,
"alnum_prop": 0.6149253731343284,
"repo_name": "d4rkl0rd3r3b05/Firewall",
"id": "3b43214faac763e98798a65000f8b99c897b7565",
"size": "670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Firewall/polinptmeth.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75703"
}
],
"symlink_target": ""
}
|
"""
WSGI config for job_scheduler_web project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "job_scheduler_web.settings")
application = get_wsgi_application()
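# Hedged deployment note: any WSGI server can point at this module's
# ``application`` object; the gunicorn invocation below is one common example,
# not something mandated by this project.
#
#   gunicorn job_scheduler_web.wsgi:application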
|
{
"content_hash": "334cf458b56e638b3154b2ab1f2a5e35",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.75,
"alnum_prop": 0.7718446601941747,
"repo_name": "ewerkema/job-scheduler",
"id": "9890706e5ffa3a56287a5e1844dd87573c5d66b6",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "job_scheduler_web/job_scheduler_web/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4954"
},
{
"name": "HTML",
"bytes": "12288"
},
{
"name": "Java",
"bytes": "92845"
},
{
"name": "Python",
"bytes": "27724"
}
],
"symlink_target": ""
}
|