"""
Django settings for hello project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')nw@1z2xt-dy2f$1mfpzyuohxv-tmu4+5-q55)*(e6obam-p=4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
's3direct',
'cat',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# If AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are not defined,
# django-s3direct will attempt to use the EC2 instance profile instead.
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', '')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', '')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME', 'test-bucket')
S3DIRECT_REGION = os.environ.get('S3DIRECT_REGION', 'us-east-1')
def create_filename(filename):
import uuid
ext = filename.split('.')[-1]
filename = '%s.%s' % (uuid.uuid4().hex, ext)
return os.path.join('custom', filename)
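# Sketch of the resulting key (the UUID hex below is illustrative only):
#   create_filename('photo.jpg') -> 'custom/3f2c9a1e8b4d4e9c8d0a1b2c3d4e5f6a.jpg'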
S3DIRECT_DESTINATIONS = {
# Allow anybody to upload any MIME type
'misc': {
'key': '/'
},
# Allow staff users to upload any MIME type
'pdfs': {
'key': 'uploads/pdfs',
'auth': lambda u: u.is_staff
},
# Allow anybody to upload JPEGs and PNGs. Limit sizes to 5 KB - 20 MB
'images': {
'key': 'uploads/images',
'auth': lambda u: True,
'allowed': [
'image/jpeg',
'image/png'
],
'content_length_range': (5000, 20000000),
},
# Allow authenticated users to upload MP4s
'videos': {
'key': 'uploads/videos',
'auth': lambda u: u.is_authenticated(),
'allowed': ['video/mp4']
},
# Allow anybody to upload any MIME type with a custom name function
'custom_filename': {
'key': create_filename
},
}
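# Example usage in a model - a sketch following the django-s3direct README;
# the model name is hypothetical, 'videos' is the destination defined above:
#
#   from django.db import models
#   from s3direct.fields import S3DirectField
#
#   class Cat(models.Model):
#       video = S3DirectField(dest='videos')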
"""
"""
from .register import get_registered_layers
#custom layer import begins
import axpy
import flatten
import argmax
import reshape
import roipooling
import priorbox
import permute
import detection_out
import normalize
import select
import crop
import reduction
#custom layer import ends
custom_layers = get_registered_layers()
def set_args(f, params, node=None):
""" set args for function 'f' using the parameters in node.layer.parameters
Args:
f (function): a python function object
params (object): an object containing the attributes needed by f's arguments
Returns:
arg_names (list): a list of argument names
kwargs (dict): a dict containing the needed arguments
"""
from ..protobuf_to_dict import protobuf_to_dict
argc = f.__code__.co_argcount
arg_list = f.__code__.co_varnames[0:argc]
kwargs = {}
for arg_name in arg_list:
if arg_name in params:
kwargs[arg_name] = params[arg_name]
if node is not None and len(node.metadata):
kwargs.update(node.metadata)
return arg_list, kwargs
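# Sketch of what set_args yields for a hypothetical layer function:
#
#   def axpy_layer(alpha, x, y): ...
#   arg_names, kwargs = set_args(axpy_layer, {'alpha': 2.0, 'x': x_var})
#   # arg_names -> ('alpha', 'x', 'y'); kwargs -> {'alpha': 2.0, 'x': x_var}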
def has_layer(kind):
""" test whether this layer exists in custom layer
"""
return kind in custom_layers
def compute_output_shape(kind, node):
assert kind in custom_layers, "layer[%s] does not exist in custom layers" % (
kind)
shape_func = custom_layers[kind]['shape']
parents = node.parents
inputs = [list(p.output_shape) for p in parents]
arg_names, kwargs = set_args(shape_func, node.params)
if len(inputs) == 1:
inputs = inputs[0]
return shape_func(inputs, **kwargs)
def make_node(template, kind, node):
""" make a PaddleNode for custom layer which means construct
a piece of code to define a layer implemented in 'custom_layers'
Args:
@template (PaddleNode): a factory used to create an instance of PaddleNode
@kind (str): type of custom layer
@node (graph.Node): a layer in the net
Returns:
instance of PaddleNode
"""
assert kind in custom_layers, "layer[%s] does not exist in custom layers" % (
kind)
layer_func = custom_layers[kind]['layer']
#construct arguments needed by custom layer function from node's parameters
arg_names, kwargs = set_args(layer_func, node.params, node)
return template('custom_layer', kind, **kwargs)
def make_custom_layer(kind, inputs, name, *args, **kwargs):
""" execute a custom layer which is implemented by users
Args:
@kind (str): type name of this layer
@inputs (vars): variable list created by fluid
@name (str): name for this layer
@args (tuple): other positional arguments
@kwargs (dict): other kv arguments
Returns:
output (var): output variable for this layer
"""
assert kind in custom_layers, "layer[%s] does not exist in custom layers" % (
kind)
layer_func = custom_layers[kind]['layer']
return layer_func(inputs, name, *args, **kwargs)
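# Sketch (hypothetical): dispatching to a registered implementation, assuming
# a 'flatten' layer was registered and its function accepts an 'axis' keyword:
#
#   out_var = make_custom_layer('flatten', [input_var], 'flatten_1', axis=1)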
org'])
make_loc(session, name=loc_name, subnets=[subnet_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_subnets'])
element = session.nav.wait_until_element(
(strategy1, value1 % subnet_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.location.update(loc_name, subnets=[subnet_name])
self.location.search(loc_name).click()
self.location.click(tab_locators['context.tab_subnets'])
element = session.nav.wait_until_element(
(strategy, value % subnet_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_domain(self):
"""Add a domain to an location and remove it by location name
and domain name
@feature: Locations
@assert: the domain is removed from the location
"""
strategy, value = common_locators['entity_select']
strategy1, value1 = common_locators['entity_deselect']
with Session(self.browser) as session:
for domain_name in generate_strings_list():
with self.subTest(domain_name):
loc_name = gen_string('alpha')
domain = entities.Domain(name=domain_name).create()
self.assertEqual(domain.name, domain_name)
set_context(session, org=ANY_CONTEXT['org'])
make_loc(session, name=loc_name, domains=[domain_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_domains'])
element = session.nav.wait_until_element(
(strategy1, value1 % domain_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.location.update(loc_name, domains=[domain_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_domains'])
element = session.nav.wait_until_element(
(strategy, value % domain_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_user(self):
"""Create admin users then add user and remove it by using the
location name
@feature: Locations
@assert: The user is added then removed from the location
"""
strategy, value = common_locators['entity_select']
strategy1, value1 = common_locators['entity_deselect']
with Session(self.browser) as session:
# User names do not accept HTML values
for user_name in generate_strings_list(
length=10,
exclude_types=['html']):
with self.subTest(user_name):
loc_name = gen_string('alpha')
user = entities.User(
login=user_name,
firstname=user_name,
lastname=user_name,
password=gen_string('alpha'),
).create()
self.assertEqual(user.login, user_name)
set_context(session, org=ANY_CONTEXT['org'])
make_loc(session, name=loc_name, users=[user_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_users'])
element = session.nav.wait_until_element(
(strategy1, value1 % user_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.location.update(loc_name, users=[user_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_users'])
element = session.nav.wait_until_element(
(strategy, value % user_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNotNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_hostgroup(self):
"""Add a hostgroup and remove it by using the location name and
hostgroup name
@feature: Locations
@assert: hostgroup is added to location then removed
"""
strategy, value = common_locators['all_values_selection']
with Session(self.browser) as session:
for host_grp_name in generate_strings_list():
with self.subTest(host_grp_name):
loc_name = gen_string('alpha')
host_grp = entities.HostGroup(name=host_grp_name).create()
self.assertEqual(host_grp.name, host_grp_name)
set_context(session, org=ANY_CONTEXT['org'])
make_loc(session, name=loc_name)
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_hostgrps'])
element = session.nav.wait_until_element(
(strategy, value % host_grp_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.hostgroup.delete(host_grp_name)
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_hostgrps'])
element = session.nav.wait_until_element(
(strategy, value % host_grp_name))
# Item is listed in 'All Items' list and not
# 'Selected Items' list.
self.assertIsNone(element)
@run_only_on('sat')
@tier2
def test_positive_remove_compresource(self):
"""Remove compute resource by using the location name and
compute resource name
@feature: Locations
@assert: compute resource is added then removed
"""
strategy, value = common_locators['entity_select']
strategy1, value1 = common_locators['entity_deselect']
with Session(self.browser) as session:
for resource_name in generate_strings_list():
with self.subTest(resource_name):
loc_name = gen_string('alpha')
url = LIBVIRT_RESOURCE_URL % settings.server.hostname
resource = entities.LibvirtComputeResource(
name=resource_name, url=url
).create()
self.assertEqual(resource.name, resource_name)
set_context(session, org=ANY_CONTEXT['org'])
make_loc(session, name=loc_name, resources=[resource_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_resources'])
element = self.location.wait_until_element(
(strategy1, value1 % resource_name))
# Item is listed in 'Selected Items' list and not
# 'All Items' list.
self.assertIsNotNone(element)
self.location.update(loc_name, resources=[resource_name])
self.location.search(loc_name).click()
session.nav.click(tab_locators['context.tab_resources'])
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
"""Ticket #955"""
with np.errstate(all="ignore"):
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert_(n_before >= n_after, (n_before, n_after))
def test_void_scalar_with_titles(self, level=rlevel):
"""No ticket"""
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_void_scalar_constructor(self):
#Issue #1550
#Create test string data, construct void scalar from data and assert
#that void scalar contains original data.
test_string = np.array("test")
test_string_void_scalar = np.core.multiarray.scalar(
np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
#Create record scalar, construct from data and assert that
#reconstructed scalar is correct.
test_record = np.ones((), "i,i")
test_record_void_scalar = np.core.multiarray.scalar(
test_record.dtype, test_record.tobytes())
assert_(test_record_void_scalar == test_record)
#Test pickle and unpickle of void and record scalars
assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
def test_blasdot_uninitialized_memory(self):
"""Ticket #950"""
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
"""Regression test for #1061."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
"""Regression test for #1062."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
a = np.arange(sz)
self.assertTrue(np.size(a) == sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
"""Ticket #1058"""
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
"""Ticket #1078: segfaults when creating an array with a sequence of 0d
arrays."""
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
"""Ticket #1081: weird array with strange input..."""
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
"""Ticket #1080."""
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
"""Ticket #1106."""
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
"""Ticket #1217, don't segfault."""
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
"""Ticket #1235."""
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
"""Ticket #1240."""
a = np.array(
[ [sixu('abc'), sixu('\u03a3')],
[sixu('asdf'), sixu('erw')]
], dtype='U')
def fail():
b = np.array(a, 'S4')
self.assertRaises(UnicodeEncodeError, fail)
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', sixu('123')])
assert_(a.itemsize == 16)
a = np.array([sixu('123'), '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', sixu('123'), '12345'])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('12345')])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('1234')])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
"""Ticket #1198 and #1267"""
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
r = repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
a3 = np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
a4 = copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
"""Ticket #1267"""
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
"""Ticket #1267"""
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
"""Ticket #1259 and gh-441"""
for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
z = np.array([2.2-1.1j], dtype)
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder
# coding: utf-8
from __future__ import absolute_import
#
from esm.models.bind_resource import BindResource
from .base_model_ import Model
from ..util import deserialize_model
class BindingRequest(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, app_guid: str=None, plan_id: str=None, service_id: str=None, parameters: object=None, bind_resource: BindResource=None):
"""
BindingRequest - a model defined in Swagger
:param app_guid: The app_guid of this BindingRequest.
:type app_guid: str
:param plan_id: The plan_id of this BindingRequest.
:type plan_id: str
:param service_id: The service_id of this BindingRequest.
:type service_id: str
:param parameters: The parameters of this BindingRequest.
:type parameters: object
:param bind_resource: The bind_resource of this BindingRequest.
:type bind_resource: BindResource
"""
self.swagger_types = {
'app_guid': str,
'plan_id': str,
'service_id': str,
'parameters': object,
'bind_resource': BindResource
}
self.attribute_map = {
'app_guid': 'app_guid',
'plan_id': 'plan_id',
'service_id': 'service_id',
'parameters': 'parameters',
'bind_resource': 'bind_resource'
}
self._app_guid = app_guid
self._plan_id = plan_id
self._service_id = service_id
self._parameters = parameters
self._bind_resource = bind_resource
@classmethod
def from_dict(cls, dikt) -> 'BindingRequest':
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The BindingRequest of this BindingRequest.
:rtype: BindingRequest
"""
return deserialize_model(dikt, cls)
@property
def app_guid(self) -> str:
"""
Gets the app_guid of this BindingRequest.
Deprecated in favor of bind_resource.app_guid. GUID of an application associated with the binding to be created. If present, MUST be a non-empty string.
:return: The app_guid of this BindingRequest.
:rtype: str
"""
return self._app_guid
@app_guid.setter
def app_guid(self, app_guid: str):
"""
Sets the app_guid of this BindingRequest.
Deprecated in favor of bind_resource.app_guid. GUID of an application associated with the binding to be created. If present, MUST be a non-empty string.
:param app_guid: The app_guid of this BindingRequest.
:type app_guid: str
"""
self._app_guid = app_guid
@property
def plan_id(self) -> str:
"""
Gets the plan_id of this BindingRequest.
ID of the plan from the catalog. MUST be a non-empty string.
:return: The plan_id of this BindingRequest.
:rtype: str
"""
return self._plan_id
@plan_id.setter
def plan_id(self, plan_id: str):
"""
Sets the plan_id of this BindingRequest.
ID of the plan from the catalog. MUST be a non-empty string.
:param plan_id: The plan_id of this BindingRequest.
:type plan_id: str
"""
if plan_id is None:
raise ValueError("Invalid value for `plan_id`, must not be `None`")
self._plan_id = plan_id
@property
def service_id(self) -> str:
"""
Gets the service_id of this BindingRequest.
ID of the service from the catalog. MUST be a non-empty string.
:return: The service_id of this BindingRequest.
:rtype: str
"""
return self._service_id
@service_id.setter
def service_id(self, service_id: str):
"""
Sets the service_id of this BindingRequest.
ID of the service from the catalog. MUST be a non-empty string.
:param service_id: The service_id of this BindingRequest.
:type service_id: str
"""
if service_id is None:
raise ValueError("Invalid value for `service_id`, must not be `None`")
self._service_id = service_id
@property
def parameters(self) -> object:
"""
Gets the parameters of this BindingRequest.
Configuration options for the service binding. An opaque object, controller treats this as a blob. Brokers SHOULD ensure that the client has provided valid configuration parameters and values for the operation.
:return: The parameters of this BindingRequest.
:rtype: object
"""
return self._parameters
@parameters.setter
def parameters(self, parameters: object):
"""
Sets the parameters of this BindingRequest.
Configuration options for the service binding. An opaque object, controller treats this as a blob. Brokers SHOULD ensure that the client has provided valid configuration parameters and values for the operation.
:param parameters: The parameters of this BindingRequest.
:type parameters: object
"""
self._parameters = parameters
@property
def bind_resource(self) -> BindResource:
"""
Gets the bind_resource of this BindingRequest.
A JSON object that contains data for platform resources associated with the binding to be created. See Bind Resource Object for more information.
:return: The bind_resource of this BindingRequest.
:rtype: BindResource
"""
return self._bind_resource
@bind_resource.setter
def bind_resource(self, bind_resource: BindResource):
"""
Sets the bind_resource of this BindingRequest.
A JSON object that contains data for platform resources associated with the binding to be created. See Bind Resource Object for more information.
:param bind_resource: The bind_resource of this BindingRequest.
:type bind_resource: BindResource
"""
self._bind_resource = bind_resource
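# Sketch: building the model from a broker request body (the IDs below are
# illustrative placeholders, not real catalog entries):
#
#   req = BindingRequest.from_dict({
#       'service_id': 'svc-123',
#       'plan_id': 'plan-456',
#       'bind_resource': {'app_guid': 'app-789'},
#   })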
")[1])
# Exception when the relation's destination is
# an individual from the same class
if relation["name"] == className:
relation["name"] = '"self"'
else:
relation["name"] = '"%s"' % relation["name"]
relationType = restriction.find("owl:onProperty", namespaces)
relationTypeURI = relationType.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
relation["type"] = relationTypeURI.split("#")[1]
# Guesses the destination of the relation based on the name.
# Name should be "has_..."
if relation["type"].find('has') == 0:
relation["destination"] = pron(relation["type"][3:])
# Get the property's options
options = self.propOptions(relation["type"])
# Help text
relation["help_text"] = get(options, "help_text").replace("'", "\\'")
# Verbose name
relation["verbose_name"] = get(options, "verbose_name")
relation["type"] = relation["type"]
# Adds the relationship to the array containing all relationships for the class only
# if the relation has a destination
if "destination" in relation:
relations.append(relation)
# If there is a property defined in the subclass
elif restriction.find("owl:onDataRange", namespaces) is not None or restriction.find("owl:someValuesFrom", namespaces) is not None:
propertyTypeElement = restriction.find("owl:onProperty", namespaces)
propertyTypeURI = propertyTypeElement.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
propertyType = propertyTypeURI.split("#")[1]
if restriction.find("owl:onDataRange", namespaces) is not None:
dataTypeElement = restriction.find("owl:onDataRange", namespaces)
else:
dataTypeElement = restriction.find("owl:someValuesFrom", namespaces)
dataTypeURI = dataTypeElement.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
t = dataTypeURI.split("#")[1]
if t in correspondanceTypes:
dataType = correspondanceTypes[t]
# Get the property's options
options = self.propOptions(propertyType)
prop = {
"name" : propertyType,
"type" : dataType,
# Help text
"help_text": get(options, "help_text").replace("'", "\\'"),
# Verbose name
"verbose_name": get(options, "verbose_name")
}
properties.append(prop)
else:
raise CommandError("Property '%s' of '%s' using unkown type: %s" % (propertyType, className, t) )
models.append({
"className" : className,
"scope" : scope,
"help_text" : help_text,
"verbose_name" : verbose_name,
"verbose_name_plural": verbose_name_plural,
"parentClass" : parentClass,
"properties" : properties,
"relations" : relations,
"dependencies" : [parentClass]
})
# Topological sort of the models to avoid missing dependencies
models = self.topolgical_sort(models)
# Output the models file
self.print_models(models, headers)
# Returns the options of the given property
def propOptions(self, name):
options = None
attr = "{http://www.w3.org/1999/02/22-rdf-syntax-ns#}about"
for p in self.root.findall("owl:ObjectProperty", namespaces):
if re.search('#%s$' % name, p.attrib[attr]):
options = p
for p in self.root.findall("owl:DatatypeProperty", namespaces):
if re.search('#%s$' % name, p.attrib[attr]):
options = p
return options
@staticmethod
def print_models(models=[], headers=[]):
modelsContents = list(headers)  # copy so the mutable default isn't mutated across calls
for m in models:
# Writes the class in models.py
modelsContents.append("\nclass "+ m["className"] +"(models.NodeModel):")
# Defines properties and relations that every model has
m["properties"].insert(0,
{
"name" : "_author",
"type": "IntArrayProperty",
# Verbose name
"verbose_name": "author",
"help_text": "People that edited this entity."
}
)
m["properties"].insert(1,
{
"name" : "_status",
"type": "IntegerProperty",
# Verbose name
"verbose_name": "status",
"help_text": ""
}
)
# Since neo4django doesn't support model inheritance correctly,
# we use models.NodeModel for every model
# and duplicate the parent's attributes into its child
if m["parentClass"] != "models.NodeModel":
modelsContents.append("\t_parent = u'%s'" % m["parentClass"])
# Find the models that could be the parent of the current one
parents = [model for model in models if model["className"] == m["parentClass"] ]
# We found at least one parent
if len(parents):
# We take the first one
parent = parents[0]
# We merge the properties and the relationships
m["properties"] = merge(parent["properties"], m["properties"], "name")
m["relations"] = merge(parent["relations"], m["relations"], "destination")
if m["scope"] != '' and m["scope"] != None:
modelsContents.append("\t_topic = u'%s'" % m["scope"])
if m["help_text"] != None:
modelsContents.append("\t_description = u'%s'" % m["help_text"])
# Writes the properties
for prop in m["properties"]:
opt = [
"null=True",
"help_text=u'%s'" % prop["help_text"]
]
if prop["verbose_name"] != '':
opt.append("verbose_name=u'%s'" % prop["verbose_name"])
field = "\t%s = models.%s(%s)"
opt = ( pron(prop["name"]), prop["type"], ",".join(opt))
modelsContents.append(field % opt )
# Writes the relationships
for rel in m["relations"]:
opt = [
rel["name"],
"null=True",
# Add class name prefix to relation type
"rel_type='%s+'" % pron( m["className"] + "_" + rel["type"] ),
"help_text=u'%s'" % rel["help_text"]
]
if prop["verbose_name"] != '':
opt.append("verbose_name=u'%s'" % rel["verbose_name"])
field = "\t%s = models.Relationship(%s)"
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.plugins.async import async
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class TimerEvent(async.DelayedCall):
def __init__(self, actor_id, delay, trigger_loop, repeats=False):
super(TimerEvent, self).__init__(delay, callback=self.trigger)
self._actor_id = actor_id
self._triggered = False
self.trigger_loop = trigger_loop
self.repeats = repeats
_log.debug("Set calvinsys timer %f %s on %s" % (delay, "repeat" if self.repeats else "", self._actor_id))
@property
def triggered(self):
return self._triggered
def ack(self):
self._triggered = False
def trigger(self):
_log.debug("Trigger calvinsys timer on %s" % (self._actor_id))
self._triggered = True
if self.repeats:
self.reset()
self.trigger_loop(actor_ids=[self._actor_id])
class TimerHandler(object):
def __init__(self, node, actor):
super(TimerHandler, self).__init__()
self._actor = actor
self.node = node
def once(self, delay):
return TimerEvent(self._actor.id, delay, self.node.sched.trigger_loop)
def repeat(self, delay):
return TimerEvent(self._actor.id, delay, self.node.sched.trigger_loop, repeats=True)
def register(node, actor, events=None):
| """
Registers is called when the Event-system object is created.
Place an object in the event object - in this case the
nodes only timer object.
Also register any hooks for actor migration.
@TODO: Handle migration (automagically and otherwise.)
"""
return TimerHandler(node=node, actor=actor)
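# Sketch of the expected calling convention (the delay value is illustrative):
#
#   handler = register(node, actor)
#   timer = handler.repeat(0.5)  # trigger the actor's loop every 0.5 s
#   if timer.triggered:
#       timer.ack()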
import serial
ser = serial.Serial('/dev/ttyUSB2',38400)
while True:
try:
x = ser.read()
# Persist the latest gesture byte so another process can pick it up
f = open('gesture_command.txt', 'w')
f.write(x)
f.close()
except serial.SerialException:
print "Gesture serial : port error!"
break
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.core.models import ArticlePage, ArticlePageRecommendedSections
from wagtail.wagtailcore.blocks import StreamValue
def create_recommended_articles(main_article, article_list):
'''
Creates recommended article objects from article_list
and _prepends_ to existing recommended articles.
'''
existing_recommended_articles = [
ra.recommended_article.specific
for ra in main_article.recommended_articles.all()]
ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
for hyperlinked_article in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=hyperlinked_article).save()
# re-create existing recommended articles
for article in existing_recommended_articles:
if article not in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=article).save()
def convert_articles(apps, schema_editor):
'''
Derived from https://github.com/wagtail/wagtail/issues/2110
'''
articles = ArticlePage.objects.all().exact_type(ArticlePage)
for article in articles:
stream_data = []
linked_articles = []
for block in article.body.stream_data:
if block['type'] == 'page':
if ArticlePage.objects.filter(id=block['value']):
linked_articles.append(ArticlePage.objects.get(
id=block['value']))
else:
# add block to new stream_data
stream_data.append(block)
if linked_articles:
create_recommended_articles(article, linked_articles)
stream_block = article.body.stream_block
article.body = StreamValue(stream_block, stream_data, is_lazy=True)
article.save()
section = article.get_parent().specific
section.enable_recommended_section = True
section.enable_next_section = True
section.save()
class Migration(migrations.Migration):
dependencies = [
('iogt', '0002_create_importers_group'),
]
operations = [
migrations.RunPython(convert_articles),
]
e necessary logic.
return
field_list = [name for (name, value) in attrs.items()
if (value.get('required_by_policy') or
value.get('primary_key') or 'default' not in value)]
plugin = manager.NeutronManager.get_plugin_for_resource(collection)
if plugin:
if utils.is_member_action(controller):
getter = controller.parent_controller.plugin_shower
else:
getter = controller.plugin_shower
getter_args = [neutron_context, resource_id]
if parent_id:
getter_args.append(parent_id)
return getter(*getter_args, fields=field_list)
else:
# Some legit resources, like quota, do not have a plugin yet.
# Retrieving the original object is nevertheless important
# for policy checks.
return _custom_getter(resource, resource_id)
class PolicyHook(hooks.PecanHook):
priority = 140
def before(self, state):
# This hook should be run only for PUT, POST and DELETE methods and for
# requests targeting a neutron resource
resources = state.request.context.get('resources', [])
if state.request.method not in ('POST', 'PUT', 'DELETE'):
return
# As this routine will likely alter the resources, do a shallow copy
resources_copy = resources[:]
neutron_context = state.request.context.get('neutron_context')
resource = state.request.context.get('resource')
# If there is no resource for this request, don't bother running authZ
# policies
if not resource:
return
controller = utils.get_controller(state)
if not controller or utils.is_member_action(controller):
return
collection = state.request.context.get('collection')
needs_prefetch = (state.request.method == 'PUT' or
state.request.method == 'DELETE')
policy.init()
action = controller.plugin_handlers[
pecan_constants.ACTION_MAP[state.request.method]]
# NOTE(salv-orlando): As bulk updates are not supported, in case of PUT
# requests there will be only a single item to process, and its
# identifier would have been already retrieved by the lookup process;
# in the case of DELETE requests there won't be any item to process in
# the request body
original_resources = []
if needs_prefetch:
try:
item = resources_copy.pop()
except IndexError:
# Ops... this was a delete after all!
item = {}
resource_id = state.request.context.get('resource_id')
parent_id = state.request.context.get('parent_id')
method = state.request.method
resource_obj = fetch_resource(method, neutron_context, controller,
collection, resource, resource_id,
parent_id=parent_id)
if resource_obj:
original_resources.append(resource_obj)
obj = copy.copy(resource_obj)
obj.update(item)
obj[const.ATTRIBUTES_TO_UPDATE] = item.keys()
# Put back the item in the list so that policies could be
# enforced
resources_copy.append(obj)
# TODO(salv-orlando): as other hooks might need to prefetch resources,
# store them in the request context. However, this should be done in a
# separate hook which is conveniently called before all other hooks
state.request.context['original_resources'] = original_resources
for item in resources_copy:
try:
policy.enforce(
neutron_context, action, item,
pluralized=collection)
except oslo_policy.PolicyNotAuthorized:
with excutils.save_and_reraise_exception() as ctxt:
# If a tenant is modifying its own object, it's safe to
# return a 403. Otherwise, pretend that it doesn't exist
# to avoid giving away information.
controller = utils.get_controller(state)
s_action = controller.plugin_handlers[controller.SHOW]
if not policy.check(neutron_context, s_action, item,
pluralized=collection):
ctxt.reraise = False
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
def after(self, state):
neutron_context = state.request.context.get('neutron_context')
resource = state.request.context.get('resource')
collection = state.request.context.get('collection')
controller = utils.get_controller(state)
if not resource:
# can't filter a resource we don't recognize
return
# NOTE(kevinbenton): extension listing isn't controlled by policy
if resource == 'extension':
return
try:
data = state.response.json
except ValueError:
return
if state.request.method not in pecan_constants.ACTION_MAP:
return
if not data or (resource not in data and collection not in data):
return
policy.init()
is_single = resource in data
action_type = pecan_constants.ACTION_MAP[state.request.method]
if action_type == 'get':
action = controller.plugin_handlers[controller.SHOW]
else:
action = controller.plugin_handlers[action_type]
key = resource if is_single else collection
to_process = [data[resource]] if is_single else data[collection]
# in the single case, we enforce which raises on violation
# in the plural case, we just check so violating items are hidden
policy_method = policy.enforce if is_single else policy.check
plugin = manager.NeutronManager.get_plugin_for_resource(collection)
try:
resp = [self._get_filtered_item(state.request, controller,
resource, collection, item)
for item in to_process
if (state.request.method != 'GET' or
policy_method(neutron_context, action, item,
plugin=plugin,
pluralized=collection))]
except oslo_policy.PolicyNotAuthorized:
# This exception must be explicitly caught as the exception
# translation hook won't be called if an error occurs in the
# 'after' handler. Instead of raising an HTTPNotFound exception,
# we have to set the status_code here to prevent the catch_errors
# middleware from turning this into a 500.
state.response.status_code = 404
return
if is_single:
resp = resp[0]
state.response.json = {key: resp}
def _get_filtered_item(self, request, controller, resource, collection,
data):
neutron_context = request.context.get('neutron_context')
to_exclude = self._exclude_attributes_by_policy(
neutron_context, controller, resource, collection, data)
return self._filter_attributes(request, data, to_exclude)
def _filter_attributes(self, request, data, fields_to_strip):
# This routine will remove the fields that were requested to the
# plugin for policy evaluation but were not specified in the
# API request
return dict(item for item in data.items()
if item[0] not in fields_to_strip)
def _exclude_attributes_by_policy(self, context, controller, resource,
collection, data):
"""Identifies attributes to exclude according to authZ policies.
Return a list of attribute names which should be stripped from the
response returned to the user because the user is not authorized to see them.
"""
====================
Breadth-first search
====================
Basic algorithms for breadth-first searching the nodes of a graph.
"""
import networkx as nx
from collections import defaultdict, deque
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = ['bfs_edges', 'bfs_tree', 'bfs_predecessors', 'bfs_successors']
def bfs_edges(G, source, reverse=False):
"""Produce edges in a breadth-first-search starting at source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
reverse : bool, optional
If True traverse a directed graph in the reverse direction
Returns
-------
edges: generator
A generator of edges in the breadth-first-search.
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> print(list(nx.bfs_edges(G,0)))
[(0, 1), (1, 2)]
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
if reverse and isinstance(G, nx.DiGraph):
neighbors = G.predecessors
else:
neighbors = G.neighbors
visited = set([source])
queue = deque([(source, neighbors(source))])
while queue:
parent, children = queue[0]
try:
child = next(children)
if child not in visited:
yield parent, child
visited.add(child)
queue.append((child, neighbors(child)))
except StopIteration:
queue.popleft()
def bfs_tree(G, source, reverse=False):
"""Return an oriented tree constructed from of a breadth-first-search
starting at source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
reverse : bool, optional
If True traverse a directed graph in the reverse direction
Returns
-------
T: NetworkX DiGraph
An oriented tree
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> T = nx.bfs_tree(G,0)
>>> print(sorted(T.edges()))
[(0, 1), (1, 2)]
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
T = nx.DiGraph()
T.add_node(source)
T.add_edges_from(bfs_edges(G,source,reverse=reverse))
return T
def bfs_predecessors(G, source):
"""Return dictionary of predecessors in breadth-first-search from source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
Returns
-------
pred: dict
A dictionary with nodes as keys and predecessor nodes as values.
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> print(nx.bfs_predecessors(G,0))
{1: 0, 2: 1}
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
return dict((t,s) for s,t in bfs_edges(G,source))
def bfs_successors(G, source):
"""Return dictionary of successors in breadth-first-search from source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
Returns
-------
succ: dict
A dictionary with nodes as keys and a list of successor nodes as values.
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> print(nx.bfs_successors(G,0))
{0: [1], 1: [2]}
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
d = defaultdict(list)
for s,t in bfs_edges(G,source):
d[s].append(t)
return dict(d)
# Performs network checks
from subprocess import Popen, PIPE
from includes.output import *
class FirewallChecks:
# Constructor
def __init__(self, uuid = None):
# if uuid == None then check the host
self.uuid = uuid
def checkIpfwRule(self, permission, fromIP, toIP, toPort, direction):
cmd = ['ipfw', 'list']
# add the jexec command if we're dealing with a container
if (self.uuid is not None):
cmd = ['jexec', 'trd-' + self.uuid] + cmd
process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdOut, stdErr = process.communicate()
stdOutString = stdOut.decode('utf-8')
stdErrString = stdErr.decode('utf-8')
for line in stdOutString.splitlines():
words = line.split()
# check against this line
if (words[1] == permission) and (words[7] == fromIP) and (words[9] == toIP) and (words[11] == toPort):
return True
return False
# checks that a value exists in an ipfw table
def checkIpfwTable(self, tableNum, value):
cmd = ['ipfw', 'table', str(tableNum), 'list']
# add the jexec command if we're dealing with a container
if (self.uuid is not None):
cmd = ['jexec', 'trd-' + self.uuid] + cmd
process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdOut, stdErr = process.communicate()
stdOutString = stdOut.decode('utf-8')
stdErrString = stdErr.decode('utf-8')
if (process.returncode != 0):
e_error("Failed to check ipfw table")
print(stdOutString)
print(stdErrString)
print('exitcode: ' + str(process.returncode))
exit(process.returncode)
# loop over the lines looking for our value
for line in stdOutString.splitlines():
if (line.split()[0] == value):
return True
return False
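# Sketch usage (the uuid and table values are hypothetical):
#
#   checks = FirewallChecks('1f2e3d4c')
#   if not checks.checkIpfwTable(1, '10.0.0.5'):
#       e_error("address missing from ipfw table 1")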
InstrumentedAttribute."""
accepts_scalar_loader = True
uses_objects = False
supports_population = True
collection = False
__slots__ = '_replace_token', '_append_token', '_remove_token'
def __init__(self, *arg, **kw):
super(ScalarAttributeImpl, self).__init__(*arg, **kw)
self._replace_token = self._append_token = None
self._remove_token = None
def _init_append_token(self):
self._replace_token = self._append_token = Event(self, OP_REPLACE)
return self._replace_token
_init_append_or_replace_token = _init_append_token
def _init_remove_token(self):
self._remove_token = Event(self, OP_REMOVE)
return self._remove_token
def delete(self, state, dict_):
# TODO: catch key errors, convert to attributeerror?
if self.dispatch._active_history:
old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
else:
old = dict_.get(self.key, NO_VALUE)
if self.dispatch.remove:
self.fire_remove_event(state, dict_, old, self._remove_token)
state._modified_event(dict_, self, old)
del dict_[self.key]
def get_history(self, state, dict_, passive=PASSIVE_OFF):
if self.key in dict_:
return History.from_scalar_attribute(self, state, dict_[self.key])
else:
if passive & INIT_OK:
passive ^= INIT_OK
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_scalar_attribute(self, state, current)
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
if self.dispatch._active_history:
old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
else:
old = dict_.get(self.key, NO_VALUE)
if self.dispatch.set:
value = self.fire_replace_event(state, dict_,
value, old, initiator)
state._modified_event(dict_, self, old)
dict_[self.key] = value
def fire_replace_event(self, state, dict_, value, previous, initiator):
for fn in self.dispatch.set:
value = fn(
state, value, previous,
initiator or self._replace_token or
self._init_append_or_replace_token())
return value
def fire_remove_event(self, state, dict_, value, initiator):
for fn in self.dispatch.remove:
fn(state, value,
initiator or self._remove_token or self._init_remove_token())
@property
def type(self):
return self.property.columns[0].type
class ScalarObjectAttributeImpl(ScalarAttributeImpl):
"""represents a scalar-holding InstrumentedAttribute,
where the target object is also instrumented.
Adds events to delete/set operations.
"""
accepts_scalar_loader = False
uses_objects = True
supports_population = True
collection = False
__slots__ = ()
def delete(self, state, dict_):
old = self.get(state, dict_)
self.fire_remove_event(
state, dict_, old,
self._remove_token or self._init_remove_token())
del dict_[self.key]
def get_history(self, state, dict_, passive=PASSIVE_OFF):
if self.key in dict_:
return History.from_object_attribute(self, state, dict_[self.key])
else:
if passive & INIT_OK:
passive ^= INIT_OK
current = self.get(state, dict_, passive=passive)
if current is PASSIVE_NO_RESULT:
return HISTORY_BLANK
else:
return History.from_object_attribute(self, state, current)
def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
if self.key in dict_:
current = dict_[self.key]
elif passive & CALLABLES_OK:
current = self.get(state, dict_, passive=passive)
else:
return []
# can't use __hash__(), can't use __eq__() here
if current is not None and \
current is not PASSIVE_NO_RESULT and \
current is not NEVER_SET:
ret = [(instance_state(current), current)]
else:
ret = [(None, None)]
if self.key in state.committed_state:
original = state.committed_state[self.key]
if original is not None and \
original is not PASSIVE_NO_RESULT and \
original is not NEVER_SET and \
original is not current:
ret.append((instance_state(original), original))
return ret
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
"""Set a value on the given InstanceState.
"""
if self.dispatch._active_history:
old = self.get(
state, dict_, passive=PASSIVE_ONLY_PERSISTENT | NO_AUTOFLUSH)
else:
old = self.get(state, dict_, passive=PASSIVE_NO_FETCH ^ INIT_OK)
if check_old is not None and \
old is not PASSIVE_NO_RESULT and \
check_old is not old:
if pop:
return
else:
raise ValueError(
"Object %s not associated with %s on attribute '%s'" % (
instance_str(check_old),
state_str(state),
self.key
))
value = self.fire_replace_event(state, dict_, value, old, initiator)
dict_[self.key] = value
def fire_remove_event(self, state, dict_, value, initiator):
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value, initiator or
self._remove_token or self._init_remove_token())
state._modified_event(dict_, self, value)
def fire_replace_event(self, state, dict_, value, previous, initiator):
if self.trackparent:
if (previous is not value and
previous not in (None, PASSIVE_NO_RESULT, NEVER_SET)):
self.sethasparent(instance_state(previous), state, False)
for fn in self.dispatch.set:
value = fn(
state, value, previous, initiator or
self._replace_token or self._init_append_or_replace_token())
state._modified_event(dict_, self, previous)
if self.trackparent:
if value is not None:
self.sethasparent(instance_state(value), state, True)
return value
class CollectionAttributeImpl(AttributeImpl):
"""A collection-holding attribute that instruments changes in membership.
Only handles collections of instrumented objects.
InstrumentedCollectionAttribute holds an arbitrary, user-specified
container object (defaulting to a list) and brokers access to the
CollectionAdapter, a "view" onto that object that presents consistent bag
semantics to the orm layer independent of the user data implementation.
"""
accepts_scalar_loader = False
uses_objects = True
supports_population = True
collection = True
__slots__ = (
'copy', 'collection_factory', '_append_token', '_remove_token',
'_duck_typed_as'
)
def __init__(self, class_, key, callable_, dispatch,
typecallable=None, trackparent=False, extension=None,
copy_function=None, compare_function=None, **kwargs):
super(CollectionAttributeImpl, self).__init__(
class_,
key,
callable_, dispatch,
trackparent=trackparent,
extension=extension,
compare_function=compare_function,
**kwargs)
if copy_function is None:
copy_function = self.__copy
self.copy = copy_function
self.collection_factory = typecallable
from django.test import TestCase
from manager.models import Page
from datetime import datetime, timedelta
from django.utils import timezone
class PageTestCase(TestCase):
def setUp(self):
now = timezone.now()
Page.objects.create(url="testurl", description="test description")
def test_regular_page_active(self):
"""Page with no pause or time/date range is active."""
page = Page.objects.get(url="/testurl")
self.assertFalse(page.is_paused())
self.assertTrue(page.is_active())
def test_paused_page_not_active(self):
"""Page that has been paused is not active."""
page = Page.objects.get(url="/testurl")
page.paused_at = timezone.now().replace(hour=12)
current_time = timezone.now().replace(hour=13)
self.assertTrue(page.is_paused(current_time))
self.assertFalse(page.is_active(current_time))
def test_previously_paused_page_active(self):
"""Page that has is not paused but has been in the p | ast | is active."""
page = Page.objects.get(url="/testurl")
page.paused_at = timezone.now() - timedelta(hours=48)
self.assertFalse(page.is_paused())
self.assertTrue(page.is_active())
page.paused_at = timezone.now()
morning = timezone.now().replace(hour=6)
self.assertFalse(page.is_paused(morning))
self.assertTrue(page.is_active(morning))
def test_page_active_time_of_day(self):
"""Page has certain times of day it should be visible."""
page = Page.objects.get(url="/testurl")
now = timezone.now().replace(hour=12)
# Default page has no times -> active
self.assertTrue(page.is_active(now))
# Set start time in the future
page.active_time_start = now.replace(hour=13).time()
self.assertFalse(page.is_active(now))
# Set time to be past start time
now = now.replace(hour=14)
self.assertTrue(page.is_active(now))
# Set end time in the future, still active
page.active_time_end = now.replace(hour=15).time()
self.assertTrue(page.is_active(now))
# Set time to be past end-time -> inactive
now = now.replace(hour=16)
self.assertFalse(page.is_active(now))
# Set start time in the future but bigger than end-time
page.active_time_start = now.replace(hour=17).time()
self.assertFalse(page.is_active(now))
# Time bigger than start time in the evening
now = now.replace(hour=19)
self.assertTrue(page.is_active(now))
def test_page_date_range(self):
"""Page has certains dates it should be visible."""
page = Page.objects.get(url="/testurl")
now = timezone.now()
today = now.date()
page.active_date_start = today
self.assertTrue(page.is_active(now))
page.active_date_start = today + timedelta(days=1)
self.assertFalse(page.is_active(now))
page.active_date_start = today - timedelta(days=7)
page.active_date_end = today - timedelta(days=3)
self.assertFalse(page.is_active(now))
def test_page_weekdays(self):
"""Page is active on certain weekdays"""
page = Page.objects.get(url="/testurl")
now = datetime(2014, 4, 28, 16, 53) # Monday
page.active_date_start = now.date()
self.assertTrue(page.is_active(now))
page.monday = False
self.assertFalse(page.is_active(now))
now = now + timedelta(days=1)
self.assertTrue(page.is_active(now))
self.session = None # Set from REDTabSettings
self.script_manager = None # Set from REDTabSettings
self.image_version = None # Set from REDTabSettings
self.service_state = None # Set from REDTabSettings
self.brickd_conf = {}
self.cbox_brickd_ll.addItem('Error')
self.cbox_brickd_ll.addItem('Warn')
self.cbox_brickd_ll.addItem('Info')
self.cbox_brickd_ll.addItem('Debug')
self.cbox_brickd_rt.addItem('cpu')
self.cbox_brickd_rt.addItem('gpio')
self.cbox_brickd_rt.addItem('heartbeat')
self.cbox_brickd_rt.addItem('mmc')
self.cbox_brickd_rt.addItem('off')
self.cbox_brickd_rt.addItem('on')
self.cbox_brickd_gt.addItem('cpu')
self.cbox_brickd_gt.addItem('gpio')
self.cbox_brickd_gt.addItem('heartbeat')
self.cbox_brickd_gt.addItem('mmc')
self.cbox_brickd_gt.addItem('off')
self.cbox_brickd_gt.addItem('on')
# Signals/slots
self.pbutton_brickd_save.clicked.connect(self.slot_brickd_save_clicked)
self.pbutton_brickd_refresh.clicked.connect(self.slot_brickd_refresh_clicked)
self.sbox_brickd_la_ip1.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_la_ip2.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_la_ip3.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_la_ip4.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_lp.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_lwsp.valueChanged.connect(self.brickd_settings_changed)
self.ledit_brickd_secret.textEdited.connect(self.brickd_settings_changed)
self.cbox_brickd_ll.currentIndexChanged.connect(self.brickd_settings_changed)
self.cbox_brickd_rt.currentIndexChanged.connect(self.brickd_settings_changed)
self.cbox_brickd_gt.currentIndexChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_spi_dly.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_rs485_dly.valueChanged.connect(self.brickd_settings_changed)
def tab_on_focus(self):
self.brickd_conf_rfile = REDFile(self.session)
self.slot_brickd_refresh_clicked()
def tab_off_focus(self):
pass
def tab_destroy(self):
pass
def brickd_button_refresh_enabled(self, state):
self.pbutton_brickd_refresh.setEnabled(state)
if state:
self.pbutton_brickd_refresh.setText('Refresh')
else:
self.pbutton_brickd_refresh.setText('Refreshing...')
def brickd_button_save_enabled(self, state):
self.pbutton_brickd_save.setEnabled(state)
def update_brickd_widget_data(self):
if self.brickd_conf is None:
return
# Fill keys with default values if not available
if 'listen.address' not in self.brickd_conf:
self.brickd_conf['listen.address'] = '0.0.0.0'
if 'listen.plain_port' not in self.brickd_conf:
self.brickd_conf['listen.plain_port'] = '4223'
if 'listen.websocket_port' not in self.brickd_conf:
self.brickd_conf['listen.websocket_port'] = '0'
if 'authentication.secret' not in self.brickd_conf:
self.brickd_conf['authentication.secret'] = ''
if 'log.level' not in self.brickd_conf:
self.brickd_conf['log.level'] = 'info'
if 'led_trigger.green' not in self.brickd_conf:
self.brickd_conf['led_trigger.green'] = 'heartbeat'
if 'led_trigger.red' not in self.brickd_conf:
self.brickd_conf['led_trigger.red'] = 'off'
if 'poll_delay.spi' not in self.brickd_conf:
self.brickd_conf['poll_delay.spi'] = '50'
if 'poll_delay.rs485' not in self.brickd_conf:
self.brickd_conf['poll_delay.rs485'] = '4000'
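# For reference, a minimal brickd.conf mirroring the defaults above
# (a sketch of the key=value format the config parser consumes):
#
#   listen.address = 0.0.0.0
#   listen.plain_port = 4223
#   log.level = info
#   led_trigger.green = heartbeat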
l_addr = self.brickd_conf['listen.address'].split('.')
self.sbox_brickd_la_ip1.setValue(int(l_addr[0]))
self.sbox_brickd_la_ip2.setValue(int(l_addr[1]))
self.sbox_brickd_la_ip3.setValue(int(l_addr[2]))
self.sbox_brickd_la_ip4.setValue(int(l_addr[3]))
self.sbox_brickd_lp.setValue(int(self.brickd_conf['listen.plain_port']))
self.sbox_brickd_lwsp.setValue(int(self.brickd_conf['listen.websocket_port']))
self.ledit_brickd_secret.setText(self.brickd_conf['authentication.secret'])
log_level_map = {
'debug': CBOX_BRICKD_LOG_LEVEL_DEBUG,
'info': CBOX_BRICKD_LOG_LEVEL_INFO,
'warn': CBOX_BRICKD_LOG_LEVEL_WARN,
'error': CBOX_BRICKD_LOG_LEVEL_ERROR,
}
trigger_map = {
'cpu': CBOX_BRICKD_LED_TRIGGER_CPU,
'gpio': CBOX_BRICKD_LED_TRIGGER_GPIO,
'heartbeat': CBOX_BRICKD_LED_TRIGGER_HEARTBEAT,
'mmc': CBOX_BRICKD_LED_TRIGGER_MMC,
'off': CBOX_BRICKD_LED_TRIGGER_OFF,
'on': CBOX_BRICKD_LED_TRIGGER_ON,
}
log_level = self.brickd_conf['log.level']
if log_level in log_level_map:
self.cbox_brickd_ll.setCurrentIndex(log_level_map[log_level])
trigger_green = self.brickd_conf['led_trigger.green']
if trigger_green in trigger_map:
self.cbox_brickd_gt.setCurrentIndex(trigger_map[trigger_green])
trigger_red = self.brickd_conf['led_trigger.red']
if trigger_red in trigger_map:
self.cbox_brickd_rt.setCurrentIndex(trigger_map[trigger_red])
self.sbox_brickd_spi_dly.setValue(int(self.brickd_conf['poll_delay.spi']))
self.sbox_brickd_rs485_dly.setValue(int(self.brickd_conf['poll_delay.rs485']))
# The slots
def brickd_settings_changed(self, value):
self.brickd_button_save_enabled(True)
def slot_brickd_refresh_clicked(self):
self.brickd_button_refresh_enabled(False)
def cb_open(red_file):
def cb_read(red_file, result):
red_file.release()
if result and result.data is not None:
self.brickd_conf = config_parser.parse(result.data.decode('utf-8'))
self.update_brickd_widget_data()
else:
QMessageBox.critical(get_main_window(),
'Settings | Brick Daemon',
'Error reading brickd config file.')
self.brickd_button_refresh_enabled(True)
self.brickd_button_save_enabled(False)
red_file.read_async(4096, lambda x: cb_read(red_file, x))
def cb_open_error():
self.brickd_button_refresh_enabled(True)
QMessageBox.critical(get_main_window(),
'Settings | Brick Daemon',
'Error opening brickd config file.')
async_call(self.brickd_conf_rfile.open,
(BRICKD_CONF_PATH, REDFile.FLAG_READ_ONLY | REDFile.FLAG_NON_BLOCKING, 0, 0, 0),
cb_open,
cb_open_error)
def slot_brickd_save_clicked(self):
adr = '.'.join((str(self.sbox_brickd_la_ip1.value()),
str(self.sbox_brickd_la_ip2.value()),
str(self.sbox_brickd_la_ip3.value()),
str(self.sbox_brickd_la_ip4.value())))
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 17 05:52:09 2016
@author: hclqaVirtualBox1
"""
from object_test import session
import random
import string
import model
test_page = model.Page()
N = 5
test_page.title = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(N))
test_page.content = u'Test content'
print(test_page.title)
session.add(test_page)
print("1 ----- TestPage ID")
print(test_page.id)
"""
At this point the test_page object is known to SQLAlchemy,
but not to the database. To send it to the database,
a flush operation can be forced:
"""
session.flush()
print("2 ----- TestPage ID")
print(test_page.id)
"""
Commits - Commits the changes in db
"""
session.commit()
"""
Delete - To delete the test_page object from the database you would use:
"""
session.delete(test_page)
session.flush()
print("3 ----- TestPage ID")
print(test_page.id)
"""
Rollback - At this point you can either commit
the transaction or do a rollback.
Let’s do a rollback this time:
"""
session.rollback()
print("4 ----- TestPage ID")
print(test_page.id)
"""
Query - Queries are performed with query objects that are created from the
session. The simplest way to create and use a query object is like this:
"""
page_q = session.query(model.Page)
for page in page_q:
print(page.title)
print("---- page_q.all()")
print(page_q.all())
page = page_q.first()
print(page.title)
print(page_q[2:5])
print(page_q.get(1).title)
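"""
Filter - queries can also be narrowed before fetching; a minimal sketch
(hypothetical criteria) using the standard filter_by and filter forms:
"""
print(session.query(model.Page).filter_by(id=1).first())
print(session.query(model.Page).filter(model.Page.id == 1).first())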
#
#
#"""
#Working with Objects
#-------------------
#Now let’s think about how you could add a comment to a page.
# One approach would be to insert a new row in the comment table using the
# SQL Expression API, ensuring that the pageid field contained the value 1
# so that the comment was associated with the correct page via a foreign key.
#
# The Object-Relational API provides a much better approach:
#"""
#
#comment1 = model.Comment()
#comment1.name = u'James'
#comment1.email = u'james@example.com'
#comment1.content = u'This page needs a bit more detail ;-)'
#comment2 = model.Comment()
#comment2.name = u'Mike'
#comment2.email = u'mike@example.com'
#page.comments.append(comment1)
#page.comments.append(comment2)
#session.commit()
#!/usr/bin/env python
##################################################################################################
## mtrecv.py
##
## Receive message via RockBLOCK over serial
##################################################################################################
import sys
import os
from rbControl import RockBlockControl
if __name__ == '__main__':
if len(sys.argv) == 1:
# TODO: configurable serial device
RockBlockControl("/dev/ttyUSB0").mt_recv()
else:
print "usage: %s" % os.path.basename | (sys.argv[0])
exit(1)
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.Model import Model
import logging
logger = logging.getLogger(__name__)
class EndorsementLineCodeType(Model):
MODEL_MAP = {
'tag_name': 'EndorsementLineCode',
'attributes': {
'Type': {},
'Code': {}, # from grPostal
'*': {},
}
}
self.widgets['importSRTTB'].set_stock_id(Gtk.STOCK_ADD)
self.widgets['splitSubsTB'] = Gtk.ToolButton()
self.widgets['splitSubsTB'].set_tooltip_text('Split Subtitle')
self.widgets['splitSubsTB'].set_stock_id(Gtk.STOCK_CUT)
self.widgets['visualSyncTB'] = Gtk.ToolButton()
self.widgets['visualSyncTB'].set_tooltip_text('Visual Sync')
self.widgets['visualSyncTB'].set_stock_id(Gtk.STOCK_REFRESH)
self.widgets['autoSyncOtherVersionTB'] = Gtk.ToolButton()
self.widgets['autoSyncOtherVersionTB'].set_tooltip_text('Try to automatically sync another version.')
self.widgets['autoSyncOtherVersionTB'].set_stock_id(Gtk.STOCK_UNINDENT)
self.widgets['checkTB'] = Gtk.ToolButton()
self.widgets['checkTB'].set_tooltip_text('Check Subtitles')
self.widgets['checkTB'].set_stock_id(Gtk.STOCK_SPELL_CHECK)
self.widgets['separator2TB'] = Gtk.SeparatorToolItem()
self.widgets['separator3TB'] = Gtk.SeparatorToolItem()
self.widgets['separator4TB'] = Gtk.SeparatorToolItem()
self.widgets['position-label'] = Gtk.Label('Position: 00:00:00,000 ')
self.widgets['duration-label'] = Gtk.Label('Duration: 00:00:00,000\t\t')
self.widgets['MergeSplitTB'] = Gtk.ToolButton()
self.widgets['MergeSplitTB'].set_tooltip_text('Merge/Split (non project subs)')
self.widgets['MergeSplitTB'].set_stock_id(Gtk.STOCK_PAGE_SETUP)
self.widgets['toolbar'].add(self.widgets['newFileTB'])
self.widgets['toolbar'].add(self.widgets['openFileTB'])
self.widgets['toolbar'].add(self.widgets['saveFileTB'])
self.widgets['toolbar'].add(self.widgets['separator1TB'])
self.widgets['toolbar'].add(self.widgets['preferencesTB'])
self.widgets['toolbar'].add(self.widgets['separator2TB'])
self.widgets['toolbar'].add(self.widgets['undoTB'])
self.widgets['toolbar'].add(self.widgets['redoTB'])
self.widgets['toolbar'].add(self.widgets['separator3TB'])
self.widgets['toolbar'].add(self.widgets['importSRTTB'])
self.widgets['toolbar'].add(self.widgets['splitSubsTB'])
#self.widgets['toolbar'].add(self.widgets['autoSyncOtherVersionTB'])
self.widgets['toolbar'].add(self.widgets['checkTB'])
self.widgets['toolbar'].add(self.widgets['visualSyncTB'])
self.widgets['toolbar'].add(self.widgets['separator4TB'])
self.widgets['toolbar'].add(self.widgets['MergeSplitTB'])
# AudioView Context Menu
self.widgets['AudioContextMenu'] = Gtk.Menu()
self.widgets['ACM-SplitHere'] = Gtk.MenuItem('Split Subtitle')
self.widgets['ACM-CreateHere'] = Gtk.MenuItem('New Subtitle')
self.widgets['ACM-DeleteSub'] = Gtk.MenuItem('Delete Subtitle')
self.widgets['ACM-ResetAudioScale'] = Gtk.MenuItem('Reset Vertical Zoom')
self.widgets['ACM-StickZoom'] = Gtk.CheckMenuItem('Stick Zoom')
self.widgets['ACM-StickZoom'].set_tooltip_text('Keep current horizontal zoom size')
self.widgets['AudioContextMenu'].add(self.widgets['ACM-CreateHere'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-SplitHere'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-DeleteSub'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-ResetAudioScale'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-StickZoom'])
self.widgets['ACM-StickZoom'].show()
self.widgets['ACM-SplitHere'].show()
self.widgets['ACM-CreateHere'].show()
self.widgets['ACM-DeleteSub'].show()
# Header Context Menu
self.widgets['HeaderContextMenu'] = Gtk.Menu()
self.widgets['HCM-N'] = Gtk.CheckMenuItem('N')
self.widgets['HCM-StartTime'] = Gtk.CheckMenuItem('StartTime')
self.widgets['HCM-StopTime'] = Gtk.CheckMenuItem('StopTime')
self.widgets['HCM-Duration'] = Gtk.CheckMenuItem('Duration')
self.widgets['HCM-Reference'] = Gtk.CheckMenuItem('Reference')
self.widgets['HCM-RS'] = Gtk.CheckMenuItem('RS')
self.widgets['HCM-Count'] = Gtk.CheckMenuItem('Count')
self.widgets['HCM-Info'] = Gtk.CheckMenuItem('Info')
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-N'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-StartTime'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-StopTime'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-Duration'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-Reference'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-RS'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-Count'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-Info'])
self.widgets['HCM-N'].set_active(True)
self.widgets['HCM-StartTime'].set_active(True)
self.widgets['HCM-StopTime'].set_active(True)
self.widgets['HCM-Duration'].set_active(True)
self.widgets['HCM-Reference'].set_active(True)
self.widgets['HCM-RS'].set_active(True)
self.widgets['HCM-Count'].set_active(True)
self.widgets['HCM-Info'].set_active(True)
self.widgets['HCM-N'].show()
self.widgets['HCM-StartTime'].show()
self.widgets['HCM-StopTime'].show()
self.widgets['HCM-Duration'].show()
self.widgets['HCM-Reference'].show()
self.widgets['HCM-RS'].show()
self.widgets['HCM-Count'].show()
self.widgets['HCM-Info'].show()
# TreeView Context Menu
self.widgets['TVContextMenu'] = Gtk.Menu()
self.widgets['TVCM-Delete'] = Gtk.MenuItem('Delete Subtitle(s)')
self.widgets['TVCM-Merge'] = Gtk.MenuItem('Merge Subtitles')
self.widgets['TVCM-Merge-To-Dialog'] = Gtk.MenuItem('Merge to Dialog')
self.widgets['TVCM-DurationEdit'] = Gtk.MenuItem('Edit Duration')
self.widgets['TVCM-TimeEditDialog'] = Gtk.MenuItem('Edit Time')
self.widgets['TVCM-SyncDialog'] = Gtk.MenuItem('Sync')
self.widgets['TVContextMenu'].add(self.widgets['TVCM-Merge'])
self.widgets['TVContextMenu'].add(self.widgets['TVCM-Merge-To-Dialog'])
self.widgets['TVContextMenu'].add(self.widgets['TVCM-Delete'])
self.widgets['TVContextMenu'].add(self.widgets['TVCM-DurationEdit'])
self.widgets['TVContextMenu'].add(self.widgets['TVCM-TimeEditDialog'])
self.widgets['TVContextMenu'].add(self.widgets['TVCM-SyncDialog'])
self.widgets['TVCM-Delete'].show()
self.widgets['TVCM-Merge'].show()
self.widgets['TVCM-Merge-To-Dialog'].show()
# Video Context Menu
self.widgets['VideoContextMenu'] = Gtk.Menu()
self.widgets['VCM-SceneDetect'] = Gtk.MenuItem('Detect Scene Changes')
self.widgets['VCM-StopDetection'] = Gtk.MenuItem('Stop Detection')
self.widgets['VCM-TwoPassSD'] = Gtk.CheckMenuItem('Two-pass Detection (faster)')
self.widgets['VCM-TwoPassSD'].set_active(True)
self.widgets['VCM-Detach'] = Gtk.MenuItem('Detach Video')
self.widgets['VCM-ChangeSubFont'] = Gtk.MenuItem('Change Subtitle Font')
self.widgets['VCM-Separator1'] = Gtk.SeparatorMenuItem()
self.widgets['VCM-Separator2'] = Gtk.SeparatorMenuItem()
self.widgets['VCM-Lock'] = Gtk.CheckMenuItem('Lock')
self.widgets['VCM-Close'] = Gtk.MenuItem('Close')
self.widgets['VideoContextMenu'].add(self.widgets['VCM-SceneDetect'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-StopDetection'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-TwoPassSD'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-Separator1'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-ChangeSubFont'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-Separator2'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-Detach'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-Lock'])
self.widgets['VideoContextMenu'].add(self.widgets['VCM-Close'])
self.widgets['VCM-SceneDetect'].show()
self.widgets['VCM-StopDetection'].hide()
self.wi
"""
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'le0az@o@j&x@5gl01_fp6&rj445lmxj15ngt2x^x#$ng71)^yd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.myapp',
'apps.outsideapp',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
import asyncio
import unittest
import random
from gremlinpy import Gremlin
from . import ConnectionTestCases, EntityTestCases, MapperTestCases
from gizmo import Mapper, Request, Collection, Vertex, Edge
from gizmo.mapper import EntityMapper
class BaseTests(unittest.TestCase):
def setUp(self):
self.request = Request('localhost', port=8182)
self.gremlin = Gremlin('gizmo_testing')
self.mapper = Mapper(self.request, self.gremlin)
self.ioloop = asyncio.get_event_loop()
super(BaseTests, self).setUp()
def tearDown(self):
super(BaseTests, self).tearDown()
async def purge(self):
script = "%s.V().map{it.get().remove()}" % self.gremlin.gv
res = await self.mapper.query(script=script)
return res
class ConnectionTests(BaseTests, ConnectionTestCases):
pass
class EntityTests(EntityTestCases, BaseTests):
pass
class MapperTests(MapperTestCases, BaseTests):
pass
class CollectionTests(BaseTests):
pass
class TraversalTests(BaseTests):
pass
if __name__ == '__main__':
unittest.main()
from yajuu.extractors.extractor import Extractor
from yajuu.media.sources.source_list import SourceList
class SeasonExtractor(Extractor):
def __init__(self, media, season, range_):
super().__init__(media)
self.seasons = {}
self.season = season
self.start, self.end = range_
# Overwrite
self.sources = {}
def _should_process(self, episode_identifier):
try:
episode_number = int(episode_identifier)
except ValueError:
return False
return self.start <= episode_number <= self.end
def _add_source(self, identifier, source):
if identifier not in self.sources:
self.sources[identifier] = SourceList()
self.sources[identifier].add_source(source)
return True
def _add_sources(self, identifier, sources):
returned = []
if sources is None:
return []
for source in sources:
returned.append(self._add_source(identifier, source))
return returned
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License version 3 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Unit tests for the dispatcher module."""
from __future__ import print_function
import types
import pytest
from nav.smsd import dispatcher
class TestDispatcherHandler(object):
"""Tests for the DispatcherHandler class.
Uses a subclass of the DispatcherHandler to provide a fake
dispatcher loader function. This loads a faked dispatcher
module/class that will cooperate with this unit test.
"""
config = {
'main': {'exit_on_permanent_error': 'yes'},
'dispatcher': {'dispatcherretry': '30',
'dispatcher1': 'FakeDispatcher'},
'FakeDispatcher': {}
}
def test_init_with_simple_config(self):
assert FakeDispatcherHandler(self.config)
def test_empty_message_list(self):
handler = FakeDispatcherHandler(self.config)
assert handler.sendsms('fakenumber', [])
def test_dispatcher_exception(self):
handler = FakeDispatcherHandler(self.config)
with pytest.raises(dispatcher.DispatcherError):
handler.sendsms('failure', [])
def test_dispatcher_unhandled_exception(self):
handler = FakeDispatcherHandler(self.config)
with pytest.raises(dispatcher.DispatcherError):
handler.sendsms('unhandled', [])
class FakeDispatcherHandler(dispatcher.DispatcherHandler):
def importbyname(self, name):
print("import by name: %r" % name)
fakemodule = types.ModuleType('fakedispatcher')
fakemodule.FakeDispatcher = FakeDispatcher
return fakemodule
class FakeDispatcher(object):
def __init__(self, *args, **kwargs):
self.lastfailed = None
def sendsms(self, phone, msgs):
print("got phone %r and msgs %r" % (phone, msgs))
if phone == 'failure':
raise dispatcher.DispatcherError('FakeDispatcher failed')
elif phone == 'unhandled':
raise Exception('This exception should be unknown')
return (None, 1, 0, 1, 1)
gamma = 1j*npy.ones(len(freq)),
z0 = 50*npy.ones(len(freq)),
)
self.assertEqual(a.line(1),a.line(1))
with self.assertRaises(NotImplementedError):
a.npoints=4
def test_write_csv(self):
fname = os.path.join(self.files_dir, 'out.csv')
self.dummy_media.write_csv(fname)
os.remove(fname)
def test_from_csv(self):
fname = os.path.join(self.files_dir, 'out.csv')
self.dummy_media.write_csv(fname)
a_media = DefinedGammaZ0.from_csv(fname)
self.assertEqual(a_media,self.dummy_media)
os.remove(fname)
class STwoPortsNetworkTestCase(unittest.TestCase):
"""
Check that S parameters of media base elements versus theoretical results.
"""
def setUp(self):
self.dummy_media = DefinedGammaZ0(
frequency=Frequency(1, 100, 21, 'GHz'),
gamma=1j,
z0=50,
)
def test_s_series_element(self):
"""
Series elements of impedance Z:
○---[Z]---○
○---------○
have S matrix of the form:
[ Z/Z0 / (Z/Z0 + 2) 2/(Z/Z0 + 2) ]
[ 2/(Z/Z0 + 2) Z/Z0 / (Z/Z0 + 2) ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.resistor(R)
Z0 = self.dummy_media.z0
S11 = (R/Z0) / (R/Z0 + 2)
S21 = 2 / (R/Z0 + 2)
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
def test_s_shunt_element(self):
"""
Shunt elements of admittance Y:
○---------○
|
[Y]
|
○---------○
have S matrix of the form:
[ -Y Z0 / (Y Z0 + 2) 2/(Y Z0 + 2) ]
[ 2/(Y Z0 + 2) -Y Z0 / (Y Z0 + 2) ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.shunt(self.dummy_media.resistor(R)**self.dummy_media.short())
Z0 = self.dummy_media.z0
S11 = -(1/R*Z0) / (1/R*Z0 + 2)
S21 = 2 / (1/R*Z0 + 2)
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
def test_s_lossless_line(self):
"""
Lossless transmission line of characteristic impedance z1, length l
and wavenumber beta
_______
○----- -----○
z0 z1 z0
○-----_______-----○
"""
l = 5.0
z1 = 30.0
z0 = self.dummy_media.z0
ntw = self.dummy_media.line(d=0, unit='m', z0=z0) \
** self.dummy_media.line(d=l, unit='m', z0=z1) \
** self.dummy_media.line(d=0, unit='m', z0=z0)
beta = self.dummy_media.beta
_z1 = z1/z0
S11 = 1j*(_z1**2 - 1)*npy.sin(beta*l) / \
(2*_z1*npy.cos(beta*l) + 1j*(_z1**2 + 1)*npy.sin(beta*l))
S21 = 2*_z1 / \
(2*_z1*npy.cos(beta*l) + 1j*(_z1**2 + 1)*npy.sin(beta*l))
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
def test_s_lossy_line(self):
"""
Lossy transmission line of characteristic impedance Z0, length l
and propagation constant gamma = alpha + j beta
○---------○
○---------○
has, when matched to Z0, an S matrix of the form:
[ 0 exp(-gamma l) ]
[ exp(-gamma l) 0 ]
"""
class ABCDTwoPortsNetworkTestCase(unittest.TestCase):
"""
Check that ABCD parameters of media base elements (such as lumped elements)
versus theoretical results.
"""
def setUp(self):
self.dummy_media = DefinedGammaZ0(
frequency=Frequency(1, 100, 21, 'GHz'),
gamma=1j,
z0=50,
)
def test_abcd_series_element(self):
"""
Series elements of impedance Z:
○---[Z]---○
○---------○
have ABCD matrix of the form:
[ 1 Z ]
[ 0 1 ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.resistor(R)
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], R)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_shunt_element(self):
"""
Shunt elements of admittance Y:
○---------○
|
[Y]
|
○---------○
have ABCD matrix of the form:
[ 1 0 ]
[ Y 1 ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.shunt(self.dummy_media.resistor(R)**self.dummy_media.short())
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1.0/R)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_series_shunt_elements(self):
"""
Series and Shunt elements of impedance Zs and Zp:
○---[Zs]--------○
|
[Zp]
|
○--------------○
have ABCD matrix of the form:
[ 1 + Zs/Zp Zs ]
[ 1/Zp 1 ]
"""
Rs = 2.0
Rp = 3.0
series_resistor = self.dummy_media.resistor(Rs)
shunt_resistor = self.dummy_media.shunt(self.dummy_media.resistor(Rp) ** self.dummy_media.short())
ntw = series_resistor ** shunt_resistor
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0+Rs/Rp)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], Rs)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1.0/Rp)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_thru(self):
"""
Thru has ABCD matrix of the form:
[ 1 0 ]
[ 0 1 ]
"""
ntw = self.dummy_media.thru()
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_lossless_line(self):
"""
Lossless transmission line of characteristic impedance Z0, length l
and wavenumber beta
○---------○
○---------○
has ABCD matrix of the form:
[ cos(beta l) j Z0 sin(beta l) ]
[ j/Z0 sin(beta l) cos(beta l) ]
"""
l = 5
z0 = 80
ntw = self.dummy_media.line(d=l, unit='m', z0=z0)
beta = self.dummy_media.beta
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], npy.cos(beta*l))
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], 1j*z0*npy.sin(beta*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1j/z0*npy.sin(beta*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], npy.cos(beta*l))
def test_abcd_lossy_line(self):
"""
Lossy transmission line of characteristic impedance Z0, length l
and propagation constant gamma = alpha + j beta
○---------○
○---------○
has ABCD matrix of the form:
[ cosh(gamma l) Z0 sinh(gamma l) ]
[ 1/Z0 sinh(gamma l) cosh(gamma l) ]
"""
l = 5.0
z0 = 30.0
alpha = 0.5
beta = 2.0
lossy_media = DefinedGammaZ0(
frequency=Frequency(1, 100, 21, 'GHz'),
gamma=alpha + 1j*beta,
z0=z0
)
ntw = lossy_media.line(d=l, unit='m', z0=z0)
# Assertions follow the ABCD form given in the docstring
gamma = alpha + 1j*beta
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], npy.cosh(gamma*l))
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], z0*npy.sinh(gamma*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1.0/z0*npy.sinh(gamma*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], npy.cosh(gamma*l))
#!/usr/bin/env python
"""Tests for API call routers."""
from absl import app
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import compatibility
from grr_response_proto import tests_pb2
from grr_response_server import access_control
from grr_response_server.gui import api_call_router
from grr.test_lib import test_lib
class SingleMethodDummyApiCallRouter(api_call_router.ApiCallRouter):
"""Dummy ApiCallRouter implementation overriding just a single method."""
@api_call_router.Http("GET", "/api/foo/bar")
def SomeRandomMethod(self, args, context=None):
pass
def CreateFlow(self, args, context=None):
pass
class SingleMethodDummyApiCallRouterChild(SingleMethodDummyApiCallRouter):
pass
class EmptyRouter(api_call_router.ApiCallRouterStub):
pass
class ApiCallRouterTest(test_lib.GRRBaseTest):
"""Tests for ApiCallRouter."""
def testAllAnnotatedMethodsAreNotImplemented(self):
# We can't initialize ApiCallRouter directly because it's abstract.
router = EmptyRouter()
for name in api_call_router.ApiCallRouter.GetAnnotatedMethods():
with self.assertRaises(NotImplementedError):
getattr(router, name)(None, context=None)
def testGetAnnotatedMethodsReturnsNonEmptyDict(self):
methods = api_call_router.ApiCallRouterStub.GetAnnotatedMethods()
self.assertTrue(methods)
def testGetAnnotatedMethodsReturnsMethodsFromAllClassesInMroChain(self):
self.assertIn("SomeRandomMethod",
SingleMethodDummyApiCallRouter.GetAnnotatedMethods())
self.assertIn("SomeRandomMethod",
SingleMethodDummyApiCallRouterChild.GetAnnotatedMethods())
def testHttpUrlParametersMatchArgs(self):
"""Tests that URL params are actual fields of ArgsType in HTTP routes."""
# Example:
# @ArgsType(api_client.ApiGetClientArgs)
# @Http("GET", "/api/clients/<client_id>")
methods = api_call_router.ApiCallRouterStub.GetAnnotatedMethods()
for method in methods.values():
if method.args_type is None:
continue # Skip methods like ListOutputPluginDescriptors.
valid_parameters = method.args_type.type_infos.descriptor_names
for name in method.GetQueryParamsNames():
self.assertIn(
name, valid_parameters,
"Parameter {} in route {} is not found in {}. "
"Valid parameters are {}.".format(
name, method.name, compatibility.GetName(method.args_type),
valid_parameters))
def testRouterMethodNamesAreInLengthLimit(self):
for name in api_call_router.ApiCallRouterStub.GetAnnotatedMethods():
self.assertLessEqual(
len(name), 128,
"Router method name {} exceeds MySQL length limit of 128.".format(
name))
class DisabledApiCallRouterTest(test_lib.GRRBaseTest):
"""Tests for ApiCallRouter."""
def testRaisesUnauthorizedAccess(self):
router = api_call_router.DisabledApiCallRouter()
with self.assertRaises(access_control.UnauthorizedAccess):
router.SearchClients(None)
class ApiSingleStringArgument(rdf_structs.RDFProtoStruct):
protobuf = tests_pb2.ApiSingleStringArgument
class RouterMethodMetadataTest(test_lib.GRRBaseTest):
"""Tests for RouterMethodMetadata."""
def testGetQueryParamsNamesReturnsEmptyListsOnEmptyMetadata(self):
m = api_call_router.RouterMethodMetadata("SomeMet | hod")
self.assertEqual(m.GetQueryParamsNames(), [])
def testGetQueryParamsNamesReturnsMandatoryParamsCorrectly(self):
m = api_call_router.RouterMethodMetadata(
"SomeMethod", http_methods=[("GET", "/a/<arg>/<bar:zoo>", {})])
self.assertEqual(m.GetQueryParamsNames(), ["arg", "zoo"])
def testGetQueryParamsNamesReturnsOptionalParamsForGET(self):
m = api_call_router.RouterMethodMetadata(
"SomeMethod",
args_type=ApiSingleStringArgument,
http_methods=[("GET", "/a/<foo>/<bar:zoo>", {})])
self.assertEqual(m.GetQueryParamsNames(), ["foo", "zoo", "arg"])
def testGetQueryParamsNamesReturnsNoOptionalParamsForPOST(self):
m = api_call_router.RouterMethodMetadata(
"SomeMethod",
args_type=ApiSingleStringArgument,
http_methods=[("POST", "/a/<foo>/<bar:zoo>", {})])
self.assertEqual(m.GetQueryParamsNames(), ["foo", "zoo"])
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
import io
from collections import namedtuple
from twisted.python import usage
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.defer import Deferred, succeed
from txgithub.scripts import gist
from . _options import (_OptionsTestCaseMixin,
_FakeOptionsTestCaseMixin,
_FakePrintTestCaseMixin,
_FakeSystemExitTestCaseMixin,
_SystemExit)
class OptionsTestCase(_OptionsTestCaseMixin):
"""
Tests for L{gist.Options}
"""
files = ('files',)
required_args = files
options_factory = gist.Options
def test_single_file_ok(self):
"""
Files is an argument.
"""
self.config.parseOptions(self.files)
self.assertEqual(self.config['files'], self.files)
def test_files_ok(self):
"""
Multiple files are collected.
"""
self.config.parseOptions(["file1", "file2"])
self.assertEqual(self.config['files'], ("file1", "file2"))
def test_token_ok(self):
"""
--token is an option.
"""
token = 'some token'
self.assert_option(['--token=' + token], 'token', token)
def test_t_ok(self):
"""
-t is short for --token
"""
token = 'some token'
self.assert_option(['-t', token], 'token', token)
class RecordsFakeGistsEndpoint(object):
"""
Records and orchestrates L{FakeGistsEndpoint}.
"""
def __init__(self):
self.create_calls = []
self.create_returns = Deferred()
class FakeGistsEndpoint(object):
"""
A fake implementation of the gists endpoint of
L{txgithub.api.GithubApi}; it records calls.
"""
def __init__(self, recorder):
self._recorder = recorder
def create(self, files):
self._recorder.create_calls.append(files)
return self._recorder.create_returns
class RecordsFakeGithubAPI(object):
"""
Records and orchestrates L{FakeGithubAPI}.
"""
def __init__(self):
self.init_calls = []
class FakeGithubAPI(object):
"""
A fake implementation of L{txgithub.api.GithubApi} that records
calls.
"""
def __init__(self, recorder, gists):
self._recorder = recorder
self.gists = gists
def _init(self, token):
self._recorder.init_calls.append(token)
return self
class PostGistTests(SynchronousTestCase):
"""
Tests for L{gist.postGist}.
"""
def setUp(self):
self.token = "token"
self.getToken_call_count = 0
self.getToken_returns = succeed(self.token)
self.gists_recorder = RecordsFakeGistsEndpoint()
self.gists = FakeGistsEndpoint(self.gists_recorder)
self.api_recorder = RecordsFakeGithubAPI()
self.fake_api = FakeGithubAPI(self.api_recorder, self.gists)
self.content = u"content"
self.stdin = io.StringIO(self.content)
self.open_calls = []
self.open_returns = io.StringIO(self.content)
self.print_calls = []
self.patch(gist, "getToken", self.fake_getToken)
self.patch(gist, "GithubApi", self.fake_api._init)
self.patch(gist, "_open", self.fake_open)
self.patch(gist, "stdin", self.stdin)
self.patch(gist, "_print", self.fake_print)
def fake_getToken(self):
"""
A fake get token implementation that records its calls.
"""
self.getToken_call_count += 1
return self.getToken_returns
def fake_open(self, filename):
"""
A fake L{open} that records its calls.
"""
self.open_calls.append(filename)
return self.open_returns
def fake_print(self, *args):
"""
A fake L{print} that records its calls.
"""
self.print_calls.append(args)
def test_getToken_by_default(self):
"""
When no token is provided, the get token implementation is
called to retrieve one.
"""
gist.postGist("reactor", token="", files=["something"])
self.assertEqual(self.getToken_call_count, 1)
self.assertEqual(self.api_recorder.init_calls, [self.token])
def test_token_used(self):
"""
The provided token is used to connect to GitHub.
"""
token = "my token"
gist.postGist("reactor", token=token, files=["something"])
self.assertEqual(self.getToken_call_count, 0)
self.assertEqual(self.api_recorder.init_calls, [token])
def test_stdin_gist(self):
"""
When no files are provided, the gist is read from stdin.
"""
gist.postGist("reactor", token=self.token, files=())
self.assertEqual(self.gists_recorder.create_calls, [
{
"gistfile1": {
"content": self.content,
},
}
])
self.assertEqual(self.stdin.tell(), len(self.content))
def test_files_used(self):
"""
The filenames provided are read and comprise the gist's content.
"""
filename = "some file"
gist.postGist("reactor", token=self.token, files=[filename])
self.assertEqual(self.open_calls, [filename])
self.assertTrue(self.open_returns.closed)
self.assertEqual(self.gists_recorder.create_calls, [
{
"some file": {
"content": self.content,
},
}
])
def test_response_printed(self):
"""
The URL in the API's response is printed.
"""
url = "https://something"
response = gist.postGist("reactor", token=self.token, files=[])
self.gists_recorder.create_returns.callback(
{
"html_url": url,
}
)
self.successResultOf(response)
self.assertEqual(self.print_calls, [(url,)])
_PostGistCall = namedtuple("_PostGistCall",
["reactor", | "token", "files"])
class RunTests(_FakeOptionsTestCaseMixin,
_FakeSystemExitTestCaseMixin,
_FakePrintTestCaseMixin):
"""
Tests for L{txgithub.scripts.gist.run}
"""
def setUp(self):
super(RunTests, self).setUp()
self.postGist_calls = []
self.postGist_returns = "postGist return value"
self.patch(gist, "Options", lambda: self.options)
self.patch(gist, "_print", self.fake_print)
self.patch(gist, "exit", self.fake_exit)
self.patch(gist, "postGist", self.fake_postGist)
def fake_postGist(self, reactor, token, files):
"""
A fake L{gist.postGist} implementation that records its calls.
"""
self.postGist_calls.append(_PostGistCall(reactor, token, files))
return self.postGist_returns
def test_run_usage_error(self):
"""
A usage error results in a help message and an exit code of 1.
"""
errortext = "error text"
first_line = ': '.join([self.argv0, errortext])
self.options_recorder.parseOptions_raises = usage.UsageError(errortext)
self.assertRaises(_SystemExit,
gist.run, "reactor", self.argv0, "bad args")
self.assertEqual(self.options_recorder.parseOptions_calls,
[("bad args",)])
self.assertEqual(len(self.print_calls), 2)
self.assertEqual(self.print_calls[0], (first_line,))
self.assertIn("--help", self.print_calls[1][0])
self.assertEqual(len(self.exit_calls), 1)
[code] = self.exit_calls
self.assertEqual(code, 1)
self.assertNot(self.postGist_calls)
def test_run_ok(self):
"""
The post gist implementation is called with the options
specified on the command line.
"""
reactor = "reactor"
self.options["token"] = "the token"
self.options["files"] = ("file1",)
result = gist.run(reactor, self.argv0, "good args")
self.assertEqual(self.options_recorder.parseOptions_calls,
[("good args",)])
self.assertEqual(len(self.postGist_calls), 1)
from __future__ import unicode_literals
from django.apps import AppConfig
class WawmembersConfig(AppConfig):
name = 'wawmembers'
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'matplotlib.sphinxext.plot_directive',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'eofs'
copyright = '2013-{} Andrew Dawson'.format(time.localtime().tm_year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import eofs
version = eofs.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'python'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- extlinks configuration ----------------------------------------------------
# Allow e.g. :issue:`42` and :pr:`42` roles:
extlinks = {'issue': ('https://github.com/ajdawson/eofs/issues/%s', '#'),
'pr': ('https://github.com/ajdawson/eofs/pull/%s', '#')}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx13'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['sidebar_toc.html',
'relations.html',
'sourcelink.html',
'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'eofsdoc'
# Options for intersphinx.
intersphinx_mapping = {
'eof2': ('http://ajdawson.github.com/eof2', None),
'iris': ('http://scitools.org.uk/iris/docs/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'xarray': ('http://xarray.pydata.org/en/stable', None),
'dask': ('https://docs.dask.org/en/latest', None),
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
'preamble': """\\usepackage{amssymb}
\\usepackage{amsmath}""",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('userguide/index', 'userguide.tex', 'eofs User Guide', 'Andrew Dawson',
'manual'),
('examples/index', 'examples.tex', 'eofs Examples', 'Andrew Dawson',
'manual'),
('api/index', 'api.tex', 'eofs API Reference', 'Andrew Dawson',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'eofs', 'eofs Documentation',
['Andrew Dawson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------------
# Copyright (C) 2010 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
from __future__ import division
import re
import math
import numpy as N
from . import dates
_formaterror = 'FormatError'
# a format statement in a string
_format_re = re.compile(r'%([-#0-9 +.hlL]*?)([diouxXeEfFgGcrs%])')
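# For example, in '%.2f m' the pattern matches '%.2f', capturing the
# groups ('.2', 'f').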
def localeFormat(totfmt, args, locale=None):
"""Format using fmt statement fmt, qt QLocale object locale and
arguments to formatting args.
* arguments are not supported in this formatting, nor is using
a dict to supply values for statement
"""
# substitute all format statements with string format statements
newfmt = _format_re.sub("%s", totfmt)
# do formatting separately for all statements
strings = []
i = 0
for f in _format_re.finditer(totfmt):
code = f.group(2)
if code == '%':
s = '%'
else:
try:
s = f.group() % args[i]
i += 1
except IndexError:
raise TypeError("Not enough arguments for format string")
s = s.replace('-', u'\u2212')
if locale is not None and code in 'eEfFgG':
s = s.replace('.', locale.decimalPoint())
strings.append(s)
if i != len(args):
raise TypeError("Not all arguments converted during string formatting")
return newfmt % tuple(strings)
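# A minimal usage sketch (hypothetical values): localeFormat('%.2f m', (1.5,))
# returns '1.50 m'; hyphens in formatted values become true minus signs, and
# a supplied QLocale substitutes its own decimal point for float codes.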
def sciToHuman(val, cleanup=False):
"""Convert output from C formatting to human scientific notation.
if cleanup, remove zeros after decimal points
"""
# split around the exponent
leader, exponent = val.split('e')
# strip off trailing decimal point and zeros if no format args
if cleanup and leader.find('.') >= 0:
leader = leader.rstrip('0').rstrip('.')
# trim off leading 1
if leader == '1' and cleanup:
leader = ''
else:
# add multiply sign
leader += u'\u00d7'
return '%s10^{%i}' % (leader, int(exponent))
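# For example, sciToHuman('1.500000e+03', cleanup=True) returns
# u'1.5\u00d710^{3}', while cleanup=False keeps u'1.500000\u00d710^{3}'.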
def formatSciNotation(num, formatargs, locale=None):
"""Format number into form X \times 10^{Y}.
This function trims trailing zeros and decimal point unless a formatting
argument is supplied
This is similar to the %e format string
formatargs is the standard argument in a format string to control the
number of decimal places, etc.
locale is a QLocale object
"""
# handle nan, inf, -inf
if not N.isfinite(num):
return str(num)
# create an initial formatting string
if formatargs:
formatstr = '%' + formatargs + 'e'
else:
formatstr = '%.10e'
# do formatting, catching errors
try:
text = formatstr % num
except Exception:
return _formaterror
text = sciToHuman(text, cleanup=formatargs=='')
# do substitution of decimals
if locale is not None:
text = text.replace('.', locale.decimalPoint())
return text
def formatGeneral(num, fmtarg, locale=None):
"""General formatting which switches from normal to scientic
notation."""
if fmtarg:
# if an argument is given, we convert output
try:
retn = ('%'+fmtarg+'g') % num
except ValueError:
retn = _formaterror
if retn.find('e') >= 0:
# in scientific notation, so convert
retn = sciToHuman(retn, cleanup=False)
else:
a = abs(num)
# manually choose when to switch from normal to scientific
# as the default %g isn't very good
if a >= 1e4 or (a < 1e-2 and a > 1e-110):
retn = formatSciNotation(num, fmtarg, locale=locale)
else:
retn = '%.10g' % num
if locale is not None:
# replace decimal point with correct decimal point
retn = retn.replace('.', locale.decimalPoint())
return retn
engsuffixes = ( 'y', 'z', 'a', 'f', 'p', 'n',
u'\u03bc', 'm', '', 'k', 'M', 'G',
'T', 'P', 'E', 'Z', 'Y' )
def formatEngineering(num, fmtarg, locale=None):
"""Engineering suffix format notation using SI suffixes."""
if num != 0.:
logindex = math.log10( abs(num) ) / 3.
# for numbers < 1 round down suffix
if logindex < 0. and (int(logindex)-logindex) > 1e-6:
logindex -= 1
# make sure we don't go out of bounds
logindex = min( max(logindex, -8),
len(engsuffixes) - 9 )
suffix = engsuffixes[ int(logindex) + 8 ]
val = num / 10**( int(logindex) *3)
else:
suffix = ''
val = num
text = ('%' + fmtarg + 'g%s') % (val, suffix)
if locale is not None:
text = text.replace('.', locale.decimalPoint())
return text
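# For example, formatEngineering(4700.0, '.3') returns '4.7k' and
# formatEngineering(0.0032, '.2') returns '3.2m'.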
# catch general veusz formatting expression
_formatRE = re.compile(r'%([-0-9.+# ]*)(VDVS|VD.|V.|[A-Za-z%])')
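# For example, '%.3Vg' is matched with groups ('.3', 'Vg') and '%VDH'
# with groups ('', 'VDH').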
def formatNumber(num, formatstr, locale=None):
""" Format a number in different ways.
formatstr is a standard C format string, with some additions:
%Ve scientific notation X \times 10^{Y}
%Vg switches from normal notation to scientific outside 10^-2 to 10^4
%VE engineering suffix option
%VDx date formatting, where x is one of the arguments in
http://docs.python.org/lib/module-time.html in the function
strftime
"""
outitems = []
while formatstr:
# repeatedly try to do string format
match = _formatRE.search(formatstr)
if not match:
outitems.append(formatstr)
break
# argument and type of formatting
farg, ftype = match.groups()
# special veusz formatting
if ftype[:1] == 'V':
# special veusz formatting
if ftype == 'Ve':
out = formatSciNotation(num, farg, locale=locale)
elif ftype == 'Vg':
out = formatGeneral(num, farg, locale=locale)
elif ftype == 'VE':
out = formatEngineering(num, farg, locale=locale)
elif ftype[:2] == 'VD':
d = dates.floatToDateTime(num)
# date formatting (seconds since start of epoch)
if ftype[:4] == 'VDVS':
# special seconds operator
out = ('%'+ftype[4:]+'g') % (d.second+d.microsecond*1e-6)
else:
# use date formatting
try:
out = d.strftime(str('%'+ftype[2:]))
except ValueError:
out = _formaterror
else:
out = _formaterror
# replace hyphen with true minus sign
out = out.replace('-', u'\u2212')
elif ftype == '%':
out = '%'
else:
# standard C formatting
try:
out = localeFormat('%' + farg + ftype, (num,), locale=locale)
except Exception:
out = _formaterror
outitems.append(formatstr[:match.start()])
outitems.append(out)
formatstr = formatstr[match.end():]
return ''.join(outitems)
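# A minimal usage sketch: formatNumber(12.0, '%Vg') returns '12', while
# formatNumber(1500.0, '%.3Vg') returns u'1.5\u00d710^{3}': the C-level
# '%.3g' result ('1.5e+03') is converted to human scientific notation.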
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListTables
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-area120-tables
# [START area120tables_v1alpha1_generated_TablesService_ListTables_async]
from google.area120 import tables_v1alpha1
async def sample_list_tables():
# Create a client
client = tables_v1alpha1.TablesServiceAsyncClient()
# Initialize request argument(s)
request = tables_v1alpha1.ListTablesRequest(
)
# Make the request
page_result = client.list_tables(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END area120tables_v1alpha1_generated_TablesService_ListTables_async]
public static void main(String args[]) {
%s
}
}
"""
return template % content_to_add
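# A rough sketch of the rendered result (the opening of the template above
# is truncated): the snippet passed as content_to_add replaces %s inside
# the main method of a wrapper class, so setup_java_class("() -> {};")
# yields a class whose main body is the statement "() -> {};".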
def filter_type_in_method(clazz, the_type, method_name):
""" yields the result of filtering the given class for the given
type inside the given method identified by its name.
"""
for path, node in clazz.filter(the_type):
for p in reversed(path):
if isinstance(p, tree.MethodDeclaration):
if p.name == method_name:
yield path, node
class LambdaSupportTest(unittest.TestCase):
""" Contains tests for java 8 lambda syntax. """
def assert_contains_lambda_expression_in_m(
self, clazz, method_name='main'):
""" asserts that the given tree contains a method with the supplied
method name containing a lambda expression.
"""
matches = list(filter_type_in_method(
clazz, tree.LambdaExpression, method_name))
if not matches:
self.fail('No matching lambda expression found.')
return matches
def test_lambda_support_no_parameters_no_body(self):
""" tests support for lambda with no parameters and no body. """
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class("() -> {};")))
def test_lambda_support_no_parameters_expression_body(self):
""" tests support for lambda with no parameters and an
expression body.
"""
test_classes = [
setup_java_class("() -> 3;"),
setup_java_class("() -> null;"),
setup_java_class("() -> { return 21; };"),
setup_java_class("() -> { System.exit(1); };"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_lambda_support_no_parameters_complex_expression(self):
""" tests support for lambda with no parameters and a
complex expression body.
"""
code = """
() -> {
if (true) return 21;
else
{
int result = 21;
return result / 2;
}
};"""
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class(code)))
def test_parameter_no_type_expression_body(self):
""" tests support for lambda with parameters with inferred types. """
test_classes = [
setup_java_class("(bar) -> bar + 1;"),
setup_java_class("bar -> bar + 1;"),
setup_java_class("x -> x.length();"),
setup_java_class("y -> { y.boom(); };"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_parameter_with_type_expression_body(self):
""" tests support for lambda with parameters with formal types. """
test_classes = [
setup_java_class("(int foo) -> { return foo + 2; };"),
setup_java_class("(String s) -> s.length();"),
setup_java_class("(int foo) -> foo + 1;"),
setup_java_class("(Thread th) -> { th.start(); };"),
setup_java_class("(String foo, String bar) -> "
"foo + bar;"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_parameters_with_no_type_expression_body(self):
""" tests support for multiple lambda parameters
that are specified without their types.
"""
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class("(x, y) -> x + y;")))
def test_parameters_with_mixed_inferred_and_declared_types(self):
""" this tests that lambda type specification mixing is considered
invalid as per the specifications.
"""
with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(x, int y) -> x+y;"))
def test_parameters_inferred_types_with_modifiers(self):
""" this tests that lambda inferred type parameters with modifiers are
considered invalid as per the specifications.
"""
with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(x, final y) -> x+y;"))
def test_invalid_parameters_are_invalid(self):
""" this tests that invalid lambda parameters are are
considered invalid as per the specifications.
"""
        with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(a b c) -> {};"))
def test_cast_works(self):
""" this tests that a cast expression works as expected. """
parse.parse(setup_java_class("String x = (String) A.x() ;"))
class MethodReferenceSyntaxTest(unittest.TestCase):
""" Contains tests for java 8 method reference syntax. "" | "
def assert_contains_method_reference_expression_in_m(
self, clazz, method_name='main'):
""" asserts that the given class contains a method with the supplied
method name containing a method reference.
"""
matches = list(filter_type_in_method(
clazz, tree.MethodReference, method_name))
if not matches:
self.fail('No matching method reference found.')
return matches
def test_method_reference(self):
""" tests that method references are supported. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::length;")))
def test_method_reference_to_the_new_method(self):
""" test support for method references to 'new'. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::new;")))
    def test_method_reference_to_the_new_method_with_explicit_type(self):
""" test support for method references to 'new' with an
explicit type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::<String> new;")))
def test_method_reference_from_super(self):
""" test support for method references from 'super'. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("super::toString;")))
def test_method_reference_from_super_with_identifier(self):
""" test support for method references from Identifier.super. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String.super::toString;")))
@unittest.expectedFailure
def test_method_reference_explicit_type_arguments_for_generic_type(self):
""" currently there is no support for method references
for an explicit type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("List<String>::size;")))
def test_method_reference_explicit_type_arguments(self):
""" test support for method references with an explicit type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("Arrays::<String> sort;")))
@unittest.expectedFailure
def test_method_reference_from_array_type(self):
""" currently there is no support for method references
from a primary type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("int[]::new;")))
class InterfaceSupportTest(unittest.TestCase):
""" Contains tests for java 8 interface extensions. """
def test_interface_support_static_methods(self):
parse.parse("""
interface Foo {
void foo();
static Foo create() {
return new Foo() {
@Override
void foo() {
System.out.println("foo");
}
};
}
}
""")
    def test_interface_support_default_methods(self):
        parse.parse("interface Foo { default void foo() {} }")
|
    assert msg[0] == 'ping'
assert wire.poll(closest[0]) is None
wire.empty()
assert wire.messages == []
def test_eviction():
proto = get_wired_protocol()
proto.routing = routing_table(1000)
wire = proto.wire
# trigger node ping
node = proto.routing.neighbours(random_node())[0]
proto.ping(node)
msg = wire.poll(node)
assert msg[0] == 'ping'
assert wire.messages == []
proto.recv_pong(node, msg[2])
# expect no message and that node is still there
assert wire.messages == []
assert node in proto.routing
# expect node to be on the tail
assert proto.routing.bucket_by_node(node).tail == node
@pytest.mark.timeout(5)
@pytest.mark.xfail
def test_eviction_timeout():
proto = get_wired_protocol()
proto.routing = routing_table(1000)
wire = proto.wire
# trigger node ping
node = proto.routing.neighbours(random_node())[0]
proto.ping(node)
msg = wire.poll(node)
assert msg[0] == 'ping'
assert wire.messages == []
gevent.sleep(kademlia.k_request_timeout)
proto.recv_pong(node, msg[2])
    # expect no message and that the node is not there anymore
assert wire.messages == []
assert node not in proto.routing
# expect node not to be in the replacement_cache
assert node not in proto.routing.bucket_by_node(node).replacement_cache
def test_eviction_node_active():
"""
active nodes (replying in time) should not be evicted
"""
proto = get_wired_protocol()
proto.routing = routing_table(10000) # set high, so add won't split
wire = proto.wire
# get a full bucket
full_buckets = [b for b in proto.routing.buckets if b.is_full and not b.should_split]
assert full_buckets
bucket = full_buckets[0]
assert not bucket.should_split
assert len(bucket) == kademlia.k_bucket_size
bucket_nodes = bucket.nodes[:]
eviction_candidate = bucket.head
# create node to insert
node = random_node()
node.id = bucket.start + 1 # should not split
assert bucket.in_range(node)
assert bucket == proto.routing.bucket_by_node(node)
# insert node
proto.update(node)
# expect bucket was not split
assert len(bucket) == kademlia.k_bucket_size
# expect bucket to be unchanged
assert bucket_nodes == bucket.nodes
assert eviction_candidate == bucket.head
# expect node not to be in bucket yet
assert node not in bucket
assert node not in proto.routing
# expect a ping to bucket.head
msg = wire.poll(eviction_candidate)
assert msg[0] == 'ping'
assert msg[1] == proto.this_node
assert len(proto._expected_pongs) == 1
expected_pingid = proto._expected_pongs.keys()[0]
assert len(expected_pingid) == 96
echo = expected_pingid[:32]
assert len(echo) == 32
assert wire.messages == []
# reply in time
# can not check w/o mcd
print 'sending pong'
proto.recv_pong(eviction_candidate, echo)
# expect no other messages
assert wire.messages == []
# expect node was not added
assert node not in proto.routing
# eviction_candidate is around and was promoted to bucket.tail
assert eviction_candidate in proto.routing
assert eviction_candidate == bucket.tail
# expect node to be in the replacement_cache
assert node in bucket.replacement_cache
@pytest.mark.timeout(5)
@pytest.mark.xfail
def test_eviction_node_inactive():
"""
active nodes (replying in time) should not be evicted
"""
proto = get_wired_protocol()
proto.routing = routing_table(10000) # set high, so add won't split
wire = proto.wire
# get a full bucket
full_buckets = [b for b in proto.routing.buckets if b.is_full and not b.should_split]
assert full_buckets
bucket = full_buckets[0]
assert not bucket.should_split
assert len(bucket) == kademlia.k_bucket_size
bucket_nodes = bucket.nodes[:]
eviction_candidate = bucket.head
# create node to insert
node = random_node()
node.id = bucket.start + 1 # should not split
assert bucket.in_range(node)
assert bucket == proto.routing.bucket_by_node(node)
# insert node
proto.update(node)
# expect bucket was not split
assert len(bucket) == kademlia.k_bucket_size
# expect bucket to be unchanged
assert bucket_nodes == bucket.nodes
assert eviction_candidate == bucket.head
# expect node not to be in bucket yet
assert node not in bucket
assert node not in proto.routing
# expect a ping to bucket.head
msg = wire.poll(eviction_candidate)
assert msg[0] == 'ping'
assert msg[1] == proto.this_node
assert len(proto._expected_pongs) == 1
expected_pingid = proto._expected_pongs.keys()[0]
assert len(expected_pingid) == 96
echo = expected_pingid[:32]
assert len(echo) == 32
assert wire.messages == []
# reply late
gevent.sleep(kademlia.k_request_timeout)
proto.recv_pong(eviction_candidate, echo)
# expect no other messages
assert wire.messages == []
    # expect the new node was added
    assert node in proto.routing
    # eviction_candidate was evicted; the new node is now bucket.tail
    assert eviction_candidate not in proto.routing
    assert node == bucket.tail
    # expect eviction_candidate not to be in the replacement_cache
    assert eviction_candidate not in bucket.replacement_cache
def test_eviction_node_split():
"""
active nodes (replying in time) should not be evicted
"""
proto = get_wired_protocol()
proto.routing = routing_table(1000) # set lpw, so we'll split
wire = proto.wire
# get a full bucket
full_buckets = [b for b in proto.routing.buckets if b.is_full and b.should_split]
assert full_buckets
bucket = full_buckets[0]
assert bucket.should_split
assert len(bucket) == kademlia.k_bucket_size
bucket_nodes = bucket.nodes[:]
eviction_candidate = bucket.head
# create node to insert
node = random_node()
    node.id = bucket.start + 1  # in range of the bucket (which will split)
assert bucket.in_range(node)
assert bucket == proto.routing.bucket_by_node(node)
# insert node
proto.update(node)
# expect bucket to be unchanged
assert bucket_nodes == bucket.nodes
assert eviction_candidate == bucket.head
# expect node not to be in bucket yet
assert node not in bucket
assert node in proto.routing
# expect no ping to bucket.head
assert not wire.poll(eviction_candidate)
assert wire.messages == []
    # expect node was added (the bucket split instead of evicting)
    assert node in proto.routing
# eviction_candidate is around and was unchanged
assert eviction_candidate == bucket.head
def test_ping_adds_sender():
p = get_wired_protocol()
assert len(p.routing) == 0
for i in range(10):
n = random_node()
p.recv_ping(n, 'some id %d' % i)
assert len(p.routing) == i + 1
p.wire.empty()
def test_two():
print
one = get_wired_protocol()
one.routing = routing_table(100)
two = get_wired_protocol()
wire = one.wire
assert one.this_node != two.this_node
two.ping(one.this_node)
# print 'messages', wire.messages
    wire.process([one, two])
two.find_node(two.this_node.id)
# print 'messages', wire.messages
msg = wire.process([one, two], steps=2)
# print 'messages', wire.messages
assert len(wire.messages) >= kademlia.k_bucket_size
msg = wire.messages.pop(0)
assert msg[1] == 'find_node'
for m in wire.messages[kademlia.k_find_concurrency:]:
assert m[1] == 'ping'
wire.empty()
def test_many(num_nodes=17):
WireMock.empty()
assert num_nodes >= kademlia.k_bucket_size + 1
protos = []
for i in range(num_nodes):
protos.append(get_wired_protocol())
bootstrap = protos[0]
wire = bootstrap.wire
# bootstrap
for p in protos[1:]:
p.bootstrap([bootstrap.this_node])
wire.process(protos) # successively add nodes
    # now everybody does a find node to fill the buckets
for p in protos[1:]:
p.find_node(p.this_node.id)
wire.process(protos) # can all send in parallel
for i, p in enumerate(protos):
# print i, len(p.routing)
assert |
# -*- coding:utf-8 -*-
__author__ = 'chenjun'
import torch
from torch.autograd import Variable
from utils.util import *
"""Beam search module.
Beam search takes the top K results from the model, predicts the K results for
each of the previous K result, getting K*K results. Pick the top K results from
K*K results, and start over again until a certain number of results are fully
decoded.
"""
class Hypothesis(object):
"""Defines a hypothesis during beam search."""
def __init__(self, tokens, log_prob, state):
"""Hypothesis constructor.
Args:
tokens: start tokens for decoding.
log_prob: log prob of the start tokens, usually 1.
state: decoder state.
"""
self.tokens = tokens
self.log_prob = log_prob
self.state = state
def extend(self, token, log_prob, new_state):
"""Extend the hypothesis with result from latest step.
Args:
token: latest token from decoding.
log_prob: log prob of the latest decoded tokens.
new_state: decoder output state. Fed to the decoder for next step.
Returns:
New Hypothesis with the results from latest step.
"""
return Hypothesis(self.tokens + [token], self.log_prob + log_prob, new_state)
@property
def latest_token(self):
return self.tokens[-1]
@property
def sequence_tokens(self):
return self.tokens
@property
def decode_state(self):
return self.state
class BeamSearch(object):
"""Beam search for generation."""
def __init__(self, vocab_size, beam_size, state=None):
"""
beam search init.
:param vocab_size: target vocab size
:param beam_size: beam size
"""
self.beam_size = beam_size
self.vocab_size = vocab_size
self.hypothesis = [Hypothesis([], 0.0, state)] * self.beam_size
self.results = []
def top_hypothesis(self, hypothesis, normalize=False):
"""
sort the hypothesis list based on log_probs and length.
:param hypothesis: list of hypothesis
        :param normalize: bool, normalize by length; used only for the final output
:return:
"""
# This length normalization is only effective for the final results.
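        # e.g. without normalization a 2-token hypothesis at log-prob -2.4
        # outranks a 4-token one at -4.0, but per token (-1.2 vs. -1.0)
        # the longer hypothesis wins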
if normalize:
return sorted(hypothesis, key=lambda h: h.log_prob/len(h.tokens), reverse=True)
else:
return sorted(hypothesis, key=lambda h: h.log_prob, reverse=True)
def variable(self, token):
"""
convert token to torch variable.
:param token: int
:return:
"""
return Variable(torch.LongTensor([[token]]))
def beam_search(self, inputs):
"""
beam search to generate sequence.
:param inputs: list of decoder outputs, (decoder_out, decode_state)
:return:
"""
all_hypothesis = []
for i, (input, state) in enumerate(inputs):
top_log_probs, top_tokens = input.data.topk(self.vocab_size)
for j in xrange(self.beam_size*2):
token = top_tokens[0][j] # value
log_prob = top_log_probs[0][j] # value
                all_hypothesis.append(self.hypothesis[i].extend(token, log_prob, state))
# Filter and collect any hypotheses that have the end token.
self.hypothesis = []
for h in self.top_hypothesis(all_hypothesis):
if h.latest_token == EOS_token:
                # Pull the hypothesis off the beam if the end token is reached.
self.results.append(h)
else:
                # Otherwise continue to extend the hypothesis.
self.hypothesis.append(h)
if len(self.hypothesis) == self.beam_size or len(self.results) == self.beam_size:
break
outputs = [(self.variable(hyp.latest_token), hyp.decode_state) for hyp in self.hypothesis]
return outputs
def generate(self, num):
"""
return top num of generated sequence tokens.
:return:
"""
generates = [hyp.sequence_tokens for hyp in self.top_hypothesis(self.results, normalize=True)[:num]]
return generates
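# A minimal sketch of how the search loop above might be driven; the decoder
# here is a stand-in for the project's real decoder (EOS_token itself comes
# from utils.util via the star import at the top).
if __name__ == '__main__':
    def fake_decoder_step(token_var, state):
        # stand-in decoder: random log-probs over an 8-word vocabulary
        probs = torch.rand(1, 8)
        return Variable(torch.log(probs / probs.sum())), state

    beam = BeamSearch(vocab_size=8, beam_size=3)
    # seed every beam slot with a decoder output for a start token (id 0)
    inputs = [fake_decoder_step(beam.variable(0), None)
              for _ in range(beam.beam_size)]
    for _ in range(10):  # decode at most 10 steps
        inputs = beam.beam_search(inputs)
        if not inputs:  # every hypothesis ended, or the beam is exhausted
            break
        inputs = [fake_decoder_step(tok, st) for (tok, st) in inputs]
    print(beam.generate(num=2))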
|
# -*- coding: utf-8 -*-
import subprocess
import os
cmd=['/Users/jehlke/workspace/epywing/src/epywing/utils/mecab/bin/mecab',
'-Owakati', '--dicdir=mecab/dic/ipadic']
#cmd = ['mecab', '-Owakati', '--dicdir=../dic/ipadic']
a = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
a.stdin.write(u'何~これですか what is that HUH OK I SEE ?\n\n'.encode('utf8'))
a.stdin.flush()
b = unicode(a.stdout.readline().decode('utf8'))
print 'test'
print b.strip()#.split()
print 'test'
|
    @staticmethod
    def record(migration, database):
        raise NotImplementedError()
def run_migration_error(self, migration, extra_info=''):
return (
' ! Error found during real run of migration! Aborting.\n'
'\n'
' ! Since you have a database that does not support running\n'
' ! schema-altering statements in transactions, we have had \n'
' ! to leave it in an interim state between migrations.\n'
'%s\n'
' ! The South developers regret this has happened, and would\n'
' ! like to gently persuade you to consider a slightly\n'
' ! easier-to-deal-with DBMS (one that supports DDL transactions)\n'
' ! NOTE: The error which caused the migration to fail is further up.'
) % extra_info
def run_migration(self, migration, database):
migration_function = self.direction(migration)
south.db.db.start_transaction()
try:
migration_function()
south.db.db.execute_deferred_sql()
if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
# record us as having done this in the same transaction,
# since we're not in a dry run
self.record(migration, database)
except:
south.db.db.rollback_transaction()
if not south.db.db.has_ddl_transactions:
print(self.run_migration_error(migration))
print("Error in migration: %s" % migration)
raise
else:
try:
south.db.db.commit_transaction()
except:
print("Error during commit in migration: %s" % migration)
raise
def run(self, migration, database):
# Get the correct ORM.
south.db.db.current_orm = self.orm(migration)
# If we're not already in a dry run, and the database doesn't support
# running DDL inside a transaction, *cough*MySQL*cough* then do a dry
# run first.
if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
if not south.db.db.has_ddl_transactions:
dry_run = DryRunMigrator(migrator=self, ignore_fail=False)
dry_run.run_migration(migration, database)
return self.run_migration(migration, database)
def send_ran_migration(self, migration, database):
ran_migration.send(None,
app=migration.app_label(),
migration=migration,
method=self.__class__.__name__.lower(),
verbosity=self.verbosity,
interactive=self.interactive,
db=database)
def migrate(self, migration, database):
"""
Runs the specified migration forwards/backwards, in order.
"""
app = migration.migrations._migrations
migration_name = migration.name()
self.print_status(migration)
result = self.run(migration, database)
self.send_ran_migration(migration, database)
return result
def migrate_many(self, target, migrations, database):
raise NotImplementedError()
class MigratorWrapper(object):
def __init__(self, migrator, *args, **kwargs):
self._migrator = copy(migrator)
attributes = dict([(k, getattr(self, k))
for k in self.__class__.__dict__
if not k.startswith('__')])
self._migrator.__dict__.update(attributes)
self._migrator.__dict__['_wrapper'] = self
def __getattr__(self, name):
return getattr(self._migrator, name)
class DryRunMigrator(MigratorWrapper):
def __init__(self, ignore_fail=True, *args, **kwargs):
super(DryRunMigrator, self).__init__(*args, **kwargs)
self._ignore_fail = ignore_fail
def _run_migration(self, migration):
if migration.no_dry_run():
if self.verbosity:
print(" - Migration '%s' is marked for no-dry-run." % migration)
return
south.db.db.dry_run = True
        # preserve the constraint cache as it can be mutated by the dry run
constraint_cache = deepcopy(south.db.db._constraint_cache)
if self._ignore_fail:
south.db.db.debug, old_debug = False, south.db.db.debug
pending_creates = south.db.db.get_pending_creates()
south.db.db.start_transaction()
        migration_function = self.direction(migration)
try:
try:
migration_function()
south.db.db.execute_deferred_sql()
except:
raise exceptions.FailedDryRun(migration, sys.exc_info())
finally:
south.db.db.rollback_transactions_dry_run()
if self._ignore_fail:
south.db.db.debug = old_debug
south.db.db.clear_run_data(pending_creates)
south.db.db.dry_run = False
# restore the preserved constraint cache from before dry run was
# executed
south.db.db._constraint_cache = constraint_cache
def run_migration(self, migration, database):
try:
self._run_migration(migration)
except exceptions.FailedDryRun:
if self._ignore_fail:
return False
raise
def send_ran_migration(self, *args, **kwargs):
pass
class FakeMigrator(MigratorWrapper):
def run(self, migration, database):
# Don't actually run, just record as if ran
self.record(migration, database)
if self.verbosity:
print(' (faked)')
def send_ran_migration(self, *args, **kwargs):
pass
class LoadInitialDataMigrator(MigratorWrapper):
def load_initial_data(self, target, db='default'):
if target is None or target != target.migrations[-1]:
return
# Load initial data, if we ended up at target
if self.verbosity:
print(" - Loading initial data for %s." % target.app_label())
# Override Django's get_apps call temporarily to only load from the
# current app
old_get_apps = models.get_apps
new_get_apps = lambda: [models.get_app(target.app_label())]
models.get_apps = new_get_apps
loaddata.get_apps = new_get_apps
try:
call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=db)
finally:
models.get_apps = old_get_apps
loaddata.get_apps = old_get_apps
def migrate_many(self, target, migrations, database):
migrator = self._migrator
result = migrator.__class__.migrate_many(migrator, target, migrations, database)
if result:
self.load_initial_data(target, db=database)
return True
class Forwards(Migrator):
"""
Runs the specified migration forwards, in order.
"""
torun = 'forwards'
@staticmethod
def title(target):
if target is not None:
return " - Migrating forwards to %s." % target.name()
else:
assert False, "You cannot migrate forwards to zero."
@staticmethod
def status(migration):
return ' > %s' % migration
@staticmethod
def orm(migration):
return migration.orm()
def forwards(self, migration):
return self._wrap_direction(migration.forwards(), migration.orm())
direction = forwards
@staticmethod
def record(migration, database):
# Record us as having done this
record = MigrationHistory.for_migration(migration, database)
try:
from django.utils.timezone import now
record.applied = now()
except ImportError:
record.applied = datetime.datetime.utcnow()
if database != DEFAULT_DB_ALIAS:
record.save(using=database)
else:
# Django 1.1 and below always go down this branch.
record.save()
def format_backwards(self, migration):
if migration.no_dry_run():
return " (migration can |
#!/usr/bin/python
import sys, time
for ts in sys.argv[1:]:
    print ts, time.ctime(float(ts))
sys.exit(0)
|
# __author__ = MelissaChan
# -*- coding: utf-8 -*-
# 16-4-16 下午10:53
import MySQLdb
def connect(id,name,gender,region,status,date,inter):
try:
conn = MySQLdb.connect(host='localhost',user='root',passwd=' ',port=3306)
cur = conn.cursor()
        # cur.execute('create database if not exists PythonDB')
conn.select_db('Facebook')
# cur.execute('create table Test(id int,name varchar(20),info varchar(20))')
value = [id,name,gender,region,status,date,inter]
cur.execute('insert into info values(%s,%s,%s,%s,%s,%s,%s)',value)
# values = []
# for i in range(20):
        # values.append((i,'Hello World!','My number is '+str(i)))
#
# cur.executemany('insert into Test values(%s,%s,%s)',values)
# cur.execute('update Test set name="ACdreamer" where id=3')
conn.commit()
cur.close()
conn.close()
print 'insert ok~'
    except MySQLdb.Error as msg:
print "MySQL Error %d: %s" %(msg.args[0],msg.args[1])
|
import sys, re
for fn in sys.argv[1:]:
with open(fn, 'r') as f:
s = f.read()
xx = re.findall(r'([^\n]+)\s+\'\'\'(.*?)\'\'\'', s, re.M|re.S)
for (obj, doc) in xx:
s = re.findall('[^:`]\B(([`*])[a-zA-Z_][a-zA-Z0-9_]*\\2)\B', doc)
if s:
print '-'*50
                print fn, obj
print '.'*50
print doc
print '.'*50
print [ss[0] for ss in s]
# for vim:
# :s/\([^`:]\)\([`*]\)\([a-zA-Z0-9_]\+\)\2/\1``\3``/
|
import components
def AclContentCacheTest ():
"""ACL content cache test"""
ctx = components.Context (['a', 'b', 'c', 'd', 'cc', 'f'],\
                              ['ip_a', 'ip_b', 'ip_c', 'ip_d', 'ip_cc', 'ip_f'])
net = components.Network (ctx)
    a = components.EndHost(ctx.a, net, ctx)
b = components.EndHost(ctx.b, net, ctx)
c = components.EndHost(ctx.c, net, ctx)
d = components.EndHost(ctx.d, net, ctx)
cc = components.AclContentCache(ctx.cc, net, ctx)
f = components.AclFirewall(ctx.f, net, ctx)
net.setAddressMappings([(a, ctx.ip_a), \
(b, ctx.ip_b), \
(c, ctx.ip_c), \
(d, ctx.ip_d), \
(f, ctx.ip_f), \
(cc, ctx.ip_cc)])
addresses = [ctx.ip_a, ctx.ip_b, ctx.ip_c, ctx.ip_d, ctx.ip_cc, ctx.ip_f]
net.RoutingTable(a, [(x, f) for x in addresses])
net.RoutingTable(b, [(x, f) for x in addresses])
net.RoutingTable(c, [(x, f) for x in addresses])
net.RoutingTable(d, [(x, f) for x in addresses])
net.RoutingTable(f, [(x, cc) for x in addresses])
net.RoutingTable(cc, [(ctx.ip_a, a), \
(ctx.ip_b, b), \
(ctx.ip_c, c), \
(ctx.ip_d, d)])
net.Attach(a, b, c, d, cc)
endhosts = [a, b, c, d]
f.AddAcls([(ctx.ip_a, ctx.ip_b), (ctx.ip_c, ctx.ip_d)])
cc.AddAcls([(ctx.ip_a, ctx.ip_b), (ctx.ip_c, ctx.ip_d)])
net.Attach(a, b, c, d, cc, f)
endhosts = [a, b, c, d]
class AclContentCacheReturn (object):
def __init__ (self, net, ctx, a, b, c, d, cc, f):
self.net = net
self.ctx = ctx
self.a = a
self.b = b
self.c = c
self.d = d
self.cc = cc
self.f = f
self.check = components.PropertyChecker (ctx, net)
return AclContentCacheReturn(net, ctx, a, b, c, d, cc, f)
|
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.rates_response import RatesResponse # noqa: E501
from swagger_client.rest import ApiException
class TestRatesResponse(unittest.TestCase):
"""RatesResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRatesResponse(self):
"""Test RatesResponse"""
# FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.rates_response.RatesResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python
from ops_i2cbase import I2CBase
# ===========================================================================
# SI1145 Class
#
# Ported from github.com/adafruit/Adafruit_SI1145_Library/
# ===========================================================================
class SI1145:
i2c = None
# SI1145 Address
address = 0x60
# Commands
SI1145_PARAM_QUERY = 0x80
SI1145_PARAM_SET = 0xA0
SI1145_PSALS_AUTO = 0x0F
# Parameters
SI1145_PARAM_I2CADDR = 0x00
SI1145_PARAM_CHLIST = 0x01
SI1145_PARAM_CHLIST_ENUV = 0x80
SI1145_PARAM_CHLIST_ENAUX = 0x40
SI1145_PARAM_CHLIST_ENALSIR = 0x20
SI1145_PARAM_CHLIST_ENALSVIS = 0x10
SI1145_PARAM_CHLIST_ENPS1 = 0x01
SI1145_PARAM_CHLIST_ENPS2 = 0x02
SI1145_PARAM_CHLIST_ENPS3 = 0x04
# Registers
SI1145_REG_PARTID = 0x00
SI1145_REG_UCOEFF0 = 0x13
SI1145_REG_UCOEFF1 = 0x14
SI1145_REG_UCOEFF2 = 0x15
SI1145_REG_UCOEFF3 = 0x16
SI1145_REG_PARAMWR = 0x17
SI1145_REG_COMMAND = 0x18
SI1145_REG_MEASRATE0 = 0x08
SI1145_REG_MEASRATE1 = 0x09
# Constructor
def __init__(self):
# I2C
self.i2c = I2CBase(self.address)
id = self.i2c.readU8(self.SI1145_REG_PARTID)
if (id != 0x45):
print "SI1145 is not found"
        # to enable UV reading, set the EN_UV bit in CHLIST, and configure UCOEF [0:3] to the default values of 0x7B, 0x6B, 0x01, and 0x00.
self.i2c.write8(self.SI1145_REG_UCOEFF0, 0x7B)
self.i2c.write8(self.SI1145_REG_UCOEFF1, 0x6B)
self.i2c.write8(self.SI1145_REG_UCOEFF2, 0x01)
self.i2c.write8(self.SI1145_REG_UCOEFF3, 0x00)
# enable UV sensor
        self.i2c.write8(self.SI1145_REG_PARAMWR, self.SI1145_PARAM_CHLIST_ENUV | self.SI1145_PARAM_CHLIST_ENALSIR | self.SI1145_PARAM_CHLIST_ENALSVIS | self.SI1145_PARAM_CHLIST_ENPS1)
self.i2c.write8(self.SI1145_REG_COMMAND, self.SI1145_PARAM_CHLIST | self.SI1145_PARAM_SET)
# measurement rate for auto
self.i2c.write8(self.SI1145_REG_MEASRATE0, 0xFF)
# auto run
self.i2c.write8(self.SI1145_REG_COMMAND, self.SI1145_PSALS_AUTO)
def readUVIndex(self):
"Read UV index data from sensor (UV index * 100)"
rawData = self.i2c.readU16(0x2C)
if rawData > 0x0258:
return 0x0258
else:
return rawData
def readAmbientLight(self):
"Read Ambient Light data from sensor (Visible light + IR) in lux"
rawData = self.i2c.readU16(0x22)
return rawData
def readIRLight(self):
"Read IR data from sensor in lux"
rawData = self.i2c.readU16(0x24)
return rawData
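# Minimal usage sketch (not part of the driver); assumes an SI1145 wired to
# the I2C bus that ops_i2cbase talks to, as configured above.
if __name__ == '__main__':
    sensor = SI1145()
    print "UV index: %.2f" % (sensor.readUVIndex() / 100.0)
    print "Visible + IR: %d lux" % sensor.readAmbientLight()
    print "IR: %d lux" % sensor.readIRLight()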
|
#
# This is the container for the palettes. To change them
# simply edit this.
#
from numpy import *
NTSC = array([
[0x00,0x00,0x00],[0x40,0x40,0x40],[0x6C,0x6C,0x6C],[0x90,0x90,0x90],
[0xB0,0xB0,0xB0],[0xC8,0xC8,0xC8],[0xDC,0xDC,0xDC],[0xEC,0xEC,0xEC],
[0x44,0x44,0x00],[0x64,0x64,0x10],[0x84,0x84,0x24],[0xA0,0xA0,0x34],
[0xB8,0xB8,0x40],[0xD0,0xD0,0x50],[0xE8,0xE8,0x5C],[0xFC,0xFC,0x68],
[0x70,0x28,0x00],[0x84,0x44,0x14],[0x98,0x5C,0x28],[0xAC,0x78,0x3C],
[0xBC,0x8C,0x4C],[0xCC,0xA0,0x5C],[0xDC,0xB4,0x68],[0xEC,0xC8,0x78],
[0x84,0x18,0x00],[0x98,0x34,0x18],[0xAC,0x50,0x30],[0xC0,0x68,0x48],
[0xD0,0x80,0x5C],[0xE0,0x94,0x70],[0xEC,0xA8,0x80],[0xFC,0xBC,0x94],
[0x88,0x00,0x00],[0x9C,0x20,0x20],[0xB0,0x3C,0x3C],[0xC0,0x58,0x58],
[0xD0,0x70,0x70],[0xE0,0x88,0x88],[0xEC,0xA0,0xA0],[0xFC,0xB4,0xB4],
[0x78,0x00,0x5C],[0x8C,0x20,0x74],[0xA0,0x3C,0x88],[0xB0,0x58,0x9C],
[0xC0,0x70,0xB0],[0xD0,0x84,0xC0],[0xDC,0x9C,0xD0],[0xEC,0xB0,0xE0],
[0x48,0x00,0x78],[0x60,0x20,0x90],[0x78,0x3C,0xA4],[0x8C,0x58,0xB8],
[0xA0,0x70,0xCC],[0xB4,0x84,0xDC],[0xC4,0x9C,0xEC],[0xD4,0xB0,0xFC],
[0x14,0x00,0x84],[0x30,0x20,0x98],[0x4C,0x3C,0xAC],[0x68,0x58,0xC0],
[0x7C,0x70,0xD0],[0x94,0x88,0xE0],[0xA8,0xA0,0xEC],[0xBC,0xB4,0xFC],
[0x00,0x00,0x88],[0x1C,0x20,0x9C],[0x38,0x40,0xB0],[0x50,0x5C,0xC0],
[0x68,0x74,0xD0],[0x7C,0x8C,0xE0],[0x90,0xA4,0xEC],[0xA4,0xB8,0xFC],
[0x00,0x18,0x7C],[0x1C,0x38,0x90],[0x38,0x54,0xA8],[0x50,0x70,0xBC],
[0x68,0x88,0xCC],[0x7C,0x9C,0xDC],[0x90,0xB4,0xEC],[0xA4,0xC8,0xFC],
[0x00,0x2C,0x5C],[0x1C,0x4C,0x78],[0x38,0x68,0x90],[0x50,0x84,0xAC],
[0x68,0x9C,0xC0],[0x7C,0xB4,0xD4],[0x90,0xCC,0xE8],[0xA4,0xE0,0xFC],
[0x00,0x3C,0x2C],[0x1C,0x5C,0x48],[0x38,0x7C,0x64],[0x50,0x9C,0x80],
[0x68,0xB4,0x94],[0x7C,0xD0,0xAC],[0x90,0xE4,0xC0],[0xA4,0xFC,0xD4],
[0x00,0x3C,0x00],[0x20,0x5C,0x20],[0x40,0x7C,0x40],[0x5C,0x9C,0x5C],
[0x74,0xB4,0x74],[0x8C,0xD0,0x8C],[0xA4,0xE4,0xA4],[0xB8,0xFC,0xB8],
[0x14,0x38,0x00],[0x34,0x5C,0x1C],[0x50,0x7C,0x38],[0x6C,0x98,0x50],
[0x84,0xB4,0x68],[0x9C,0xCC,0x7C],[0xB4,0xE4,0x90],[0xC8,0xFC,0xA4],
[0x2C,0x30,0x00],[0x4C,0x50,0x1C],[0x68,0x70,0x34],[0x84,0x8C,0x4C],
[0x9C,0xA8,0x64],[0xB4,0xC0,0x78],[0xCC,0xD4,0x88],[0xE0,0xEC,0x9C],
[0x44,0x28,0x00],[0x64,0x48,0x18],[0x84,0x68,0x30],[0xA0,0x84,0x44],
[0xB8,0x9C,0x58],[0xD0,0xB4,0x6C],[0xE8,0xCC,0x7C],[0xFC,0xE0,0x8C]
],uint8)
PAL = array([
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
[0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC],
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
[0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC],
[0x80,0x58,0x00],[0x94,0x70,0x20],[0xA8,0x84,0x3C],[0xBC,0x9C,0x58],
[0xCC,0xAC,0x70],[0xDC,0xC0,0x84],[0xEC,0xD0,0x9C],[0xFC,0xE0,0xB0],
[0x44,0x5C,0x00],[0x5C,0x78,0x20],[0x74,0x90,0x3C],[0x8C,0xAC,0x58],
[0xA0,0xC0,0x70],[0xB0,0xD4,0x84],[0xC4,0xE8,0x9C],[0xD4,0xFC,0xB0],
[0x70,0x34,0x00],[0x88,0x50,0x20],[0xA0,0x68,0x3C],[0xB4,0x84,0x58],
[0xC8,0x98,0x70],[0xDC,0xAC,0x84],[0xEC,0xC0,0x9C],[0xFC,0xD4,0xB0],
[0x00,0x64,0x14],[0x20,0x80,0x34],[0x3C,0x98,0x50],[0x58,0xB0,0x6C],
[0x70,0xC4,0x84],[0x84,0xD8,0x9C],[0x9C,0xE8,0xB4],[0xB0,0xFC,0xC8],
[0x70,0x00,0x14],[0x88,0x20,0x34],[0xA0,0x3C,0x50],[0xB4,0x58,0x6C],
[0xC8,0x70,0x84],[0xDC,0x84,0x9C],[0xEC,0x9C,0xB4],[0xFC,0xB0,0xC8],
[0x00,0x5C,0x5C],[0x20,0x74,0x74],[0x3C,0x8C,0x8C],[0x58,0xA4,0xA4],
[0x70,0xB8,0xB8],[0x84,0xC8,0xC8],[0x9C,0xDC,0xDC],[0xB0,0xEC,0xEC],
[0x70,0x00,0x5C],[0x84,0x20,0x74],[0x94,0x3C,0x88],[0xA8,0x58,0x9C],
[0xB4,0x70,0xB0],[0xC4,0x84,0xC0],[0xD0,0x9C,0xD0],[0xE0,0xB0,0xE0],
[0x00,0x3C,0x70],[0x1C,0x58,0x88],[0x38,0x74,0xA0],[0x50,0x8C,0xB4],
[0x68,0xA4,0xC8],[0x7C,0xB8,0xDC],[0x90,0xCC,0xEC],[0xA4,0xE0,0xFC],
[0x58,0x00,0x70],[0x6C,0x20,0x88],[0x80,0x3C,0xA0],[0x94,0x58,0xB4],
[0xA4,0x70,0xC8],[0xB4,0x84,0xDC],[0xC4,0x9C,0xEC],[0xD4,0xB0,0xFC],
[0x00,0x20,0x70],[0x1C,0x3C,0x88],[0x38,0x58,0xA0],[0x50,0x74,0xB4],
[0x68,0x88,0xC8],[0x7C,0xA0,0xDC],[0x90,0xB4,0xEC],[0xA4,0xC8,0xFC],
[0x3C,0x00,0x80],[0x54,0x20,0x94],[0x6C,0x3C,0xA8],[0x80,0x58,0xBC],
[0x94,0x70,0xCC],[0xA8,0x84,0xDC],[0xB8,0x9C,0xEC],[0xC8,0xB0,0xFC],
[0x00,0x00,0x88],[0x20,0x20,0x9C],[0x3C,0x3C,0xB0],[0x58,0x58,0xC0],
[0x70,0x70,0xD0],[0x84,0x84,0xE0],[0x9C,0x9C,0xEC],[0xB0,0xB0,0xFC],
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
    [0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC],
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
[0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC]
],uint8)
SECAM = repeat([[0x00,0x00,0x00],
[0x21,0x21,0xFF],
[0xF0,0x3C,0x79],
    [0xFF,0x50,0xFF],
[0x7F,0xFF,0x00],
[0x7F,0xFF,0xFF],
[0xFF,0xFF,0x3F],
[0xFF,0xFF,0xFF]],16).astype(uint8)
|
#!/home/mharris/Projects/DevOpsDays/venv/bin/python2
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
try:
import locale
    locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates pseudo-XML from standalone reStructuredText '
'sources (for testing purposes). ' + default_description)
publish_cmdline(description=description)
|
#Schedule-generator for LHL use written by Acebulf (acebulf at gmail.com)
#Current version 1.0 -- Jan 16 2014
#Copyrighted under the MIT License (see License included in the github repo)
import random
import time
while 1:
print "Starting random-schedule generation process..."
starttime = time.time()
kill = False
schedule = [[]]*30
teams = ["BOS", "CHI", "COL", "DET", "NJD", "WSH"]
# Randomly Choose Team
team1 = random.choice(teams)
teams_mt1 = list(teams)
teams_mt1.remove(team1)
matchups = []
for x in teams_mt1:
for y in xrange(6):
matchups.append((team1,x))
random.shuffle(matchups)
for x in xrange(30):
schedule[x]=[matchups[x]]
team2 = random.choice(teams_mt1)
teams_2 = list(teams_mt1)
teams_2.remove(team2)
matchups=[]
for x in teams_2:
for y in xrange(6):
matchups.append((team2,x))
random.shuffle(matchups)
days = range(30)
def playing_day(team, day):
occupied = [i[0] for i in day] + [i[1] for i in day]
return (team in occupied)
for matchup in matchups:
while 1:
temp_day = random.choice(days)
if time.time()-starttime >= 4:
kill = True
break
if not playing_day(matchup[0],schedule[temp_day]) and not playing_day(matchup[1],schedule[temp_day]):
schedule[temp_day].append(matchup)
days.remove(temp_day)
break
if kill:
print "Error in stage 1; restarting"
continue
print "Stage 1/3 Successfully Completed!"
    # split the schedule into days that already have two games and days
    # that still have only one
    days2games = []
    days1game = []
    for day in schedule:
        if len(day) == 1:
            days1game.append(day)
        else:
            days2games.append(day)
team3 = random.choice(teams_2)
teams_3 = list(teams_2)
teams_3.remove(team3)
matchups=[]
for x in teams_3:
matchups.append((team3,x))
team4 = random.choice(teams_3)
teams_4 = list(teams_3)
teams_4.remove(team4)
for x in teams_4:
matchups.append((team4,x))
matchups.append((teams_4[0],teams_4[1]))
for x in days2games:
for y in matchups:
if not playing_day(y[0],x) and not playing_day(y[1],x):
x.append(y)
newmatchups = []
for x in matchups:
newmatchups.append(x)
newmatchups.append(x)
random.shuffle(newmatchups)
print "Stage 2/3 Successfully Completed!"
    for x in days1game:
        # iterate over a copy so removing from newmatchups is safe, and
        # stop once the day has its three games
        for y in list(newmatchups):
            if len(x) == 3:
                break
            if not playing_day(y[0], x) and not playing_day(y[1], x):
                x.append(y)
                newmatchups.remove(y)
for x in schedule:
if len(x) != 3:
print "Problem encountered in stage 3; Restarting..."
kill=True
break
if kill:
continue
print "Stage 3/3 Successfully Completed"
break
print "Schedule Successfully Generated"
print "Printing to File..."
f = open("schedule.txt","w")
dayno = 0
while dayno <= 29:
f.write("Day {0}:\n".format(dayno+1))
for x in schedule[dayno]:
f.write(x[0] + " - " + x[1]+"\n")
f.write("\n")
dayno += 1
print "Result written to file. Program terminating."
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
_REQUEST = b''
_RESPONSE = b''
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
def handle_unary_unary(request, servicer_context):
return _RESPONSE
def handle_unary_stream(request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH):
yield _RESPONSE
def handle_stream_unary(request_iterator, servicer_context):
for request in request_iterator:
pass
return _RESPONSE
def handle_stream_stream(request_iterator, servicer_context):
for request in request_iterator:
yield _RESPONSE
class _MethodHandler(grpc.RpcMethodHandler):
def __init__(self, request_streaming, response_streaming):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = None
self.response_serializer = None
self.unary_unary = None
        self.unary_stream = None
self.stream_unary = None
self.stream_stream = None
if self.request_streaming and self.response_streaming:
            self.stream_stream = handle_stream_stream
elif self.request_streaming:
self.stream_unary = handle_stream_unary
elif self.response_streaming:
self.unary_stream = handle_unary_stream
else:
self.unary_unary = handle_unary_unary
class _GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return _MethodHandler(False, False)
elif handler_call_details.method == _UNARY_STREAM:
return _MethodHandler(False, True)
elif handler_call_details.method == _STREAM_UNARY:
return _MethodHandler(True, False)
elif handler_call_details.method == _STREAM_STREAM:
return _MethodHandler(True, True)
else:
return None
class EmptyMessageTest(unittest.TestCase):
def setUp(self):
self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
self._server = grpc.server(
self._server_pool, handlers=(_GenericHandler(),))
port = self._server.add_insecure_port('[::]:0')
self._server.start()
self._channel = grpc.insecure_channel('localhost:%d' % port)
def tearDown(self):
self._server.stop(0)
def testUnaryUnary(self):
response = self._channel.unary_unary(_UNARY_UNARY)(_REQUEST)
self.assertEqual(_RESPONSE, response)
def testUnaryStream(self):
response_iterator = self._channel.unary_stream(_UNARY_STREAM)(_REQUEST)
self.assertSequenceEqual([_RESPONSE] * test_constants.STREAM_LENGTH,
list(response_iterator))
def testStreamUnary(self):
response = self._channel.stream_unary(_STREAM_UNARY)(
iter([_REQUEST] * test_constants.STREAM_LENGTH))
self.assertEqual(_RESPONSE, response)
def testStreamStream(self):
response_iterator = self._channel.stream_stream(_STREAM_STREAM)(
iter([_REQUEST] * test_constants.STREAM_LENGTH))
self.assertSequenceEqual([_RESPONSE] * test_constants.STREAM_LENGTH,
list(response_iterator))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
"""
sentry.plugins.sentry_useragents.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import httpagentparser
import sentry
from django.utils.translation import ugettext_lazy as _
from sentry.plugins import register
from sentry.plugins.bases.tag import TagPlugin
class UserAgentPlugin(TagPlugin):
version = sentry.VERSION
author = "Sentry Team"
author_url = "https://github.com/getsentry/sentry"
project_default_enabled = True
def get_tag_values(self, event):
http = event.interfaces.get('sentry.interfaces.Http')
        if not http:
return []
if not http.headers:
return []
if 'User-Agent' not in http.headers:
return []
ua = httpagentparser.detect(http.headers['User-Agent'])
if not ua:
return []
result = self.get_tag_from_ua(ua)
if not result:
return []
        return [result]
class BrowserPlugin(UserAgentPlugin):
"""
Automatically adds the 'browser' tag from events containing interface data
    from ``sentry.interfaces.Http``.
"""
slug = 'browsers'
title = _('Auto Tag: Browsers')
tag = 'browser'
tag_label = _('Browser Name')
def get_tag_from_ua(self, ua):
if 'browser' not in ua:
return
tag = ua['browser']['name']
if 'version' in ua['browser']:
tag += ' ' + ua['browser']['version']
return tag
register(BrowserPlugin)
class OsPlugin(UserAgentPlugin):
"""
Automatically adds the 'os' tag from events containing interface data
    from ``sentry.interfaces.Http``.
"""
slug = 'os'
title = _('Auto Tag: Operating Systems')
tag = 'os'
tag_label = _('Operating System')
    def get_tag_from_ua(self, ua):
        if 'flavor' in ua:
            tag = ua['flavor']['name']
            if 'version' in ua['flavor']:
                tag += ' ' + ua['flavor']['version']
        elif 'os' in ua:
            # Linux
            tag = ua['os']['name']
            if 'version' in ua['os']:
                tag += ' ' + ua['os']['version']
        elif 'dist' in ua:
            # Ubuntu
            tag = ua['dist']['name']
        else:
            return
        return tag
register(OsPlugin)
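# Illustration of the nested dict shape this module assumes from
# httpagentparser.detect() (an assumption about that library, not part of
# sentry), e.g.
#   {'os': {'name': 'Linux'},
#    'browser': {'name': 'Chrome', 'version': '41.0'}}
# from which BrowserPlugin yields the tag 'Chrome 41.0' and OsPlugin 'Linux'.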
|
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import unittest
from synapse.events import FrozenEvent
from synapse.events.utils import prune_event, serialize_event
def MockEvent(**kwargs):
if "event_id" not in kwargs:
kwargs["event_id"] = "fake_event_id"
if "type" not in kwargs:
kwargs["type"] = "fake_type"
return FrozenEvent(kwargs)
class PruneEventTestCase(unittest.TestCase):
""" Asserts that a new event constructed with `evdict` will look like
`matchdict` when it is redacted. """
def run_test(self, evdict, matchdict):
self.assertEquals(
prune_event(FrozenEvent(evdict)).get_dict(),
matchdict
)
def test_minimal(self):
self.run_test(
{
'type': 'A',
'event_id': '$test:domain',
},
{
'type': 'A',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
def test_basic_keys(self):
self.run_test(
{
'type': 'A',
'room_id': '!1:domain',
'sender': '@2:domain',
'event_id': '$3:domain',
'origin': 'domain',
},
{
'type': 'A',
'room_id': '!1:domain',
'sender': '@2:domain',
'event_id': '$3:domain',
'origin': 'domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
def test_unsigned_age_ts(self):
self.run_test(
{
'type': 'B',
'event_id': '$test:domain',
'unsigned': {'age_ts': 20},
},
{
'type': 'B',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {'age_ts': 20},
}
)
self.run_test(
{
'type': 'B',
'event_id': '$test:domain',
'unsigned': {'other_key': 'here'},
},
{
'type': 'B',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
def test_content(self):
self.run_test(
{
'type': 'C',
'event_id': '$test:domain',
'content': {'things': 'here'},
},
{
'type': 'C',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
self.run_test(
{
'type': 'm.room.create',
'event_id': '$test:domain',
'content': {'creator': '@2:domain', 'other_field': 'here'},
},
{
'type': 'm.room.create',
'event_id': '$test:domain',
'content': {'creator': '@2:domain'},
'signatures': {},
'unsigned': {},
}
)
class SerializeEventTestCase(unittest.TestCase):
def serialize(self, ev, fields):
return serialize_event(ev, 1479807801915, only_event_fields=fields)
def test_event_fields_works_with_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar"
),
["room_id"]
),
{
"room_id": "!foo:bar",
}
)
def test_event_fields_works_with_nested_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"body": "A message",
},
),
["content.body"]
),
{
"content": {
"body": "A message",
}
}
)
def test_event_fields_works_with_dot_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"key.with.dots": {},
},
),
["content.key\.with\.dots"]
),
{
"content": {
| "key.with.dots": {},
}
}
)
def test_event_fields_works_with_nested_dot_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"not_me": 1,
| "nested.dot.key": {
"leaf.key": 42,
"not_me_either": 1,
},
},
),
["content.nested\.dot\.key.leaf\.key"]
),
{
"content": {
"nested.dot.key": {
"leaf.key": 42,
},
}
}
)
def test_event_fields_nops_with_unknown_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"foo": "bar",
},
),
["content.foo", "content.notexists"]
),
{
"content": {
"foo": "bar",
}
}
)
def test_event_fields_nops_with_non_dict_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"foo": ["I", "am", "an", "array"],
},
),
["content.foo.am"]
),
{}
)
def test_event_fields_nops_with_array_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"foo": ["I", "am", "an", "array"],
},
),
["content.foo.1"]
),
{}
)
def test_event_fields_all_fields_if_empty(self):
self.assertEquals(
self.serialize(
MockEvent(
type="foo",
event_id="test",
room_id="!foo:bar",
content={
"foo": "bar",
},
),
[]
),
{
"type": "foo",
"event_id": "test",
"room_id": "!foo:bar",
"content": {
"foo": "bar",
},
"unsigned": {}
}
)
def test_event_fields_fail_if_fields_not_str(self):
with self.assertRaises(TypeError):
self.serialize(
MockEvent(
room_id="!foo:bar",
|
import os
BASEDIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
##
# Database settings
##
DB_HOST = 'localhost'
DB_NAME = 'scoremodel'
DB_USER = 'scoremodel'
DB_PASS = 'scoremodel'
##
# MySQL SSL connections
##
use_ssl = False
SSL_CA = '/etc/mysql/certs/ca-cert.pem'
SSL_KEY = '/etc/mysql/keys/client-key.pem'
SSL_CERT = '/etc/mysql/certs/client-cert.pem'
##
# Flask-WTF
##
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret_key'
##
# Log-in
##
REMEMBER_COOKIE_SECURE = True
REMEMBER_COOKIE_HTTPONLY = True
SESSION_PROTECTION = "strong"
##
# Babel
##
BABEL_DEFAULT_LOCALE = 'en'
BABEL_DEFAULT_TIMEZONE = 'UTC'
LANGUAGES = ['nl', 'en']
##
# Uploads
##
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = ('txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif')
MAX_CONTENT_LENGTH = 16 * 1024 * 1024 # 16 MB
##
# Logger
##
LOG_FILENAME = 'logs/scoremodel.log'
if use_ssl is True:
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://{user}:{passw}@{host}/{db}?ssl_key={ssl_key}&ssl_cert={ssl_cert}'.format(
user=DB_USER, passw=DB_PASS,
host=DB_HOST, db=DB_NAME, ssl_key=SSL_KEY, ssl_cert=SSL_CERT)
else:
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://{user}:{passw}@{host}/{db}'.format(user=DB_USER, passw=DB_PASS,
host=DB_HOST, db=DB_NAME)
|
""" Tools for reading Mac resource forks. """
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import struct
from fontTools.misc import sstruct
from collections import OrderedDict
try:
from collections.abc import MutableMapping
except ImportError:
from UserDict import DictMixin as MutableMapping
class ResourceError(Exception):
pass
class ResourceReader(MutableMapping):
def __init__(self, fileOrPath):
self._resources = OrderedDict()
if hasattr(fileOrPath, 'read'):
self.file = fileOrPath
else:
try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
@staticmethod
def openResourceFork(path):
with open(path + '/..namedfork/rsrc', 'rb') as resfork:
data = resfork.read()
infile = BytesIO(data)
infile.name = path
return infile
@staticmethod
def openDataFork(path):
with open(path, 'rb') as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
return infile
def _readFile(self):
self._readHeaderAndMap()
self._readTypeList()
def _read(self, numBytes, offset=None):
if offset is not None:
try:
self.file.seek(offset)
except OverflowError:
raise ResourceError("Failed to seek offset ('offset' is too large)")
if self.file.tell() != offset:
raise ResourceError('Failed to seek offset (reached EOF)')
try:
data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError('Cannot read resource (not enough data)')
return data
def _readHeaderAndMap(self):
self.file.seek(0)
headerData = self._read(ResourceForkHeaderSize)
sstruct.unpack(ResourceForkHeader, headerData, self)
# seek to resource map, skip reserved
mapOffset = self.mapOffset + 22
resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
sstruct.unpack(ResourceMapHeader, resourceMapData, self)
self.absTypeListOffset = self.mapOffset + self.typeListOffset
self.absNameListOffset = self.mapOffset + self.nameListOffset
def _readTypeList(self):
absTypeListOffset = self.absTypeListOffset
numTypesData = self._read(2, absTypeListOffset)
self.numTypes, = struct.unpack('>H', numTypesData)
absTypeListOffset2 = absTypeListOffset + 2
for i in range(self.numTypes + 1):
resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
resType = tostr(item['type'], encoding='mac-roman')
refListOffset = absTypeListOffset + item['refListOffset']
numRes = item['numRes'] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
def _readReferenceList(self, resType, refListOffset, numRes):
resources = []
for i in range(numRes):
refOffset = refListOffset + ResourceRefItemSize * i
refData = self._read(ResourceRefItemSize, refOffset)
res = Resource(resType)
res.decompile(refData, self)
resources.append(res)
return resources
def __getitem__(self, resType):
return self._resources[resType]
def __delitem__(self, resType):
del self._resources[resType]
def __setitem__(self, resType, resources):
self._resources[resType] = resources
def __len__(self):
return len(self._resources)
def __iter__(self):
return iter(self._resources)
def keys(self):
return self._resources.keys()
@property
def types(self):
return list(self._resources.keys())
def countResources(self, resType):
"""Return the number of resources of a given type."""
try:
return len(self[resType])
except KeyError:
return 0
def getIndices(self, resType):
numRes = self.countResources(resType)
if numRes:
return list(range(1, numRes+1))
else:
return []
def getNames(self, resType):
"""Return list of names of all resources of a given type."""
return [res.name for res in self.get(resType, []) if res.name is not None]
def getIndResource(self, resType, index):
"""Return resource of given type located at an index ranging from 1
to the number of resources for that type, or None if not found.
"""
if index < 1:
return None
try:
res = self[resType][index-1]
except (KeyError, IndexError):
return None
return res
def getNamedResource(self, resType, name):
"""Return the named resource of given type, else return None."""
name = tostr(name, encoding='mac-roman')
for res in self.get(resType, []):
if res.name == name:
return res
return None
def close(self):
if not self.file.closed:
self.file.close()
class Resource(object):
def __init__(self, resType=None, resData=None, resID=None, resName=None,
resAttr=None):
self.type = resType
self.data = resData
self.id = resID
        self.name = resName
self.attr = resAttr
def decompile(self, refData, reader):
sstruct.unpack(ResourceRefItem, refData, self)
# interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
absDataOffset = reader.dataOffset + self.dataOffset
dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
self.data = reader._read(dataLength)
        if self.nameOffset == -1:
return
absNameOffset = reader.absNameListOffset + self.nameOffset
nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding='mac-roman')
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""
ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)
ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""
ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)
ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""
ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""
ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
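if __name__ == '__main__':
    # Minimal usage sketch (not part of the module): dump basic info for
    # every resource in a file named on the command line.
    import sys
    reader = ResourceReader(sys.argv[1])
    for resType in reader.types:
        for res in reader[resType]:
            print("%s #%d %r (%d bytes)" % (resType, res.id, res.name, len(res.data)))
    reader.close()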
|
# https://www.reddit.com/r/dailyprogrammer/comments/3fva66/20150805_challenge_226_intermediate_connect_four/
import sys, string
xmoves = open(sys.argv[1]).read().translate(None, string.ascii_lowercase + ' \n')
omoves = open(sys.argv[1]).read().translate(None, string.ascii_uppercase + ' \n')
board = [[' ' for x in range(6)] for x in range(7)]
def insert(colchar, player):
colnumber = ord(colchar.lower()) - ord('a')
col = board[colnumber]
    for i in range(len(col)):
if col[i] == ' ':
col[i] = player
break
def checkwinner(player):
    # scan every cell; from each piece look up, right, up-right and
    # down-right so each possible four-in-a-row is covered once
    for x in range(7):
        for y in range(6):
            if board[x][y] == player:
                top = board[x][y+1:y+4]
                if len(top) == 3 and not ''.join(top).strip(player):
                    return True
                try:
                    right = [board[x+1][y], board[x+2][y], board[x+3][y]]
                    if not ''.join(right).strip(player):
                        return True
                except IndexError:
                    pass
                try:
                    topright = [board[x+1][y+1], board[x+2][y+2], board[x+3][y+3]]
                    if not ''.join(topright).strip(player):
                        return True
                except IndexError:
                    pass
                if y >= 3:
                    try:
                        bottomright = [board[x+1][y-1], board[x+2][y-2], board[x+3][y-3]]
                        if not ''.join(bottomright).strip(player):
                            return True
                    except IndexError:
                        pass
for i in range(len(xmoves)):
insert(xmoves[i], 'X')
if checkwinner('X'):
print 'X won at move ' + str(i+1)
break
insert(omoves[i], 'O')
if checkwinner('O'):
print 'O won at move ' + str(i+1)
break
|
            coef[np.logical_not(sig)] = 0.0
#
self.denoised.append(coef)
self.sig_supports.append(sig)
self.p_cutoff.append(p_cutoff)
# append the last approximation
self.denoised.append(self.get_approx())
def decompose(self, level=5, boundary="symm", verbose=False):
"""
2D IUWT decomposition with VST.
"""
self.boundary = boundary
if self.level != level or self.filters == []:
self.level = level
self.calc_filters()
self.calc_vst_coef()
self.decomposition = []
approx = self.data
if verbose:
print("IUWT decomposing (%d levels): " % level,
end="", flush=True, file=sys.stderr)
for scale in range(1, level+1):
if verbose:
print("%d..." % scale, end="", flush=True, file=sys.stderr)
# approximation:
approx2 = signal.convolve2d(self.data, self.filters[scale],
mode="same", boundary=self.boundary)
# wavelet details:
w = self.vst(approx, scale=scale-1) - self.vst(approx2, scale=scale)
self.decomposition.append(w)
if scale == level:
self.decomposition.append(approx2)
approx = approx2
if verbose:
print("DONE!", flush=True, file=sys.stderr)
return self.decomposition
def reconstruct_ivst(self, denoised=True, positive_project=True):
"""
Reconstruct the original image from the *un-denoised* decomposition
by applying the inverse VST.
This reconstruction result is also used as the `initial condition'
for the below `iterative reconstruction' algorithm.
arguments:
        * denoised: whether to use the denoised data or the direct decomposition
* positive_project: whether replace negative values with zeros
"""
if denoised:
decomposition = self.denoised
else:
decomposition = self.decomposition
self.positive_project = positive_project
details = np.sum(decomposition[:-1], axis=0)
approx = self.vst(decomposition[-1], scale=self.level)
reconstruction = self.ivst(approx+details, scale=0)
if positive_project:
reconstruction[reconstruction < 0.0] = 0.0
self.reconstruction = reconstruction
return reconstruction
def reconstruct(self, denoised=True, niter=10, verbose=False):
"""
Reconstruct the original image using iterative method with
        L1 regularization, because the denoising violates the exact inverse
procedure.
arguments:
        * denoised: whether to use the denoised coefficients
* niter: number of iterations
"""
if denoised:
decomposition = self.denoised
else:
decomposition = self.decomposition
# L1 regularization
lbd = 1.0
delta = lbd / (niter - 1)
# initial solution
        solution = self.reconstruct_ivst(denoised=denoised,
positive_project=True)
#
iuwt = IUWT(level=self.level)
iuwt.calc_filters()
# iterative reconstruction
if verbose:
print("Iteratively reconstructing (%d times): " % niter,
end="", flush=True, file=sys.stderr)
for i in range(niter):
if verbose:
print("%d..." % i, end="", flush=True, file=sys.stderr)
tempd = self.data.copy()
solution_decomp = []
for scale in range(1, self.level+1):
approx, detail = iuwt.transform(tempd, scale)
approx_sol, detail_sol = iuwt.transform(solution, scale)
# Update coefficients according to the significant supports,
# which were acquired during the denoising procedure with FDR.
sig = self.sig_supports[scale]
detail_sol[sig] = detail[sig]
detail_sol = self.soft_threshold(detail_sol, threshold=lbd)
#
solution_decomp.append(detail_sol)
tempd = approx.copy()
solution = approx_sol.copy()
# last approximation (the two are the same)
solution_decomp.append(approx)
# reconstruct
solution = iuwt.reconstruct(decomposition=solution_decomp)
# discard all negative values
solution[solution < 0] = 0.0
#
lbd -= delta
if verbose:
print("DONE!", flush=True, file=sys.stderr)
#
self.reconstruction = solution
return self.reconstruction
# IUWT_VST }}}
def main():
# commandline arguments parser
parser = argparse.ArgumentParser(
description="Poisson Noise Removal with Multi-scale Variance " + \
"Stabling Transform and Wavelet Transform",
epilog="Version: %s (%s)" % (__version__, __date__))
parser.add_argument("-l", "--level", dest="level",
type=int, default=5,
help="level of the IUWT decomposition")
parser.add_argument("-r", "--fdr", dest="fdr",
type=float, default=0.1,
help="false discovery rate")
parser.add_argument("-I", "--fdr-independent", dest="fdr_independent",
action="store_true", default=False,
help="whether the FDR null hypotheses are independent")
parser.add_argument("-s", "--start-scale", dest="start_scale",
type=int, default=1,
help="which scale to start the denoising (inclusive)")
parser.add_argument("-e", "--end-scale", dest="end_scale",
type=int, default=0,
help="which scale to end the denoising (inclusive)")
parser.add_argument("-n", "--niter", dest="niter",
type=int, default=10,
help="number of iterations for reconstruction")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", default=False,
help="show verbose progress")
parser.add_argument("-C", "--clobber", dest="clobber",
action="store_true", default=False,
help="overwrite output file if exists")
parser.add_argument("infile", help="input image with Poisson noises")
parser.add_argument("outfile", help="output denoised image")
args = parser.parse_args()
if args.end_scale == 0:
args.end_scale = args.level
if args.verbose:
print("infile: '%s'" % args.infile, file=sys.stderr)
print("outfile: '%s'" % args.outfile, file=sys.stderr)
print("level: %d" % args.level, file=sys.stderr)
print("fdr: %.2f" % args.fdr, file=sys.stderr)
print("fdr_independent: %s" % args.fdr_independent, file=sys.stderr)
print("start_scale: %d" % args.start_scale, file=sys.stderr)
print("end_scale: %d" % args.end_scale, file=sys.stderr)
print("niter: %d\n" % args.niter, flush=True, file=sys.stderr)
if not args.clobber and os.path.exists(args.outfile):
raise OSError("outfile '%s' already exists" % args.outfile)
imgfits = fits.open(args.infile)
img = imgfits[0].data
# Remove Poisson noises
msvst = IUWT_VST(data=img)
msvst.decompose(level=args.level, verbose=args.verbose)
msvst.denoise(fdr=args.fdr, fdr_independent=args.fdr_independent,
start_scale=args.start_scale, end_scale=args.end_scale,
verbose=args.verbose)
msvst.reconstruct(denoised=True, niter=args.niter, verbose=args.verbose)
img_denoised = msvst.reconstruction
# Output
imgfits[0].data = img_denoised
imgfits[0].header.add_history("%s: Removed Poisson Noises @ %s" % (
os.path.basename(sys.argv[0]), datetime.utcnow().isoformat()))
imgfits[0].header.add_history(" TOOL: %s (v%s, %s)" % (
os.path.basename(sys.argv[0]), __version__, __date__))
imgfits[0].header.add_history(" PARAM: %s" % " ".join(sys.argv[1:]))
imgfits.writeto(args.outfile, checksum=True, clobber=args.clobber)
if __name__ == "__main__":
main()
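# Example invocation (script and file names here are hypothetical; the tool
# reads a Poisson-noisy FITS image and writes the denoised result):
#   python msvst_denoise.py -l 5 -r 0.1 -n 10 -v noisy.fits denoised.fits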
"""
Page view class
"""
import os
from Server.Importer import ImportFromModule
class PageView(ImportFromModule("Server.PageViewBase", "PageViewBase")):
"""
Page view class.
"""
_PAGE_TITLE = "Python Web Framework"
def __init__(self, htmlToLoad):
"""
Constructor.
- htmlToLoad : HTML to load
"""
self.SetPageTitle(self._PAGE_TITLE)
self.AddMetaData("charset=\"UTF-8\"")
self.AddMetaData("name=\"viewport\" content=\"width=device-width, initial-scale=1\"")
self.AddStyleSheet("/css/styles.css")
self.AddJavaScript("/js/http.js")
self.LoadHtml(os.path.join(os.path.dirname(__file__), "%s.html" % htmlToLoad))
self.SetPageData({ "PageTitle" : self._PAGE_TITLE })
"""create table for hierarchy of accounts
Revision ID: 17fb1559a5cd
Revises: 3b7de32aebed
Create Date: 2015-09-16 14:20:30.972593
"""
# revision identifiers, used by Alembic.
revision = '17fb1559a5cd'
down_revision = '3b7de32aebed'
branch_labels = None
depends_on = None
from alembic import op, context
import sqlalchemy as sa
def downgrade():
schema = context.get_context().config.get_main_option('schema')
op.drop_table('lux_user_inheritance', schema=schema)
op.execute("DROP FUNCTION IF EXISTS "
"%(schema)s.getMainAccount(VARCHAR)"
% {"schema": schema})
def upgrade():
schema = context.get_context().config.get_main_option('schema')
op.create_table(
'lux_user_inheritance',
sa.Column(
'login', sa.VARCHAR(), autoincrement=False,
nullable=False),
sa.Column(
'login_father', sa.VARCHAR(), autoincrement=False,
nullable=False),
schema=schema
)
op.create_primary_key(
"lux_user_inheritance_pkey", "lux_user_inheritance",
['login', 'login_father'],
schema=schema
)
op.execute(
"CREATE OR REPLACE FUNCTION %(schema)s.getMainAccount "
"(child_login VARCHAR)"
"RETURNS VARCHAR AS "
"$$ "
"DECLARE "
"cur_login_father VARCHAR;"
"res_login_father VARCHAR;"
"c_father Cursor (p_login VARCHAR) FOR "
"Select login_father From %(schema)s.lux_user_inheritance Where "
"login = p_login;"
"BEGIN "
"cur_login_father := child_login;"
"LOOP "
"OPEN c_father(cur_login_father);"
"FETCH FIRST FROM c_father into res_login_father;"
"IF FOUND THEN "
"cur_login_father := res_login_father;"
"END IF;"
"CLOSE c_father;"
"IF NOT FOUND THEN "
"RETURN cur_login_father;"
"END IF;"
"END LOOP;"
"END;"
"$$"
"LANGUAGE plpgsql;" % {"schema": schema})
# -*- coding: utf-8 -*-
import os
from django.db import models
from django.db.models import Q
from seahub.tags.models import FileUUIDMap
from seahub.utils import normalize_file_path
class RelatedFilesManager(models.Manager):
def get_related_files_uuid(self, uuid):
related_files_uuid = super(RelatedFilesManager, self).filter(
Q(o_uuid=uuid) | Q(r_uuid=uuid)).select_related('o_uuid', 'r_uuid')
return related_files_uuid
def get_related_file_uuid(self, o_repo_id, r_repo_id, o_path, r_path):
o_file_path = normalize_file_path(o_path)
o_filename = os.path.basename(o_file_path)
o_parent_path = os.path.dirname(o_file_path)
r_file_path = normalize_file_path(r_path)
r_filename = os.path.basename(r_file_path)
r_parent_path = os.path.dirname(r_file_path)
o_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(o_repo_id, o_parent_path, o_filename, is_dir=False)
r_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(r_repo_id, r_parent_path, r_filename, is_dir=False)
try:
return super(RelatedFilesManager, self).get(
Q(o_uuid=o_uuid, r_uuid=r_uuid) | Q(o_uuid=r_uuid, r_uuid=o_uuid))
except self.model.DoesNotExist:
return None
def add_related_file_uuid(self, o_repo_id, r_repo_id, o_path, r_path):
o_file_path = normalize_file_path(o_path)
o_filename = os.path.basename(o_file_path)
o_parent_path = os.path.dirname(o_file_path)
r_file_path = normalize_file_path(r_path)
r_filename = os.path.basename(r_file_path)
r_parent_path = os.path.dirname(r_file_path)
o_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(o_repo_id, o_parent_path, o_filename, is_dir=False)
r_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(r_repo_id, r_parent_path, r_filename, is_dir=False)
related_file_uuid = self.model(o_uuid=o_uuid, r_uuid=r_uuid)
related_file_uuid.save()
return related_file_uuid
def get_related_file_uuid_by_id(self, related_id):
try:
return super(RelatedFilesManager, self).get(pk=related_id)
except self.model.DoesNotExist:
return None
def delete_related_file_uuid(self, related_id):
try:
file_related = super(RelatedFilesManager, self).get(pk=related_id)
file_related.delete()
return True
except self.model.DoesNotExist:
return False
class RelatedFiles(models.Model):
o_uuid = models.ForeignKey(FileUUIDMap, db_index=True, on_delete=models.CASCADE, related_name='o_uuid')
r_uuid = models.ForeignKey(FileUUIDMap, db_index=True, on_delete=models.CASCADE, related_name='r_uuid')
objects = RelatedFilesManager()
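# Minimal usage sketch (repo ids and paths below are hypothetical):
#   rel = RelatedFiles.objects.add_related_file_uuid(repo1, repo2, '/a.md', '/b.md')
#   RelatedFiles.objects.get_related_files_uuid(rel.o_uuid)   # rows touching a.md
#   RelatedFiles.objects.delete_related_file_uuid(rel.pk)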
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.StateVariables.Element import Element
class SvShuntCompensatorSections(Element):
"""State variable for the number of sections in service for a shunt compensator.
"""
def __init__(self, sections=0, continuousSections=0.0, ShuntCompensator=None, *args, **kw_args):
"""Initialises a new 'SvShuntCompensatorSections' instance.
@param sections: The number of sections in service.
@param continuousSections: The number of sections in service as a continuous variable.
@param ShuntCompensator: The shunt compensator for which the state applies.
"""
#: The number of sections in service.
self.sections = sections
#: The number of sections in service as a continuous variable.
self.continuousSections = continuousSections
self._ShuntCompensator = None
self.ShuntCompensator = ShuntCompensator
super(SvShuntCompensatorSections, self).__init__(*args, **kw_args)
_attrs = ["sections", "continuousSections"]
_attr_types = {"sections": int, "continuousSections": float}
_defaults = {"sections": 0, "continuousSections": 0.0}
_enums = {}
_refs = ["ShuntCompensator"]
_many_refs = []
def getShuntCompensator(self):
"""The shunt compensator for which the state applies.
"""
return self._ShuntCompensator
def setShuntCompensator(self, value):
if self._ShuntCompensator is not None:
self._ShuntCompensator._SvShuntCompensatorSections = None
self._ShuntCompensator = value
if self._ShuntCompensator is not None:
self._ShuntCompensator.SvShuntCompensatorSections = None
self._ShuntCompensator._SvShuntCompensatorSections = self
ShuntCompensator = property(getShuntCompensator, setShuntCompensator)
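# Usage sketch: assigning through the property keeps both sides of the
# one-to-one reference consistent (`sc` below is a hypothetical compensator):
#   sv = SvShuntCompensatorSections(sections=2, continuousSections=2.0)
#   sv.ShuntCompensator = sc   # also points sc._SvShuntCompensatorSections at sv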
import numpy as np
from bokeh.layouts import layout
from bokeh.models import CustomJS, Slider, ColumnDataSource, WidgetBox
from bokeh.plotting import figure, output_file, show
output_file('dashboard.html')
tools = 'pan'
def bollinger():
# Define Bollinger Bands.
upperband = np.random.random_integers(100, 150, size=100)
lowerband = upperband - 100
x_data = np.arange(1, 101)
# Bollinger shading glyph:
band_x = np.append(x_data, x_data[::-1])
band_y = np.append(lowerband, upperband[::-1])
p = figure(x_axis_type='datetime', tools=tools)
p.patch(band_x, band_y, color='#7570B3', fill_alpha=0.2)
p.title.text = 'Bollinger Bands'
p.title_location = 'left'
p.title.align = 'left'
p.plot_height = 600
p.plot_width = 800
p.grid.grid_line_alpha = 0.4
return [p]
def slider():
x = np.linspace(0, 10, 100)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure(
y_range=(-10, 10), tools='', toolbar_location=None,
title="Sliders example")
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
callback = CustomJS(args=dict(source=source), code="""
var data = source.data;
var A = amp.value;
var k = freq.value;
var phi = phase.value;
var B = offset.value;
x = data['x']
y = data['y']
for (i = 0; i < x.length; i++) {
y[i] = B + A*Math.sin(k*x[i]+phi);
}
source.trigger('change');
""")
amp_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Amplitude", callback=callback, callback_policy='mouseup')
callback.args["amp"] = amp_slider
freq_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Frequency", callback=callback)
callback.args["freq"] = freq_slider
phase_slider = Slider(start=0, end=6.4, value=0, step=.1, title="Phase", callback=callback)
callback.args["phase"] = phase_slider
offset_slider = Slider(start=-5, end=5, value=0, step=.1, title="Offset", callback=callback)
callback.args["offset"] = offset_slider
widgets = WidgetBox(amp_slider, freq_slider, phase_slider, offset_slider)
return [widgets, plot]
def linked_panning():
N = 100
x = np.linspace(0, 4 * np.pi, N)
y1 = np.sin(x)
y2 = np.cos(x)
y3 = np.sin(x) + np.cos(x)
s1 = figure(tools=tools)
s1.circle(x, y1, color="navy", size=8, alpha=0.5)
s2 = figure(tools=tools, x_range=s1.x_range, y_range=s1.y_range)
s2.circle(x, y2, color="firebrick", size=8, alpha=0.5)
s3 = figure(tools='pan, box_select', x_range=s1.x_range)
s3.circle(x, y3, color="olive", size=8, alpha=0.5)
return [s1, s2, s3]
l = layout([
bollinger(),
slider(),
linked_panning(),
], sizing_mode='stretch_both')
show(l)
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Links to arXiv"""
from cgi import escape
from invenio.base.i18n import gettext_set_language
def format_element(bfo, tag="037__", target="_blank"):
"""
Extracts the arXiv preprint information and
presents it as a direct link towards arXiv.org
"""
_ = gettext_set_language(bfo.lang)
potential_arxiv_ids = bfo.fields(tag)
arxiv_id = ""
for potential_arxiv_id in potential_arxiv_ids:
if potential_arxiv_id.get('9') == 'arXiv' and potential_arxiv_id.get('a', '').startswith('arXiv:'):
arxiv_id = potential_arxiv_id['a'][len('arXiv:'):]
return '<a href="http://arxiv.org/abs/%s" target="%s" alt="%s">%s</a>' % (
escape(arxiv_id, True),
escape(target, True),
escape(_("This article on arXiv.org"), True),
escape(arxiv_id))
return ""
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
from cast.analysers import log, mainframe
class EmptyParagraphEndOfSection(mainframe.Extension):
def __init__(self):
self.program = None
def start_program(self, program):
self.program = program
def end_program(self, _):
self.program = None
def start_section(self, section):
last_paragraph = section.get_children()[-1]
if 'paragraph' == last_paragraph.get_kind():
children = last_paragraph.get_children()
if len(children) > 1:
# violation test_ko2
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
elif len(children) == 1:
kind = children[0].get_kind()
if kind not in ['exit', 'stop_run', 'goback']:
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
else:
# violation test_ko1
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
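# The rule in brief: a section is flagged when it does not end in a
# paragraph, when its last paragraph holds more than one statement, or when
# that single statement is not one of exit / stop_run / goback.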
#! /usr/bin/env python
'''
Testsuite for the CustomParameter class
'''
from __future__ import absolute_import, division, print_function
import numpy as np
from puq import *
def _hisplot(y, nbins):
n, bins = np.histogram(y, nbins, normed=True)
mids = bins[:-1] + np.diff(bins) / 2.0
return mids, n
def compare_curves(x1, y1, x2, y2, **args):
ay = np.interp(x2, x1, y1)
print("maximum difference is", np.max(np.abs(ay - y2)))
assert np.allclose(ay, y2, **args)
n = NormalParameter('x','x',mean=10,dev=1)
norm80 = n.pdf.lhs(80)
# test mean and deviation
def test_custom_pdf_meandev():
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(norm80))
assert np.allclose(c.pdf.mean, 10.0, rtol=.05), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 1.0, rtol=.05), "dev=%s" % c.pdf.dev
# test lhs()
def test_custom_pdf_lhs():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
print("LHS: mean=%s dev=%s" % (c.pdf.mean, c.pdf.dev))
assert(np.allclose(c.pdf.mean, 5.04, atol=.1))
assert(np.allclose(c.pdf.dev, 1.9, atol=.1))
# test the lhs() function to see if the curve it generates is
# close enough
data = c.pdf.lhs(1000)
dx, dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.01)
# test lhs1()
def test_custom_pdf_lhs1():
a = np.array([12,12,13,13,13,14,14,14,14,15,15,15,15,15,16,16,16,16,16,17,17,17,18,18])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
# test the lhs1() function to see if the curve it generates is
# close enough
xs = c.pdf.ds1(1000)
assert len(xs) == 1000
# scale [-1,1] back to original size
min, max = c.pdf.range
mean = (min + max)/2.0
xs *= max - mean
xs += mean
# bin it
mids, n = _hisplot(xs, 40)
compare_curves(c.pdf.x, c.pdf.y, mids, n, atol=.004)
'''
import matplotlib.pyplot as plt
plt.plot(mids, n, color='green')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_random():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
data = c.pdf.random(100000)
dx,dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.03)
'''
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
# test lhs()
def test_custom_pdf_lhs_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
print("LHS: mean=%s dev=%s" % (c.pdf.mean, c.pdf.dev))
assert(np.allclose(c.pdf.mean, 5.04, atol=.1))
assert(np.allclose(c.pdf.dev, 1.7, atol=.1))
# test the lhs() function to see if the curve it generates is
# close enough
data = c.pdf.ds(1000)
dx,dy = _hisplot(data, 40)
"""
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
"""
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.4)
# test lhs1()
def test_custom_pdf_lhs1_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
# test the lhs1() function to see if the curve it generates is
# close enough
xs = c.pdf.ds1(1000)
assert len(xs) == 1000
# scale [-1,1] back to original size
min, max = c.pdf.range
mean = (min + max)/2.0
xs *= max - mean
xs += mean
# bin it
mids, n = _hisplot(xs, 40)
compare_curves(c.pdf.x, c.pdf.y, mids, n, atol=.4)
'''
import matplotlib.pyplot as plt
plt.plot(mids, n, color='green')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_random_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
data = c.pdf.random(100000)
dx,dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.4)
'''
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_small():
a = np.array([2,3,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert np.allclose(c.pdf.mean, 7.0/3, atol=.3), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 0.4, atol=.2), "dev=%s" % c.pdf.dev
def test_custom_pdf_small_fit():
a = np.array([2,3,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert np.allclose(c.pdf.mean, 7.0/3, atol=.3), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 0.4, atol=.4), "dev=%s" % c.pdf.dev
# single data point. Must use Bayesian fit.
def test_custom_pdf_single_fit():
a = np.array([42])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, error=NormalPDF(0,.1)))
assert np.allclose(c.pdf.mean, 42), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, .1, atol=.01), "dev=%s" % c.pdf.dev
def test_custom_pdf_single():
a = np.array([42])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 42
assert c.pdf.dev == 0
assert c.pdf.mode == 42
def test_custom_pdf_zero():
a = np.array([0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozero():
a = np.array([0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozerozero():
a = np.array([0, 0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozerozero_fit():
a = np.array([0, 0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_const():
a = np.array([2,2,2,2,2,2,2,2,2,2,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 2
assert c.pdf.dev == 0
assert c.pdf.mode == 2
def test_custom_pdf_const_fit():
a = np.array([2,2,2,2,2,2,2,2,2,2,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert c.pdf.mean == 2
assert c.pdf.dev == 0
assert c.pdf.mode == 2
#### EXCEPTION TESTING
# forget to include pdf
def test_custom_pdf_exception():
ok = False
try:
c = CustomParameter('x', 'X, the unknown')
except ValueError:
ok = True
except:
assert False, 'Wrong Exception'
if not ok:
assert False, 'No Exception when one was expected'
if __name__ == "__main__":
test_custom_pdf_meandev()
test_custom_pdf_lhs()
test_custom_pdf_lhs1()
test_custom_pdf_random()
test_custom_pdf_lhs_nofit()
test_custom_pdf_lhs1_nofit()
test_custom_pdf_random_nofit()
test_custom_pdf_exception()
test_custom_pdf_small()
test_custom_pdf_small_fit()
test_custom_pdf_single()
test_custom_pdf_single_fit()
test_custom_pdf_const()
test_custom_pdf_const_fit()
test_custom_pdf_zero()
test_custom_pdf_zerozero()
test_custom_pdf_zerozerozero()
test_custom_pdf_zerozerozero_fit()
import logging
from virttest import virsh
from provider import libvirt_version
from autotest.client.shared import error
def run_cmd_in_guest(vm, cmd):
"""
Run command in the guest
:param vm: vm object
:param cmd: a command that needs to be run
"""
session = vm.wait_for_login()
status, output = session.cmd_status_output(cmd)
logging.debug("The '%s' output: %s", cmd, output)
if status:
session.close()
raise error.TestError("Cannot run '%s' in guest: %s" % (cmd, output))
else:
session.close()
return output
def run(test, params, env):
"""
1. Configure kernel cmdline to support kdump
2. Start kdump service
3. Inject NMI to the guest
4. Check NMI times
"""
for cmd in 'inject-nmi', 'qemu-monitor-command':
if not virsh.has_help_command(cmd):
raise error.TestNAError("This version of libvirt does not "
"support the %s test" % cmd)
vm_name = params.get("main_vm", "virt-tests-vm1")
vm = env.get_vm(vm_name)
start_vm = params.get("start_vm")
expected_nmi_times = params.get("expected_nmi_times", '0')
unprivileged_user = params.get('unprivileged_user')
if unprivileged_user:
if unprivileged_user.count('EXAMPLE'):
unprivileged_user = 'testacl'
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
raise error.TestNAError("API acl test not supported in current"
+ " libvirt version.")
if start_vm == "yes":
# start kdump service in the guest
cmd = "which kdump"
try:
run_cmd_in_guest(vm, cmd)
except error.TestError:
try:
# try to install kexec-tools on fedoraX/rhelx.y guest
run_cmd_in_guest(vm, "yum install -y kexec-tools")
except error.TestError:
raise error.TestNAError("Requires kexec-tools(or the "
"equivalent for your distro)")
# enable kdump service in the guest
cmd = "service kdump start"
run_cmd_in_guest(vm, cmd)
# filter original 'NMI' information from the /proc/interrupts
cmd = "grep NMI /proc/interrupts"
nmi_str = run_cmd_in_guest(vm, cmd)
# filter CPU from the /proc/cpuinfo and count number
cmd = "grep -E '^process' /proc/cpuinfo | wc -l"
vcpu_num = run_cmd_in_guest(vm, cmd).strip()
logging.info("Inject NMI to the guest via virsh inject_nmi")
virsh.inject_nmi(vm_name, debug=True, ignore_status=False)
logging.info("Inject NMI to the guest via virsh qemu_monitor_command")
virsh.qemu_monitor_command(vm_name, '{"execute":"inject-nmi"}')
# injects a Non-Maskable Interrupt into the default CPU (x86/s390)
# or all CPUs (ppc64), as usual, the default CPU index is 0
cmd = "grep NMI /proc/interrupts | awk '{print $2}'"
nmi_from_default_vcpu = run_cmd_in_guest(vm, cmd)
real_nmi_times = nmi_from_default_vcpu.splitlines()[0]
logging.debug("The current Non-Maskable Interrupts: %s", real_nmi_times)
# check Non-maskable interrupts times
if real_nmi_times != expected_nmi_times:
raise error.TestFail("NMI times %s don't match the expected %s" %
(real_nmi_times, expected_nmi_times))
import numpy as np
import tensorflow as tf
import os
def get_inputs(split, config):
split_dir = config['split_dir']
data_dir = config['data_dir']
dataset = config['dataset']
split_file = os.path.join(split_dir, dataset, split + '.lst')
filename_queue = get_filename_queue(split_file, os.path.join(data_dir, dataset))
if dataset == 'mnist':
image = get_inputs_mnist(filename_queue, config)
config['output_size'] = 28
config['c_dim'] = 1
elif dataset == "cifar-10":
image = get_inputs_cifar10(filename_queue, config)
config['output_size'] = 32
config['c_dim'] = 3
else:
image = get_inputs_image(filename_queue, config)
image_batch = create_batch([image], config['batch_size'])
return image_batch
def get_inputs_image(filename_queue, config):
output_size = config['output_size']
image_size = config['image_size']
c_dim = config['c_dim']
# Read a record, getting filenames from the filename_queue.
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)
image = tf.image.decode_image(value, channels=c_dim)
image = tf.cast(image, tf.float32)/255.
image_shape = tf.shape(image)
image_height, image_width = image_shape[0], image_shape[1]
offset_height = tf.cast((image_height - image_size)/2, tf.int32)
offset_width = tf.cast((image_width - image_size)/2, tf.int32)
image = tf.image.crop_to_bounding_box(image, offset_height, offset_width, image_size, image_size)
image = tf.image.resize_images(image, [output_size, output_size])
image.set_shape([output_size, output_size, c_dim])
return image
def get_inputs_mnist(filename_queue, config):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since all keys are required.
features={
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'depth': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
'image_raw': tf.FixedLenFeature([], tf.string),
})
image = tf.decode_raw(features['image_raw'], tf.uint8)
image.set_shape([784])
image = tf.reshape(image, [28, 28, 1])
image = tf.cast(image, tf.float32) / 255.
# Convert label from a scalar uint8 tensor to an int32 scalar.
label = tf.cast(features['label'], tf.int32)
binary_image = (tf.random_uniform(image.get_shape()) <= image)
binary_image = tf.cast(binary_image, tf.float32)
return binary_image
def get_inputs_cifar10(filename_queue, config):
output_size = config['output_size']
image_size = config['image_size']
c_dim = config['c_dim']
# Dimensions of the images in the CIFAR-10 dataset.
# See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
# input format.
label_bytes = 1 # 2 for CIFAR-100
image_bytes = 32 * 32 * 3
# Every record consists of a label followed by the image, with a
# fixed number of bytes for each.
record_bytes = label_bytes + image_bytes
# Read a record, getting filenames from the filename_queue.
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
key, value = reader.read(filename_queue)
record = tf.decode_raw(value, tf.uint8)
# The first bytes represent the label, which we convert from uint8->int32.
label = tf.cast(record[0], tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
#tf.strided_slice(record, [label_bytes], [label_bytes + image_bytes])
image = tf.reshape(record[label_bytes:label_bytes+image_bytes], [3, 32, 32])
image = tf.cast(image, tf.float32)/255.
# Convert from [depth, height, width] to [height, width, depth].
image = tf.transpose(image, [1, 2, 0])
return image
def get_filename_queue(split_file, data_dir):
with open(split_file, 'r') as f:
filenames = f.readlines()
filenames = [os.path.join(data_dir, f.strip()) for f in filenames]
for f in filenames:
if not os.path.exists(f):
raise ValueError('Failed to find file: ' + f)
filename_queue = tf.train.string_input_producer(filenames)
return filename_queue
def create_batch(inputs, batch_size=64, min_queue_examples=1000, num_preprocess_threads=12, enqueue_many=False):
# Generate a batch of images and labels by building up a queue of examples.
batch = tf.train.shuffle_batch(
inputs,
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples,
enqueue_many=enqueue_many,
)
return batch
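# Minimal usage sketch (values are hypothetical; the keys are the ones read
# above):
#   config = {'split_dir': 'splits', 'data_dir': 'data', 'dataset': 'mnist',
#             'batch_size': 64}
#   images = get_inputs('train', config)   # shuffled batch tensor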
]
big_cnts = []
obj_points = []
image = timage
thresh_limit = 10
thresh_limit = find_best_thresh(image, thresh_limit, 0)
# find best thresh limit code here!
line_objects = []
points = []
orphan_lines = []
_, thresh = cv2.threshold(image, thresh_limit, 255, cv2.THRESH_BINARY)
(_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#print ("CNTS:", len(cnts))
hit = 0
objects = []
if len(cnts) < 500:
for (i,c) in enumerate(cnts):
x,y,w,h = cv2.boundingRect(cnts[i])
if w > 1 and h > 1:
if (w < 10 and h <10):
nothing = 0
# cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
#cv2.circle(image, (x,y), 20, (120), 1)
#if w != h:
# cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
else:
#cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
# Convert big object into points and add each one to the points array.
crop = timage[y:y+h,x:x+w]
points.append((x,y,w,h))
if w < 600 and h < 400:
crop_points = find_points_in_crop(crop,x,y,w,h)
for x,y,w,h in crop_points:
print("adding some points",x,y,w,h)
points.append((x,y,w,h))
points.append((x,y,w,h))
#objects.append((x,y,w,h))
else:
image[y:y+h,x:x+w] = [0]
else:
print ("WAY TO MANY CNTS:", len(cnts))
thresh_limit = thresh_limit + 5
return(points)
# find line objects
if (len(objects) + len(points)) > 0:
line_groups, orphan_lines, stars, obj_points = find_objects(0, points)
else:
line_groups = []
final_group = []
final_groups = []
reject_group = []
reject_groups = []
line_segments = flatten_line_groups(line_groups)
line_segments = sorted(line_segments, key = lambda x: (x[0],x[1]))
if len(line_segments) > 0:
final_group, reject_group = regroup_lines(line_segments)
print ("MIKE!:", len(final_group))
if len(final_group) > 1:
final_groups.append(final_group)
else:
for line in final_group:
orphan_lines.append(line)
if len(reject_group) > 3:
print (len(reject_group), "rejects left. do it again.")
reject_group = sorted(reject_group, key = lambda x: (x[1],x[0]))
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
else:
for line in final_group:
orphan_lines.append(line)
print (len(reject_group), "rejects left after 2nd try")
if len(reject_group) > 3:
print (len(reject_group), "rejects left. do it again.")
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
else:
for line in final_group:
orphan_lines.append(line)
print (len(reject_group), "rejects left after 3rd try")
# try to adopt the orphans!
if len(orphan_lines) >= 1:
print (orphan_lines)
final_group, reject_group = regroup_lines(orphan_lines)
if len(final_group) > 1:
final_groups.append(final_group)
if len(final_group) > 0:
print ("Adopted! : ", final_group)
orphan_lines = reject_group
if len(orphan_lines) >= 1:
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
if len(final_group) > 0:
print ("Adopted! : ", final_group)
orphan_lines = reject_group
if len(orphan_lines) >= 1:
final_group, reject_group = regroup_lines(reject_group)
if len(final_group) > 1:
final_groups.append(final_group)
if len(final_group) > 0:
print ("Adopted! : ", final_group)
orphan_lines = reject_group
final_groups, orphan_lines = clean_line_groups(final_groups, orphan_lines)
clusters= []
clusters_ab= []
last_x = None
last_y = None
last_ang = None
ang = None
if len(points) > 3:
num_clusters = int(len(points)/3)
clusters = kmeans_cluster(points, num_clusters)
#print ("MIKE CLUSTERS", len(clusters))
for cluster in clusters:
cxs = []
cys = []
for cp in cluster:
x,y,w,h = cp
cxs.append(x)
cys.append(y)
if last_x is not None:
ang = find_angle(x,y,last_x,last_y)
print ("CLUSTER ANGLE:", x,y,last_x,last_y,ang)
if last_ang is not None:
if ang - 5 < last_ang < ang + 5:
cv2.line(image, (x,y), (last_x,last_y), (200), 4)
last_x = x
last_y = y
last_ang = ang
a, b = best_fit (cxs,cys)
mnx = min(cxs)
mny = min(cys)
mmx = max(cxs)
mmy = max(cys)
cv2.rectangle(image, (mnx,mny), (mmx, mmy), (255),1)
#print ("MIKE MIKE XS,", cxs)
#print ("MIKE MIKE YS,", cys)
clusters_ab.append((a,b))
print ("MIKE AB,", a,b)
print ("FINAL ANALYSIS")
print (final_groups)
print ("--------------")
print ("File Name: ", filename)
print ("Total Points:", len(points))
print ("Total Line Segments:", len(line_segments))
print ("Total Final Line Groups:", len(final_groups))
print ("Total Clusters:", len(clusters))
cl =0
for a,b in clusters_ab:
print ("Cluster " + str(cl + 1) + " " + str(len(clusters[cl])) + " points")
print ("LINE AB " + str(a) + " " + str(b))
cl = cl + 1
#print (final_groups)
print ("Total Rejected Lines:", len(reject_group))
gc = 1
xs, ys = [], []  # separate lists; "xs = ys = []" would alias them to one list
for line_group in final_groups:
lc = 1
for line in line_group:
dist,angle,x1,y1,x2,y2 = line
xs.append(x1)
xs.append(x2)
ys.append(y1)
ys.append(y2)
#print (gc, lc, line)
lc = lc + 1
gc = gc + 1
if len(xs) > 0 and len(ys) > 0:
mnx = min(xs)
mxx = max(xs)
mny = min(ys)
mxy = max(ys)
cv2.rectangle(image, (mnx,mny), (mxx, mxy), (255),1)
print ("Total Orphaned Lines:", len(orphan_lines))
if len(line_groups) > 0:
line_segments = flatten_line_groups(line_groups)
find_line_nodes(line_segments)
gc = 1
for line_group in line_groups:
lc = 1
line_group = sorted(line_group, key = lambda x: (x[2],x[3]))
dist,angle,sx1,sy1,sx2,sy2 = line_group[0]
for line in line_group:
dist,angle,x1,y1,x2,y2 = line
#s_ang = find_angle(sx1,sy1,x1,y1)
#if angle - 5 < s_ang < angle + 5:
# print("FINAL GROUP:", gc,lc,line, angle, s_ang)
# final_group.append((dist,angle,x1,y1,x2,y2))
#else:
# print("REJECT GROUP:", gc,lc,line, angle, s_ang)
# reject_group.append((dist,angle,x1,y1,x2,y2))
#seg_dist = find_closest_segment(line, line_group)
cv2.line(image, (x1,y1), (x2,y2), (255), 2)
cv2.putText(image, "L " + str(lc), (x1+25,y1+10), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
lc = lc + 1
if len(line_group) > 0:
cv2.putText(image, "LG " + str(gc), (x1+25,y1), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
gc = gc + 1
for line in orphan_lines:
#print("ORPHAN:", line)
dist,angle,x1,y1,x2,y2 = line
cv2.line(image, (x1,y1), (x2,y2), (255), 1)
cv2.putText(image, "Orph" , (x1+25,y1), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
#cv2.ellipse(image,(ax,ay),(dist_x,dist_y),elp_ang,elp_ang,180,255,-1)
#a,b = best_fit(lxs, lys)
#plt.scatter(lxs,lys)
#plt.xlim(0,640)
#plt.ylim(0,480)
#yfit = [a + b * xi for xi in lxs]
#plt.plot(lxs,yfit)
#cv2.imshow('pepe', image)
#cv2.waitKey(1)
#plt.gca().invert_yaxis()
#plt.show()
'''
Using the Python language, have the function MultiplicativePersistence(num)
take the num parameter being passed which will always be a positive integer
and return its multiplicative persistence which is the number of times
you must multiply the digits in num until you reach a single digit.
For example: if num is 39 then your program should return 3
because 3 * 9 = 27 then 2 * 7 = 14 and finally 1 * 4 = 4 and you stop at 4.
'''
def MultiplicativePersistence(num):
steps = 0
while num > 9:
snum = str(num)
sdigits = list(snum)
num = 1
for snum in sdigits:
n = int(snum)
num = num * n
steps = steps + 1
return steps
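# Worked example from the prompt: 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4,
# i.e. three multiplication steps.
assert MultiplicativePersistence(39) == 3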
# keep this function call here
# to see how to enter arguments in Python scroll down
print MultiplicativePersistence(raw_input())
# Copyright 2021 ForgeFlow S.L. <https://www.forgeflow.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openupgradelib import openupgrade
_xmlid_renames = [
(
"sale.access_product_product_attribute_custom_value",
"sale.access_product_product_attribute_custom_value_sale_manager",
),
("sale.account_move_see_all", "sale.account_invoice_rule_see_all"),
("sale.account_move_personal_rule", "sale.account_invoice_rule_see_personal"),
("sale.account_move_line_see_all | ", "sale.account_invoice_line_rule_see_all"),
(
"sale.account_move_line_personal_rule",
"sale.account_invoice_line_rule_see_personal",
),
]
def fast_fill_sale_order_currency_id(env):
if not openupgrade.column_exists(env.cr, "sale_order", "currency_id"):
openupgrade.logged_query(
env.cr,
"""
ALTER TABLE sale_order
ADD COLUMN currency_id integer""",
)
openupgrade.logged_query(
env.cr,
"""
UPDATE sale_order so
SET currency_id = pp.currency_id
FROM product_pricelist pp
WHERE so.pricelist_id = pp.id""",
)
@openupgrade.migrate()
def migrate(env, version):
openupgrade.rename_xmlids(env.cr, _xmlid_renames)
fast_fill_sale_order_currency_id(env)
x: Numpy array, the data to fit on. Should have rank 4.
In case of grayscale data, the channels axis should have value 1,
and in case of RGB data, it should have value 3.
augment: Whether to fit on randomly augmented samples
rounds: If `augment`,
how many augmentation passes to do over the data
seed: random seed.
# Raises
ValueError: in case of invalid input `x`.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
raise ValueError(
'Expected input to be images (as Numpy array) '
'following the data format convention "' + self.data_format + '" '
'(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
'However, it was passed an array with shape ' + str(x.shape) +
' (' + str(x.shape[self.channel_axis]) + ' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
self.principal_components = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + 10e-7))), u.T)
class Iterator(object):
"""Abstract base class for image data iterators.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_generator = self._flow_index(n, batch_size, shuffle, seed)
def reset(self):
self.batch_index = 0
def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
if self.val_mode:
while 1:
if self.batch_index == 0:
index_array = np.arange(n)
current_index = (self.batch_index * batch_size) % n
if n > current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = n - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
if n < current_index + batch_size:
break
else:
# Ensure self.batch_index is 0.
self.reset()
while 1:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
if self.batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (self.batch_index * batch_size) % n
if n > current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = n - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
"""
def __init__(self, directory, dataset,
image_data_generator,
data_split,
val_mode=False,
target_size=(360, 480),
target_size_mask=(45, 60),
color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='jpeg',
follow_links=False,
keep_classes=None,
random_split=False,
validation_data=False):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
self.target_size_mask = tuple(target_size_mask)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == ' |
# -*- coding: utf-8 -*-
class Charset:
common_name = u'Google Fonts: Extras'
native_name = u''
def glyphs(self):
glyphs = [0xe0ff] # PUA: Font logo
glyphs += [0xeffd] # PUA: Font version number
glyphs += [0xf000] # PUA: font ppem size indicator: run `ftview -f 1255 10 Ubuntu-Regular.ttf` to see it in action!
return glyphs
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Philipp Wagner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys, math
from PIL import Image
import facedetect
def Distance(p1,p2):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
return math.sqrt(dx*dx+dy*dy)
def ScaleRotateTranslate(image, angle, center = None, new_center = None, scale = None, resample=Image.BICUBIC):
if (scale is None) and (center is None):
return image.rotate(angle=angle, resample=resample)
nx,ny = x,y = center
sx=sy=1.0
if new_center:
(nx,ny) = new_center
if scale:
(sx,sy) = (scale, scale)
cosine = math.cos(angle)
sine = math.sin(angle)
a = cosine/sx
b = sine/sx
c = x-nx*a-ny*b
d = -sine/sy
e = cosine/sy
f = y-nx*d-ny*e
return image.transform(image.size, Image.AFFINE, (a,b,c,d,e,f), resample=resample)
def CropFace(image, eye_left=(0,0), eye_right=(0,0), offset_pct=(0.2,0.2), dest_sz = (70,70)):
# calculate offsets in original image
offset_h = math.floor(float(offset_pct[0])*dest_sz[0])
offset_v = math.floor(float(offset_pct[1])*dest_sz[1])
# get the direction
eye_direction = (eye_right[0] - eye_left[0], eye_right[1] - eye_left[1])
# calc rotation angle in radians
rotation = -math.atan2(float(eye_direction[1]),float(eye_direction[0]))
# distance between them
dist = Distance(eye_left, eye_right)
# calculate the reference eye-width
reference = dest_sz[0] - 2.0*offset_h
# scale factor
scale = float(dist)/float(reference)
# rotate original around the left eye
image = ScaleRotateTranslate(image, center=eye_left, angle=rotation)
# crop the rotated image
crop_xy = (eye_left[0] - scale*offset_h, eye_left[1] - scale*offset_v)
crop_size = (dest_sz[0]*scale, dest_sz[1]*scale)
image = image.crop((int(crop_xy[0]), int(crop_xy[1]), int(crop_xy[0]+crop_size[0]), int(crop_xy[1]+crop_size[1])))
# resize it
image = image.resize(dest_sz, Image.ANTIALIAS)
return image
if __name__ == "__main__":
f = open(sys.argv[1], 'r')
csv = open(sys.argv[2], "w")
for line in f:
lineArray = line.split(";")
fileName = lineArray[0]
label = lineArray[1]
print "aligning %s to aligned" % (fileName)
aligned_file_name = "aligned/%s" % fileName
face = facedetect.detect_faces(fileName)['face'][0]
print(face)
CropFace(Image.open(fileName), eye_left=(face[0],face[1]), eye_right=(face[2],face[1]), offset_pct=(0.08,0.08), dest_sz=(200,200)).save(aligned_file_name)
# CropFace(Image.open(fileName), eye_left=(252,364), eye_right=(420,366), offset_pct=(0.1,0.1), dest_sz=(200,200)).save(aligned_file_name)
csv.write("%s;%s" % (aligned_file_name, label))
f.close()
csv.close()
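# Expected input format for sys.argv[1], one "path;label" record per line
# (paths below are hypothetical):
#   photos/alice_01.jpg;0
#   photos/bob_03.jpg;1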
# -*- coding: utf-8 -*-
import unittest
from config.context import Attribute, attr
class Data(object):
pass
class AttributeTestCase(unittest.TestCase):
def setUp(self):
self.data= Data()
self.data.int2= 1
self.integer= 3
self.int1= Attribute("int1", destObj= self.data, valueType=int)
self.int2= Attribute("int2", destObj= self.data)
self.int3= Attribute("integer", destObj= self)
self.flt1= Attribute("flt",
destObj= self.data,
destName="float",
valueType=float )
self.flt2= Attribute("value", valueType= float)
self.str = Attribute("string", destObj=self.data)
def test_construction(self):
self.assertEqual(self.int1.name, "int1")
self.assertEqual(self.int2.name, "int2")
self.assertEqual(self.int3.name, "integer")
self.assertEqual(self.flt1.name, "flt")
self.assertEqual(self.flt2.name, "value")
self.assertEqual(self.int1.data, self.data.int1)
self.assertEqual(self.int2.data, self.data.int2)
self.assertEqual(self.int3.data, self.integer)
self.assertEqual(self.flt1.data, self.data.float)
self.assertEqual(self.flt2.data, self.flt2.value)
self.assertEqual(self.str.data, self.data.string)
def test_fromString(self):
value="123"
x=int(value)
self.int1.fromString(value)
self.int2.fromString(value)
self.int3.fromString(value)
self.str.fromString(value)
self.assertEqual(self.int1.data, x)
self.assertEqual(self.int2.data, x)
self.assertEqual(self.int3.data, x)
self.assertEqual(self.data.int1, x)
self.assertEqual(self.data.int2, x)
self.assertEqual(self.integer, x)
self.assertEqual(self.data.string, value)
value="1.23"
self.flt1.fromString(value)
self.flt2.fromString(value)
x= float(value)
self.assertEqual(self.flt1.data, x)
self.assertEqual(self.data.float, x)
self.assertEqual(self.flt2.data, x)
self.assertEqual(self.flt2.value, x)
value="a1.23"
self.assertRaises(ValueError, self.int1.fromString, value)
def test_contextInterface(self):
self.assertIsNone(self.int1.parent)
self.assertIs(self.int1, self.int1.decorator)
self.assertEqual(self.flt1.about, "")
self.assertIsNone(self.flt2.default)
self.int1.open()
self.int1.close()
self.int1.clear()
self.assertRaises(NotImplementedError, self.int2.getContext, "xx")
self.assertRaises(NotImplementedError, self.int2.insert, self.int3)
def test_attr(self):
dmc= attr("int1", destObj= self.data, valueType=int)
self.assertIs(dmc._ctx.data, self.data.int1)
def suite():
"""Get Test suite object
"""
return unittest.TestLoader().loadTestsFromTestCase(AttributeTestCase)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run( suite() )
# -*- coding: utf-8 -*-
#
#
# OpenERP, Open Source Management Solution
# Authors: Raphaël Valyi, Renato Lima
# Copyright (C) 2011 Akretion LTDA.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{'name': 'Sale Exceptions',
'summary': 'Custom exceptions on sale order',
'version': '1.0',
'category': 'Generic Modules/Sale',
'description': """
Sale Exceptions
===============
This module allows you to attach several customizable exceptions to your
sale orders, so that you can filter orders by exception type and fix them.
This is especially useful in an order-importation scenario, such as with
the base_sale_multi_channels module, because a few orders are likely to
have errors when you import them (a product not found in Odoo, a wrong
line format, etc.).
Contributors
------------
* Raphaël Valyi <raphael.valyi@akretion.com>
* Renato Lima <renato.lima@akretion.com>
* Sébastien BEAU <sebastien.beau@akretion.com>
* Guewen Baconnier <guewen.baconnier@camptocamp.com>
* Yannick Vaucher <yannick.vaucher@camptocamp.com>
""",
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com',
'depends': ['sale'],
'data': ['sale_workflow.xml',
'sale_view.xml',
'sale_exceptions_data.xml',
'wizard/sale_exception_confirm_view.xml',
'security/ir.model.access.csv',
'settings/sale.exception.csv'],
'installable': True,
}
import numpy as np
import openmc
###############################################################################
# Simulation Input File Parameters
###############################################################################
# OpenMC simulation parameters
batches = 15
inactive = 5
particles = 10000
###############################################################################
# Exporting to OpenMC materials.xml File
###############################################################################
# Instantiate some Materials and register the appropriate Nuclides
fuel1 = openmc.Material(material_id=1, name='fuel')
fuel1.set_density('g/cc', 4.5)
fuel1.add_nuclide('U235', 1.)
fuel2 = openmc.Material(material_id=2, name='depleted fuel')
fuel2.set_density('g/cc', 4.5)
fuel2.add_nuclide('U238', 1.)
moderator = openmc.Material(material_id=3, name='moderator')
moderator.set_density('g/cc', 1.0)
moderator.add_element('H', 2.)
moderator.add_element('O', 1.)
moderator.add_s_alpha_beta('c_H_in_H2O')
# Instantiate a Materials collection and export to XML
materials_file = openmc.Materials([fuel1, fuel2, moderator])
materials_file.export_to_xml()
###############################################################################
# Exporting to OpenMC geometry.xml file
###############################################################################
# Instantiate planar surfaces
x1 = openmc.XPlane(surface_id=1, x0=-10)
x2 = openmc.XPlane(surface_id=2, x0=-7)
x3 = openmc.XPlane(surface_id=3, x0=-4)
x4 = openmc.XPlane(surface_id=4, x0=4)
x5 = openmc.XPlane(surface_id=5, x0=7)
x6 = openmc.XPlane(surface_id=6, x0=10)
y1 = openmc.YPlane(surface_id=11, y0=-10)
y2 = openmc.YPlane(surface_id=12, y0=-7)
y3 = openmc.YPlane(surface_id=13, y0=-4)
y4 = openmc.YPlane(surface_id=14, y0=4)
y5 = openmc.YPlane(surface_id=15, y0=7)
y6 = openmc.YPlane(surface_id=16, y0=10)
z1 = openmc.ZPlane(surface_id=21, z0=-10)
z2 = openmc.ZPlane(surface_id=22, z0=-7)
z3 = openmc.ZPlane(surface_id=23, z0=-4)
z4 = openmc.ZPlane(surface_id=24, z0=4)
z5 = openmc.ZPlane(surface_id=25, z0=7)
z6 = openmc.ZPlane(surface_id=26, z0=10)
# Set vacuum boundary conditions on outside
for surface in [x1, x6, y1, y6, z1, z6]:
surface.boundary_type = 'vacuum'
# Instantiate Cells
inner_box = openmc.Cell(cell_id=1, name='inner box')
middle_box = openmc.Cell(cell_id=2, name='middle box')
outer_box = openmc.Cell(cell_id=3, name='outer box')
# Use each set of six planes to create solid cube regions. We can then use these
# to create cubic shells.
inner_cube = +x3 & -x4 & +y3 & -y4 & +z3 & -z4
middle_cube = +x2 & -x5 & +y2 & -y5 & +z2 & -z5
outer_cube = +x1 & -x6 & +y1 & -y6 & +z1 & -z6
outside_inner_cube = -x3 | +x4 | -y3 | +y4 | -z3 | +z4
# Use surface half-spaces to define regions
inner_box.region = inner_cube
middle_box.region = middle_cube & outside_inner_cube
outer_box.region = outer_cube & ~middle_cube
# Register Materials with Cells
inner_box.fill = fuel1
middle_box.fill = fuel2
outer_box.fill = moderator
# Instantiate root universe
root = openmc.Universe(universe_id=0, name='root universe')
root.add_cells([inner_box, middle_box, outer_box])
# Instantiate a Geometry, register the root Universe, and export to XML
geometry = openmc.Geometry(root)
geometry.export_to_xml()
###############################################################################
# Exporting to OpenMC settings.xml File
###############################################################################
# Instantiate a Settings object, set all runtime parameters, and export to XML
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
# Create an initial uniform spatial source distribution over fissionable zones
uniform_dist = openmc.stats.Box(*outer_cube.bounding_box, only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
settings_file.export_to_xml()
###############################################################################
# Exporting to OpenMC plots.xml File
###############################################################################
plot = openmc.Plot(plot_id=1)
plot.origin = [0, 0, 0]
plot.width = [20, 20]
plot.pixels = [200, 200]
plot.color_by = 'cell'
# Instantiate a Plots collection and export to XML
plot_file = openmc.Plots([plot])
plot_file.export_to_xml()
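###############################################################################
# Running the Simulation
###############################################################################
# A minimal sketch of how the exported model could be executed, assuming cross
# section data is configured (e.g. the OPENMC_CROSS_SECTIONS environment
# variable); openmc.run() invokes the transport solver on the XML files above.
openmc.run()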
|
"""
Copyright 2016, Paul Powell, All rights reserved.
"""
import team
import round
class Region:
def __init__(self, name, teams, algorithm):
self.initialize(name, teams)
self.name = name
self.rounds = []
self.algorithm = algorithm
self.final = None
def __call__(self, madness):
round1 = round.Round(self.name, 1, madness, self.algorithm, self.matchups)
round2 = round1.go()
round3 = round2.go()
round4 = round3.go()
self.rounds = [round1, round2, round3, round4]
# Special hacks for final round
self.final = self.algorithm(round4.games[0], madness)
round4.winner = self.final.winner
round4.results.append(self.final)
return self.final()[0]
def initialize(self, name, teams):
# Looks like [((1,16), (8,9)), ((5,12), (4,13)), ((6,11), (3,14)), ((7,10), (2,15))]
sregion = name
game1 = (team.Team(teams[1], sregion, 1), team.Team(teams[16], sregion, 16))
game2 = (team.Team(teams[8], sregion, 8), team.Team(teams[9], sregion, 9))
game3 = (team.Team(teams[5], sregion, 5), team.Team(teams[12], sregion, 12))
game4 = (team.Team(teams[4], sregion, 4), team.Team(teams[13], sregion, 13))
game5 = (team.Team(teams[6], sregion, 6), team.Team(teams[11], sregion, 11))
game6 = (team.Team(teams[3], sregion, 3), team.Team(teams[14], sregion, 14))
game7 = (team.Team(teams[7], sregion, 7), team.Team(teams[10], sregion, 10))
game8 = (team.Team(teams[2], sregion, 2), team.Team(teams[15], sregion, 15))
self.matchups = [(game1, game2), (game3, game4), (game5, game6), (game7, game8)]
def set_sf(self, winner, second):
for matchup in self.matchups:
for game in matchup:
for team in game:
if team.name == winner:
print "found winner"
team.sf = 3
if team.name == second:
print "found second"
team.sf = 2
|
ne
menu_select_color = ColorScheme.get_primary()
def __init__(self, track_manager, status_pump, **kwargs):
Builder.load_file(STATUS_KV_FILE)
super(StatusView, self).__init__(**kwargs)
self.track_manager = track_manager
self.register_event_type('on_tracks_updated')
self._menu_node = self.ids.menu
self._menu_node.bind(selected_node=self._on_menu_select)
status_pump.add_listener(self.status_updated)
self._build_core_menu()
def _build_core_menu(self):
# build application status node
self._append_menu_node('Application', 'app')
# select the first node in the tree.
self._menu_node.select_node(self._menu_node.root.nodes[0])
def _build_menu(self):
if self._menu_built:
return
for item in self.status.iterkeys():
text = self._menu_keys[item] if item in self._menu_keys else item
self._append_menu_node(text, item)
self._menu_built = True
def _append_menu_node(self, text, item):
label = LinkedTreeViewLabel(text=text)
label.id = item
label.color_selected = self.menu_select_color
return self._menu_node.add_node(label)
def _on_menu_select(self, instance, value):
self._selected_item = value.id
self.update()
def status_updated(self, status):
self.status = status['status']
def update(self):
_bg_current = RAW_STATUS_BGCOLOR_1
if self._selected_item in self._menu_keys:
text = self._menu_keys[self._selected_item]
else:
text = self._selected_item
self.ids.name.text = text
self.ids.status_grid.clear_widgets()
function_name = ('render_' + self._selected_item).lower()
# Generic way of not having to create a long switch or if/else block
# to call each render function
if function_name in dir(self):
getattr(self, function_name)()
else:
self.render_generic(self._selected_item)
def render_generic(self, section):
status = self.status[section]
for item, value in status.iteritems():
self._add_item(item, value)
def render_app(self):
label_widget = StatusTitle(text='Application Log')
self.ids.status_grid.add_widget(label_widget)
self.ids.status_grid.add_widget(ApplicationLogView())
self._add_item('Application Version', RaceCaptureApp.get_app_version())
def render_system(self):
if 'git_info' in self.status['system']:
version = self.status['system']['git_info']
else:
version = '.'.join(
[
str(self.status['system']['ver_major']),
str(self.status['system']['ver_minor']),
str(self.status['system']['ver_bugfix'])
]
)
self._add_item('Version', version)
self._add_item('Serial Number', self.status['system']['serial'])
uptime = timedelta(seconds=(self.status['system']['uptime'] / 1000))
self._add_item('Uptime', uptime)
def render_gps(self):
status = self.status['GPS']
init_status = self._get_enum_definition('GPS', 'init', status['init'])
quality = self._get_enum_definition('GPS', 'qual', status['qual'])
location = str(status['lat']) + ', ' + str(status['lon'])
satellites = status['sats']
dop = status['DOP']
self._add_item('Status', init_status)
self._add_item('GPS Quality', quality)
self._add_item('Location', location)
self._add_item('Satellites', satellites)
self._add_item('Dilution of precision', dop)
def render_cell(self):
status = self.status['cell']
init_status = self._get_enum_definition('cell', 'init', status['init'])
imei = status['IMEI']
signal_strength = self._get_enum_definition('cell', 'sig_str', status['sig_str'], 'Unknown')
number = status['number']
self._add_item('Status', init_status)
self._add_item('IMEI', imei)
self._add_item('Signal strength', signal_strength)
self._add_item('Phone Number', number)
self._add_item('Network Status', status.get('state', '').capitalize())
def render_bt(self):
status = self.status['bt']
init_status = self._get_enum_definition('bt', 'init', status['init'])
self._add_item('Status', init_status)
def render_wifi(self):
status = self.status['wifi']
initialized = status['initialized']
ap_enabled = status['ap']['active']
self._add_item('Status', self._get_enum_definition('wifi', 'init', int(status['initialized'])))
self._add_item('Access Point', 'Enabled' if ap_enabled else 'Disabled')
client_enabled = status['client']['active']
client_connected = status['client']['connected']
connected_msg = '' if not client_enabled else '({})'.format(
'Connected' if client_connected else 'Disconnected')
client_status_msg = '{} {}'.format('Enabled' if client_enabled else 'Disabled', connected_msg)
self._add_item('Client', client_status_msg)
def render_imu(self):
status = self.status['imu']
self._add_item('Status', 'Initialized' if status['init'] else 'Not initialized')
def render_logging(self):
status = self.status['logging']
init_status = self._get_enum_definition('logging', 'status', status['status'])
duration = timedelta(seconds=(status['dur'] / 1000))
self._add_item('Status', init_status)
self._add_item('Logging for', duration)
def render_telemetry(self):
status = self.status['telemetry']
init_status = self._get_enum_definition('telemetry', 'status', status['status'])
duration = timedelta(seconds=(status['dur'] / 1000))
self._add_item('Status', init_status)
self._add_item('Logging for', duration)
def render_track(self):
status = self.status['track']
init_status = self._get_enum_definition('track', 'status', status['status'])
if status['status'] == 1:
track_name = 'User defined'
else:
if status['trackId'] != 0:
track = self.track_manager.find_track_by_short_id(status['trackId'])
if track is None:
if status['status'] == 1:
track_name = 'Fixed'
else:
track_name = 'Track not found'
else:
track_name = track.name
configuration_name = track.configuration
if configuration_name and len(configuration_name):
track_name += ' (' + configuration_name + ')'
else:
track_name = 'No track detected'
in_lap = 'Yes' if status['inLap'] == 1 else 'No'
armed = 'Yes' if status['armed'] == 1 else 'No'
self._add_item('Status', init_status)
self._add_item('Track', track_name)
self._add_item('In lap', in_lap)
self._add_item('Armed', armed)
def _add_item(self, label, data):
label_widget = StatusTitle(text=label)
data_widget = StatusValue(text=str(data))
self.ids.status_grid.add_widget(label_widget)
self.ids.status_grid.add_widget(data_widget)
if len(self.ids.status_grid.children) / 2 % 2 == 0:
bg_color = RAW_STATUS_BGCOLOR_2
else:
bg_color = RAW_STATUS_BGCOLOR_1
label_widget.backgroundColor = bg_color
data_widget.backgroundColor = bg_color
def on_status(self, instance, value):
self._build_menu()
self.update()
# Generalized function for getting an enum's English
# equivalent. If the value is not found, the enum is returned
def _get_enum_definition(self, section, subsection, value, default=None):
val = default if default is not None else value
if section in self._enum_keys and subsection in self._enum_keys[section]:
n idea which parts of the ensemble affect which
part of ensembles
"""
def __init__(self, pathmover=None, ensembles=None, initial=None):
super(MoveTreeBuilder, self).__init__()
self.p_x = dict()
self.p_y = dict()
self.obj = list()
self.ensembles = []
self.pathmover = None
self.initial = None
self.traj_ens_x = dict()
self.traj_ens_y = dict()
self.traj_repl_x = dict()
self.traj_repl_y = dict()
self.ens_x = list()
self.repl_x = list()
self.options.analysis['only_canonical'] = True
self.options.analysis['label_with'] = "name" # or "class"
self.doc = None
if pathmover is not None:
self.pathmover = pathmover
if ensembles is not None:
self.ensembles = ensembles
if initial is not None:
self.initial = initial
@staticmethod
def from_scheme(scheme, hidden_ensembles=True):
"""
Initialize a new `MoveTreeBuilder` from the data in a `MoveScheme`
Parameters
----------
scheme : :obj:`openpathsampling.MoveScheme`
use the root mover of this scheme as the basis for visualization
hidden_ensembles : bool
whether to show the scheme's hidden ensembles as well (default
True)
Returns
-------
:obj:`MoveTreeBuilder`
"""
try:
# inp is a move scheme
input_ensembles = scheme.list_initial_ensembles()
except AttributeError:
# inp is a path mover
# ??? this is nonsense in from_scheme, isn't it? you would get
# error on the thing you return below ~~~DWHS
input_ensembles = scheme.input_ensembles
# using network.all_ensembles forces a correct ordering
ensembles = scheme.network.all_ensembles
if hidden_ensembles:
ensembles += list(scheme.find_hidden_ensembles())
return MoveTreeBuilder(
pathmover=scheme.root_mover,
ensembles=ensembles,
initial=input_ensembles
)
@staticmethod
def _get_sub_used(mover, replica_states, level):
l = [(mover, level, replica_states)]
subs = mover.sub_replica_state(replica_states)
map(
lambda x, y, z: l.extend(MoveTreeBuilder._get_sub_used(x, y, z)),
mover.submovers, subs, [1 + level] * len(mover.submovers)
)
return l
def render(self):
doc = TreeRenderer()
self.doc = doc
level_y = dict()
self.ens_x = [None] * len(self.ensembles)
self.repl_x = [None] * len(self.ensembles)
path = self.pathmover
group = doc.g(
class_='tree'
)
tree = path.depth_pre_order(
lambda this: this,
only_canonical=self.options.analysis['only_canonical'])
total = len(tree)
for yp, (level, sub_mp) in enumerate(tree):
x_pos = - level
sub_type = sub_mp.__class__
if self.options.analysis['label_with'] == "name":
try:
sub_name = sub_mp.name
except AttributeError:
sub_name = sub_type.__name__[:-5]
elif self.options.analysis['label_with'] == "class":
sub_name = sub_type.__name__[:-5]
else: # pragma: no cover (should never occur)
raise ValueError("Bad option for 'label_with': "
+ str(self.options.analysis['label_with']))
if sub_type is paths.SampleMoveChange:
group.add(
doc.block(level, yp))
group.add(
doc.label(
x_pos,
yp,
sub_name,
css_class=['name'] + [sub_type.__name__]
)
)
else:
group.add(
doc.block(
x_pos,
yp,
)
)
group.add(
doc.label(
x_pos,
yp,
sub_name
)
)
if level - 1 in level_y \
and level_y[level - 1] == yp - 1:
group.add(
doc.vertical_connector(
x_pos + 1,
yp,
yp - 1
)
)
if level + 1 in level_y:
del level_y[level + 1]
if level in level_y and level_y[level]:
group.add(
doc.vertical_connector(
x_pos + 1,
yp,
level_y[level]
)
)
level_y[level] = yp
doc.add(group)
group = doc.g(
class_='ensembles'
)
for ens_idx, ens in enumerate(self.ensembles):
txt = chr(ens_idx + 65)
label = ens.name if hasattr(ens, 'name') else \
ens.__class__.__name__[:-8]
group.add(
doc.label(
ens_idx,
-1,
'[' + txt + '] ' + label,
css_class=['head']
)
)
group.add(
doc.vertical_hook(
ens_idx,
-1,
ens_idx,
total
)
)
max_level = 0
rset = openpathsampling.pathmover_inout.ReplicaStateSet
initial_rs = rset.from_ensembles(self.initial)
subs = MoveTreeBuilder._get_sub_used(self.pathmover, initial_rs, 0)
# this checks if the mover can actually be run without problems
# assert(
# Counter(dict(initial_rs)) >= self.pathmover.in_out_matrix.minimal)
for yp, (level, sub_mp) in enumerate(
path.depth_pre_order(
lambda this: this,
only_canonical=self.options.analysis['only_canonical'])):
sub = subs[yp]
if level > max_level:
max_level = level
possible_input_replica_states = [Counter(dict(s)) for s in sub[2]]
sub_io_set = sub_mp.in_out
# minimal_input_replica_states = sub_io_set.minimal
# in_ens = sub_mp.input_ensembles
# out_ens = sub_mp.output_ensembles
possible_ins = [
i.ins for i in sub_io_set
if any(s >= i.ins for s in possible_input_replica_states)]
possible_outs = [
i.outs for i in sub_io_set
if any(s >= i.ins for s in possible_input_replica_states)]
in_ens = reduce(lambda a, b: a | b, possible_ins, Counter())
out_ens = reduce(lambda a, b: a | b, possible_outs, Counter())
for ens_idx, ens in enumerate(self.ensembles):
txt = chr(ens_idx + 65)
show = False
if in_ens is None or None in in_ens or ens in in_ens:
group.add(
doc.connector(
ens_idx,
yp - 0.15,
css_class=['input']
)
)
show = True
if out_ens is None or None in out_ens or ens in out_ens:
group.add(
doc.connector(
ens_idx,
yp + 0.15,
css_class=['output'])
)
show = True
if show:
group.add(
doc.connector(
ens_idx,
yp,
|
"""
Boolean Operations
~~~~~~~~~~~~~~~~~~
Perform boolean operations with closed surfaces (intersect, cut, etc.).
Boolean/topological operations (intersect, cut, etc.) methods are implemented
for :class:`pyvista.PolyData` mesh types only and are accessible directly from
any :class:`pyvista.PolyData` mesh. Check out :class:`pyvista.PolyDataFilters`
and take a look at the following filters:
* :func:`pyvista.PolyDataFilters.boolean_add`
* :func:`pyvista.PolyDataFilters.boolean_cut`
* :func:`pyvista.PolyDataFilters.boolean_difference`
* :func:`pyvista.PolyDataFilters.boolean_union`
For merging, the ``+`` operator can be used between any two meshes in PyVista
which simply calls the ``.merge()`` filter to combine any two meshes.
Similarly, the ``-`` operator can be used between any two :class:`pyvista.PolyData`
meshes in PyVista to cut the first mesh by the second.
"""
# sphinx_gallery_thumbnail_number = 6
import pyvista as pv
import numpy as np
def make_cube():
x = np.linspace(-0.5, 0.5, 25)
grid = pv.StructuredGrid(*np.meshgrid(x, x, x))
return grid.extract_surface().triangulate()
# Create two example PolyData meshes for boolean operations
sphere = pv.Sphere(radius=0.65, center=(0, 0, 0))
cube = make_cube()
p = pv.Plotter()
p.add_mesh(sphere, color="yellow", opacity=0.5, show_edges=True)
p.add_mesh(cube, color="royalblue", opacity=0.5, show_edges=True)
p.show()
###############################################################################
# Boolean Add
# +++++++++++
#
# Add all of the two meshes together using the
# :func:`pyvista.PolyDataFilters.boolean_add` filter or the ``+`` operator.
#
# Order of operations does not matter for a boolean add, as both meshes are
# appended together in their entirety.
add = sphere + cube
add.plot(opacity=0.5, color=True, show_edges=True)
###############################################################################
# Boolean Cut
# +++++++++++
#
# Perform a boolean cut of ``a`` using ``b`` with the
# :func:`pyvista.PolyDataFilters.boolean_cut` filter or the ``-`` operator
# since both meshes are :class:`pyvista.PolyData`.
#
# Order of operations does not matter for boolean cut.
cut = cube - sphere
p = pv.Plotter()
p.add_mesh(cut, opacity=0.5, show_edges=True, color=True)
p.show()
###############################################################################
# Boolean Difference
# ++++++++++++++++++
#
# Combine two meshes, retaining only the volume in common between them,
# using the :func:`pyvista.PolyDataFilters.boolean_difference` method.
#
# Note that the order of operations for a boolean difference will affect the
# results.
diff = sphere.boolean_difference(cube)
p = pv.Plotter()
p.add_mesh(diff, opacity=0.5, show_edges=True, color=True)
p.show()
###############################################################################
diff = cube.boolean_difference(sphere)
p = pv.Plotter()
p.add_mesh(diff, opacity=0.5, show_edges=True, color=True)
p.show()
###############################################################################
# Boolean Union
# +++++++++++++
#
# Combine two meshes, attempting to create a manifold mesh, using the
# :func:`pyvista.PolyDataFilters.boolean_union` method.
#
# Order of operations does not matter for boolean union.
union = sphere.boolean_union(cube)
p = pv.Plotter()
p.add_mesh(union, opacity=0.5, show_edges=True, color=True)
p.show()
|
#!/usr/bin/env conda-execute
# conda execute
# env:
# - python 2.7.*
# - conda-smithy
# - pygithub 1.*
# - six
# - conda-build
# channels:
# - conda-forge
# run_with: python
from __future__ import print_function
import argparse
import collections
import os
import six
from github import Github
import github
import yaml
from conda_build.metadata import MetaData
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('feedstocks_clone', help="The location of the feedstocks directory within the conda-forge/feedstocks clone.")
args = parser.parse_args()
from conda_smithy.github import gh_token
token = gh_token()
gh = Github(token)
conda_forge = gh.get_organization('conda-forge')
teams = {team.name: team for team in conda_forge.get_teams()}
feedstocks_path = args.feedstocks_clone
packages_visited = set()
all_members = set()
from random import choice
superlative = ['awesome', 'slick', 'formidable', 'awe-inspiring', 'breathtaking',
'magnificent', 'wonderous', 'stunning', 'astonishing', 'superb',
'splendid', 'impressive', 'unbeatable', 'excellent', 'top', 'outstanding',
'exalted', 'standout', 'smashing']
# Go through each of the feedstocks and ensure that the team is up to date and that
# there is nobody in the team which doesn't belong (i.e. isn't in the maintainers list).
for package_name in os.listdir(feedstocks_path):
print("Checking {}".format(package_name))
packages_visited.add(package_name)
feedstock = os.path.join(feedstocks_path, package_name)
recipe = os.path.join(feedstock, 'recipe', 'meta.yaml')
if not os.path.exists(recipe):
print("The {} feedstock is recipe less".format(package_name))
continue
meta = MetaData(os.path.dirname(recipe))
contributors = meta.meta.get('extra', {}).get('recipe-maintainers', [])
if not isinstance(contributors, list):
# Deal with a contribution list which has dashes but no spaces
# (e.g. https://github.com/conda-forge/pandoc-feedstock/issues/1)
contributors = [contributors.lstrip('-')]
contributors = set(handle.lower() for handle in contributors)
all_members.update(contributors)
# If the team already exists, get hold of it.
team = teams.get(package_name)
if not team:
print("Team {} does not exist in conda-forge organization".format(package_name))
continue
current_members = team.get_members()
member_handles = set([member.login.lower() for member in current_members])
for new_member in contributors - member_handles:
headers, data = team._requester.requestJsonAndCheck(
"PUT",
team.url + "/memberships/" + new_member)
for old_member in member_handles - contributors:
print("AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}".format(old_member, package_name))
# The following works, it is just a bit scary!
# headers, data = team._requester.requestJsonAndCheck(
# "DELETE",
# team.url + "/memberships/" + old_member)
# Create and administer the all-members team.
team = teams.get('all-members')
if not team:
raise RuntimeError("Team all-members does not exist in conda-forge organization")
current_members = team.get_members()
member_handles = set([member.login.lower() for member in current_members])
for new_member in all_members - member_handles:
headers, data = team._requester.requestJsonAndCheck(
"PUT",
team.url + "/memberships/" + new_member)
for old_member in member_handles - all_members:
print("AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM all-members".format(old_member))
# Remove any teams which don't belong any more (because there is no longer a feedstock).
for team_to_remove in set(teams.keys()) - set(packages_visited):
if team_to_remove in ['Core',
'conda-forge.github.io',
'all-members',
'conda-forge-anvil',
'conda-forge-webservices',
'staged-recipes']:
print('Keeping ', team_to_remove)
continue
print("THE {} TEAM NEEDS TO BE REMOVED.".format(team_to_remove))
# The following works, it is just a bit scary!
# teams[team_to_remove].delete()
|
self.cleaned_data['application_namespace'] = config.namespace
else:
if instance_namespace:
if self._check_unique_namespace_instance(instance_namespace):
self._errors['application_namespace'] = ErrorList([
_('An application instance with this name already exists.')
])
else:
# The 'app_name' attribute on apps is a misnomer; it should be
# 'application_namespace'.
application_namespace = apphook_pool.get_apphook(apphook).app_name
if application_namespace and not instance_namespace:
if self._check_unique_namespace_instance(application_namespace):
# Looks like there's already one with the default instance
# namespace defined.
self._errors['application_namespace'] = ErrorList([
_('An application instance with this name already exists.')
])
else:
# OK, there are zero instances of THIS app that use the
# default instance namespace, so, since the user didn't
# provide one, we'll use the default. NOTE: The following
# line is really setting the "instance namespace" of the
# new app to the app’s "application namespace", which is
# the default instance namespace.
self.cleaned_data['application_namespace'] = application_namespace
if instance_namespace and not apphook:
self.cleaned_data['application_namespace'] = None
if application_config and not apphook:
self.cleaned_data['application_configs'] = None
return self.cleaned_data
def clean_xframe_options(self):
if 'xframe_options' not in self.fields:
return # nothing to do, field isn't present
xframe_options = self.cleaned_data['xframe_options']
if xframe_options == '':
return Page._meta.get_field('xframe_options').default
return xframe_options
def clean_overwrite_url(self):
path_override = self.cleaned_data.get('overwrite_url')
if path_override:
path = path_override.strip('/')
else:
path = self.instance.get_path_for_slug(self.title_obj.slug, self._language)
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
exclude_page=self.instance,
)
self.cleaned_data['path'] = path
return path_override
def has_changed_apphooks(self):
changed_data = self.changed_data
if 'application_urls' in changed_data:
return True
return 'application_namespace' in changed_data
def update_apphooks(self):
# User has changed the apphooks on the page.
# Update the public version of the page to reflect this change immediately.
public_id = self.instance.publisher_public_id
self._meta.model.objects.filter(pk=public_id).update(
application_urls=self.instance.application_urls,
application_namespace=(self.instance.application_namespace or None),
)
# Connects the apphook restart handler to the request finished signal
set_restart_trigger()
def save(self, *args, **kwargs):
data = self.cleaned_data
page = super().save(*args, **kwargs)
page.update_translations(
self._language,
path=data['path'],
redirect=(data.get('redirect') or None),
publisher_state=PUBLISHER_STATE_DIRTY,
has_url_overwrite=bool(data.get('overwrite_url')),
)
is_draft_and_has_public = page.publisher_is_draft and page.publisher_public_id
if is_draft_and_has_public and self.has_changed_apphooks():
self.update_apphooks()
page.clear_cache(menu=True)
return page
class PagePermissionForm(forms.ModelForm):
class Meta:
model = Page
fields = ['login_required', 'limit_visibility_in_menu']
def save(self, *args, **kwargs):
page = super().save(*args, **kwargs)
page.clear_cache(menu=True)
clear_permission_cache()
return page
class PageTreeForm(forms.Form):
position = forms.IntegerField(initial=0, required=True)
target = forms.ModelChoiceField(queryset=Page.objects.none(), required=False)
def __init__(self, *args, **kwargs):
self.page = kwargs.pop('page')
self._site = kwargs.pop('site', Site.objects.get_current())
super().__init__(*args, **kwargs)
self.fields['target'].queryset = Page.objects.drafts().filter(
node__site=self._site,
is_page_type=self.page.is_page_type,
)
def get_root_nodes(self):
# TODO: this needs to avoid using the pages accessor directly
nodes = TreeNode.get_root_nodes()
return nodes.exclude(cms_pages__is_page_type=not(self.page.is_page_type))
def get_tree_options(self):
position = self.cleaned_data['position']
target_page = self.cleaned_data.get('target')
parent_node = target_page.node if target_page else None
if parent_node:
return self._get_tree_options_for_parent(parent_node, position)
return self._get_tree_options_for_root(position)
def _get_tree_options_for_root(self, position):
siblings = self.get_root_nodes().filter(site=self._site)
try:
target_node = siblings[position]
except IndexError:
# The position requested is not occupied.
# Add the node as the last root node,
# relative to the current site.
return (siblings.reverse()[0], 'right')
return (target_node, 'left')
def _get_tree_options_for_parent(self, parent_node, position):
if position == 0:
return (parent_node, 'first-child')
siblings = parent_node.get_children().filter(site=self._site)
try:
target_node = siblings[position]
except IndexError:
# The position requested is not occupied.
# Add the node as the parent's last child
return (parent_node, 'last-child')
return (target_node, 'left')
class MovePageForm(PageTreeForm):
def clean(self):
cleaned_data = super().clean()
if self.page.is_home and cleaned_data.get('target'):
self.add_error('target', force_str(_('You can\'t move the home page inside another page')))
return cleaned_data
def get_tree_options(self):
options = super().get_tree_options()
target_node, target_node_position = options
if target_node_position != 'left':
return (target_node, target_node_position)
node = self.page.node
node_is_first = node.path < target_node.path
if node_is_first and node.is_sibling_of(target_node):
# The node being moved appears before the target node
# and is a sibling of the target node.
# The user is moving from left to right.
target_node_position = 'right'
elif node_is_first:
# The node being moved appears before the target node
# but is not a sibling of the target node.
# The user is moving from right to left.
target_node_position = 'left'
else:
# The node being moved appears after the target node.
# The user is moving from right to left.
target_node_position = 'left'
return (target_node, target_node_position)
def move_page(self):
self.page.move_page(*self.get_tree_options())
class CopyPageForm(PageTreeForm):
source_site = forms.ModelChoiceField(queryset=Site.objects.all(), required=True)
copy_permissions = forms.BooleanField(initial=False, required=False)
from blinker import Namespace
import logging
import json
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MySignal:
def __init__(self):
self.signals = {}
self.signal = Namespace()
def init_app(self, app):
pass
def addSignal(self, classname, option):
logger.debug('add signal {}.{}'.format(classname, option))
if '{}.{}'.format(classname, option) not in self.signals.keys():
self.signals['{}.{}'.format(classname, option)] = self.signal.signal('{}.{}'.format(classname, option))
def send(self, classname, option, **extra):
logger.debug('send signal {}.{} with: {}'.format(classname, option, extra))
logger.info('send signal {}.{}'.format(classname, option))
if '{}.{}'.format(classname, option) in self.signals.keys():
payload = '{}.{}'.format(classname, option)
if extra:
extra['sender'] = payload
payload = json.dumps(extra)
self.signals['{}.{}'.format(classname, option)].send(str(payload))
def connect(self, classname, option, func):
logger.debug('connect signal {}.{} with func: {}()'.format(classname, option, func.__name__))
if not '{}.{}'.format(classname, option) in self.signals.keys():
self.signals['{}.{}'.format(classname, option)] = self.signal.signal('{}.{}'.format(classname, option))
self.signals['{}.{}'.format(classname, option)].connect(func)
def disconnect(self, classname, option, func):
if '{}.{}'.format(classname, option) in self.signals.keys():
self.signals['{}.{}'.format(classname, option)].disconnect(func)
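# A minimal usage sketch (the 'User'/'created' names below are hypothetical).
# blinker calls each connected receiver with the payload built in send() as
# the `sender` argument, so the handler receives the JSON payload string.
if __name__ == '__main__':
    signals = MySignal()

    def on_user_created(sender):
        print('received: {}'.format(sender))

    signals.connect('User', 'created', on_user_created)
    signals.send('User', 'created', id=42)  # handler prints the JSON payload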
|
# -*- coding: utf-8 -*-
import re
import string
import random
import pbkdf2
HASHING_ITERATIONS = 400
ALLOWED_IN_SALT = string.ascii_letters + string.digits + './'
ALLOWED_PASSWORD_PATTERN = r'[A-Za-z0-9@#$%^&+=]{8,}'
def generate_random_string(len=12, allowed_chars=string.ascii_letters+string.digits):
return ''.join(random.choice(allowed_chars) for i in range(len))
def make_password(password=None):
if password is None:
raise ValueError('password is required')
salt = generate_random_string(len=32, allowed_chars=ALLOWED_IN_SALT)
return pbkdf2.crypt(password, salt=salt, iterations=HASHING_ITERATIONS)
def check_password(password, hashed_password):
return hashed_password == pbkdf2.crypt(password, hashed_password)
def validate_password(password=None):
"""
ALLOWED_PASSWORD_PATTERN = r'[A-Za-z0-9@#$%^&+=]{8,}'
"""
if password is None:
raise ValueError('password is required')
if re.match(ALLOWED_PASSWORD_PATTERN, password):
return True
return False
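# A brief usage sketch of the helpers above (the example password is
# arbitrary). pbkdf2.crypt() re-reads the salt and iteration count from the
# stored hash, so verification only needs the candidate password and the hash.
if __name__ == '__main__':
    assert validate_password('Str0ng#Pass')
    hashed = make_password('Str0ng#Pass')
    assert check_password('Str0ng#Pass', hashed)
    assert not check_password('wrong-password', hashed)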
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .entity_health import EntityHealth
class PartitionHealth(EntityHealth):
"""Information about the health of a Service Fabric partition.
:param aggregated_health_state: Possible values include: 'Invalid', 'Ok',
'Warning', 'Error', 'Unknown'
:type aggregated_health_state: str or :class:`enum
<azure.servicefabric.models.enum>`
:param health_events: The list of health events reported on the entity.
:type health_events: list of :class:`HealthEvent
<azure.servicefabric.models.HealthEvent>`
:param unhealthy_evaluations:
:type unhealthy_evaluations: list of :class:`HealthEvaluationWrapper
<azure.servicefabric. | models.HealthEvaluationWrapper>`
:param health_statistics:
:type health_statistics: :class:`HealthStatistics
<azure.servicefabric.models.HealthStatistics>`
:param partition_id:
:type partition_id: str
:param replica_health_states: The list of replica health states associated
with the partition.
:type replica_health_states: list of :class:`ReplicaHealthState
<azure.servicefabric.models.ReplicaHealthState>`
"""
_attribute_map = {
'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'},
'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'},
'partition_id': {'key': 'PartitionId', 'type': 'str'},
'replica_health_states': {'key': 'ReplicaHealthStates', 'type': '[ReplicaHealthState]'},
}
def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id=None, replica_health_states=None):
super(PartitionHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics)
self.partition_id = partition_id
self.replica_health_states = replica_health_states
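# A minimal construction sketch (the GUID below is a hypothetical value); in
# practice, instances are deserialized from Service Fabric REST responses
# using the _attribute_map above.
if __name__ == '__main__':
    health = PartitionHealth(
        aggregated_health_state='Ok',
        partition_id='00000000-0000-0000-0000-000000000000',
        replica_health_states=[],
    )
    print(health.partition_id, health.aggregated_health_state)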
|
"""
Script that trains graph-conv models on ChEMBL dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
from chembl_datasets import load_chembl
# Load ChEMBL dataset
chembl_tasks, datasets, transformers = load_chembl(
shard_size=2000, featurizer="GraphConv", set="5thresh", split="random")
train_dataset, valid_dataset, test_dataset = datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
# Do setup required for tf/keras models
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 128
graph_model = dc.nn.SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(128, n_feat, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
graph_model.add(dc.nn.GraphConv(128, 128, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
# Gather Projection
graph_model.add(dc.nn.Dense(256, 128, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
model = dc.models.MultitaskGraphRegressor(
graph_model,
len(chembl_tasks),
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(train_dataset, nb_epoch=20)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
print("Test scores")
print(test_scores)
|
units (int): Size of the hidden layers
dist (str): Output distribution, parameterized by FC output
logits.
act (Any): Activation function
"""
super().__init__()
self.layrs = layers
self.units = units
self.act = act
if not act:
self.act = nn.ELU
self.dist = dist
self.input_size = input_size
self.output_size = output_size
self.layers = []
cur_size = input_size
for _ in range(self.layrs):
self.layers.extend([Linear(cur_size, self.units), self.act()])
cur_size = units
self.layers.append(Linear(cur_size, output_size))
self.model = nn.Sequential(*self.layers)
def forward(self, x):
x = self.model(x)
if self.output_size == 1:
x = torch.squeeze(x)
if self.dist == "normal":
output_dist = td.Normal(x, 1)
elif self.dist == "binary":
output_dist = td.Bernoulli(logits=x)
else:
raise NotImplementedError("Distribution type not implemented!")
return td.Independent(output_dist, 0)
# Represents dreamer policy
class ActionDecoder(nn.Module):
"""ActionDecoder is the policy module in Dreamer.
It outputs a distribution parameterized by mean and std, later to be
transformed by a custom TanhBijector in utils.py for Dreamer.
"""
def __init__(self,
input_size: int,
action_size: int,
layers: int,
units: int,
dist: str = "tanh_normal",
act: ActFunc = None,
min_std: float = 1e-4,
init_std: float = 5.0,
mean_scale: float = 5.0):
"""Initializes Policy
Args:
input_size (int): Input size to network
action_size (int): Action space size
layers (int): Number of layers in network
units (int): Size of the hidden layers
dist (str): Output distribution, with tanh_normal implemented
act (Any): Activation function
min_std (float): Minimum std for output distribution
init_std (float): Initial std
mean_scale (float): Augmenting mean output from FC network
"""
super().__init__()
self.layrs = layers
self.units = units
self.dist = dist
self.act = act
if not act:
self.act = nn.ReLU
self.min_std = min_std
self.init_std = init_std
self.mean_scale = mean_scale
self.action_size = action_size
self.layers = []
self.softplus = nn.Softplus()
# MLP Construction
cur_size = input_size
for _ in range(self.layrs):
self.layers.extend([Linear(cur_size, self.units), self.act()])
cur_size = self.units
if self.dist == "tanh_normal":
self.layers.append(Linear(cur_size, 2 * action_size))
elif self.dist == "onehot":
self.layers.append(Linear(cur_size, action_size))
self.model = nn.Sequential(*self.layers)
# Returns distribution
def forward(self, x):
raw_init_std = np.log(np.exp(self.init_std) - 1)
x = self.model(x)
if self.dist == "tanh_normal":
mean, std = torch.chunk(x, 2, dim=-1)
mean = self.mean_scale * torch.tanh(mean / self.mean_scale)
std = self.softplus(std + raw_init_std) + self.min_std
dist = td.Normal(mean, std)
transforms = [TanhBijector()]
dist = td.transformed_distribution.TransformedDistribution(
dist, transforms)
dist = td.Independent(dist, 1)
elif self.dist == "onehot":
dist = td.OneHotCategorical(logits=x)
raise NotImplementedError("Atari not implemented yet!")
return dist
# Represents TD model in PlaNET
class RSSM(nn.Module):
"""RSSM is the core recurrent part of the PlaNET module. It consists of
two networks, one (obs) to calculate posterior beliefs and states and
the second (img) to calculate prior beliefs and states. The prior network
takes in the previous state and action, while the posterior network takes
in the previous state, action, and a latent embedding of the most recent
observation.
"""
def __init__(self,
action_size: int,
embed_size: int,
stoch: int = 30,
deter: int = 200,
hidden: int = 200,
act: ActFunc = None):
"""Initializes RSSM
Args:
action_size (int): Action space size
embed_size (int): Size of ConvEncoder embedding
stoch (int): Size of the distributional hidden state
deter (int): Size of the deterministic hidden state
hidden (int): General size of hidden layers
act (Any): Activation function
"""
super().__init__()
self.stoch_size = stoch
self.deter_size = deter
self.hidden_size = hidden
self.act = act
if act is None:
self.act = nn.ELU
self.obs1 = Linear(embed_size + deter, hidden)
self.obs2 = Linear(hidden, 2 * stoch)
self.cell = GRUCell(self.hidden_size, hidden_size=self.deter_size)
self.img1 = Linear(stoch + action_size, hidden)
self.img2 = Linear(deter, hidden)
self.img3 = Linear(hidden, 2 * stoch)
self.softplus = nn.Softplus
self.device = (torch.device("cuda")
if torch.cuda.is_available() else torch.device("cpu"))
def get_initial_state(self, batch_size: int) -> List[TensorType]:
"""Returns the inital state for the RSSM, which consists of mean,
std for the stochastic state, the sampled stochastic hidden state
(from mean, std), and the deterministic hidden state, which is
pushed through the GRUCell.
Args:
batch_size (int): Batch size for initial state
Returns:
List of tensors
"""
return [
torch.zeros(batch_size, self.stoch_size).to(self.device),
torch.zeros(batch_size, self.stoch_size).to(self.device),
torch.zeros(batch_size, self.stoch_size).to(self.device),
torch.zeros(batch_size, self.deter_size).to(self.device),
]
def observe(self,
embed: TensorType,
action: TensorType,
state: List[TensorType] = None
) -> Tuple[List[TensorType], List[TensorType]]:
"""Returns the corresponding states from the embedding from ConvEncoder
and actions. This is accomplished by rolling out the RNN from the
starting state through each index of embed and action, saving all
intermediate states between.
Args:
embed (TensorType): ConvEncoder embedding
action (TensorType): Actions
state (List[TensorType]): Initial state before rollout
Returns:
Posterior states and prior states (both List[TensorType])
"""
if state is None:
state = self.get_initial_state(action.size()[0])
embed = embed.permute(1, 0, 2)
action = action.permute(1, 0, 2)
priors = [[] for i in range(len(state))]
posts = [[] for i in range(len(state))]
last = (state, state)
for index in range(len(action)):
# Tuple of post and prior
last = self.obs_step(last[0], action[index], embed[index])
[o.append(s) for s, o in zip(last[0], posts)]
[o.append(s) for s, o in zip(last[1], priors)]
prior = [torch.stack(x, dim=0) for x in priors]
post = [torch.stack(x, dim=0) for x in posts]
prior = [e.permute(1, 0, 2) for e in prior]
post = [e.permute(1, 0, 2) for e in post]
return post, prior
def imagine(self, action: TensorType,
state: List[TensorType] = None) -> List[TensorType]:
|
# The MIT License (MIT)
#
# Copyright (c) 2014 Muratahan Aykol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
import numpy as np
xdatcar = open('XDATCAR', 'r')
xyz = open('XDATCAR.xyz', 'w')
xyz_fract = open('XDATCAR_fract.xyz', 'w')
system = xdatcar.readline()
scale = float(xdatcar.readline().rstrip('\n'))
print scale
#get lattice vectors
a1 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
a2 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
a3 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
print a1
print a2
print a3
#Save scaled lattice vectors
lat_rec = open('lattice.vectors', 'w')
lat_rec.write(str(a1[0])+' '+str(a1[1])+' '+str(a1[2])+'\n')
lat_rec.write(str(a2[0])+' '+str(a2[1])+' '+str(a2[2])+'\n')
lat_rec.write(str(a3[0])+' '+str(a3[1])+' '+str(a3[2]))
lat_rec.close()
#Read xdatcar
element_names = xdatcar.readline().rstrip('\n').split()
element_dict = {}
element_numbers = xdatcar.readline().rstrip('\n').split()
i = 0
N = 0
for t in range(len(element_names)):
element_dict[element_names[t]] = int(element_numbers[i])
N += int(element_numbers[i])
i += 1
print element_dict
while True:
line = xdatcar.readline()
if len(line) == 0:
break
xyz.write(str(N) + "\ncomment\n")
xyz_fract.write(str(N)+"\ncomment\n")
for el in element_names:
for i in range(element_dict[el]):
p = xdatcar.readline().rstrip('\n').split()
coords = np.array([ float(s) for s in p ])
# print coords
cartesian_coords = coords[0]*a1+coords[1]*a2+coords[2]*a3
xyz.write(el+ " " + str(cartesian_coords[0])+ " " + str(cartesian_coords[1]) + " " + str(cartesian_coords[2]) +"\n")
xyz_fract.write(el+ " " + str(coords[0])+ " " + str(coords[1]) + " " + str(coords[2]) +"\n")
xdatcar.close()
xyz.close()
xyz_fract.close()
|
__all__ = ['jazPrint', 'jazShow']
class jazPrint:
def __init__(self):
self.command = "print";
def call(self, interpreter, arg):
return interpreter.GetScope().GetStackTop()
class jazShow:
def __init__(self):
self.command = "show";
def call(self, interpreter, arg):
return arg;
# A dictionary of the classes in this file
# used to autoload the functions
Functions = {'jazShow': jazShow, 'jazPrint': jazPrint}
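# A short sketch of the autoload pattern described above: look a command up by
# name and dispatch through call(). jazShow ignores its interpreter argument,
# so None stands in for a real interpreter here.
if __name__ == '__main__':
    command = Functions['jazShow']()
    print(command.call(None, 'hello jaz'))  # prints 'hello jaz'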
|
import abc
import errno
import os
import platform
import socket
import time
import traceback
import mozprocess
__all__ = ["SeleniumServer", "ChromeDriverServer", "EdgeChromiumDriverServer", "OperaDriverServer",
"GeckoDriverServer", "InternetExplorerDriverServer", "EdgeDriverServer",
"ServoDriverServer", "WebKitDriverServer", "WebDriverServer"]
class WebDriverServer(object):
__metaclass__ = abc.ABCMeta
default_base_path = "/"
def __init__(self, logger, binary, host="127.0.0.1", port=None,
base_path="", env=None, args=None):
if binary is None:
raise ValueError("WebDriver server binary must be given "
"to --webdriver-binary argument")
self.logger = logger
self.binary = binary
self.host = host
if base_path == "":
self.base_path = self.default_base_path
else:
self.base_path = base_path
self.env = os.environ.copy() if env is None else env
self._port = port
self._cmd = None
self._args = args if args is not None else []
self._proc = None
@abc.abstractmethod
def make_command(self):
"""Returns the full command for starting the server process as a list."""
def start(self, block=False):
try:
self._run(block)
except KeyboardInterrupt:
self.stop()
def _run(self, block):
self._cmd = self.make_command()
self._proc = mozprocess.ProcessHandler(
self._cmd,
processOutputLine=self.on_output,
env=self.env,
storeOutput=False)
self.logger.debug("Starting WebDriver: %s" % ' '.join(self._cmd))
try:
self._proc.run()
except OSError as e:
if e.errno == errno.ENOENT:
raise IOError(
"WebDriver executable not found: %s" % self.binary)
raise
self.logger.debug(
"Waiting for WebDriver | to become accessible: %s" % self.url)
try:
wait_for_service((self.host, self.port))
except Exception:
self.logger.error(
"WebDriver was not accessible "
"within the timeout:\n%s" % traceback.format_exc())
raise
if block:
self._proc.wait()
def stop(self, force=False):
if self.is_alive:
return self._proc.kill()
return not self.is_alive
@property
def is_alive(self):
return hasattr(self._proc, "proc") and self._proc.poll() is None
def on_output(self, line):
self.logger.process_output(self.pid,
line.decode("utf8", "replace"),
command=" ".join(self._cmd))
@property
def pid(self):
if self._proc is not None:
return self._proc.pid
@property
def url(self):
return "http://%s:%i%s" % (self.host, self.port, self.base_path)
@property
def port(self):
if self._port is None:
self._port = get_free_port()
return self._port
class SeleniumServer(WebDriverServer):
default_base_path = "/wd/hub"
def make_command(self):
return ["java", "-jar", self.binary, "-port", str(self.port)] + self._args
class ChromeDriverServer(WebDriverServer):
def __init__(self, logger, binary="chromedriver", port=None,
base_path="", args=None):
WebDriverServer.__init__(
self, logger, binary, port=port, base_path=base_path, args=args)
def make_command(self):
return [self.binary,
cmd_arg("port", str(self.port)),
cmd_arg("url-base", self.base_path) if self.base_path else ""] + self._args
class EdgeChromiumDriverServer(WebDriverServer):
def __init__(self, logger, binary="msedgedriver", port=None,
base_path="", args=None):
WebDriverServer.__init__(
self, logger, binary, port=port, base_path=base_path, args=args)
def make_command(self):
return [self.binary,
cmd_arg("port", str(self.port)),
cmd_arg("url-base", self.base_path) if self.base_path else ""] + self._args
class EdgeDriverServer(WebDriverServer):
def __init__(self, logger, binary="microsoftwebdriver.exe", port=None,
base_path="", host="localhost", args=None):
WebDriverServer.__init__(
self, logger, binary, host=host, port=port, args=args)
def make_command(self):
return [self.binary,
"--port=%s" % str(self.port)] + self._args
class OperaDriverServer(ChromeDriverServer):
def __init__(self, logger, binary="operadriver", port=None,
base_path="", args=None):
ChromeDriverServer.__init__(
self, logger, binary, port=port, base_path=base_path, args=args)
class InternetExplorerDriverServer(WebDriverServer):
def __init__(self, logger, binary="IEDriverServer.exe", port=None,
base_path="", host="localhost", args=None):
WebDriverServer.__init__(
self, logger, binary, host=host, port=port, args=args)
def make_command(self):
return [self.binary,
"--port=%s" % str(self.port)] + self._args
class GeckoDriverServer(WebDriverServer):
def __init__(self, logger, marionette_port=2828, binary="geckodriver",
host="127.0.0.1", port=None, args=None):
env = os.environ.copy()
env["RUST_BACKTRACE"] = "1"
WebDriverServer.__init__(self, logger, binary, host=host, port=port, env=env, args=args)
self.marionette_port = marionette_port
def make_command(self):
return [self.binary,
"--marionette-port", str(self.marionette_port),
"--host", self.host,
"--port", str(self.port)] + self._args
class SafariDriverServer(WebDriverServer):
def __init__(self, logger, binary="safaridriver", port=None, args=None):
WebDriverServer.__init__(
self, logger, binary, port=port, args=args)
def make_command(self):
return [self.binary,
"--port=%s" % str(self.port)] + self._args
class ServoDriverServer(WebDriverServer):
def __init__(self, logger, binary="servo", binary_args=None, host="127.0.0.1",
port=None, args=None):
env = os.environ.copy()
env["RUST_BACKTRACE"] = "1"
WebDriverServer.__init__(self, logger, binary, host=host, port=port, env=env, args=args)
self.binary_args = binary_args
def make_command(self):
command = [self.binary,
"--webdriver=%s" % self.port,
"--hard-fail",
"--headless"] + self._args
if self.binary_args:
command += self.binary_args
return command
class WebKitDriverServer(WebDriverServer):
def __init__(self, logger, binary=None, port=None, args=None):
WebDriverServer.__init__(self, logger, binary, port=port, args=args)
def make_command(self):
return [self.binary, "--port=%s" % str(self.port)] + self._args
def cmd_arg(name, value=None):
prefix = "-" if platform.system() == "Windows" else "--"
rv = prefix + name
if value is not None:
rv += "=" + value
return rv
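# For example, on POSIX systems cmd_arg("url-base", "/wd/hub") returns
# "--url-base=/wd/hub", while on Windows it returns "-url-base=/wd/hub",
# matching the flag convention each driver binary expects.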
def get_free_port():
"""Get a random unbound port"""
while True:
s = socket.socket()
try:
s.bind(("127.0.0.1", 0))
except socket.error:
continue
else:
return s.getsockname()[1]
finally:
s.close()
def wait_for_service(addr, timeout=15):
"""Waits until network service given as a tuple of (host, port) becomes
available or the `timeout` duration is reached, at which point
``socket.error`` is raised."""
end = time.time() + timeout
while end > time.time():
so = socket.socket()
try:
so.connect(addr)
except socket.timeout:
pass
        except socket.error as e:
            if e[0] != errno.ECONNREFUSED:
                raise
        else:
            return True
        finally:
            so.close()
        time.sleep(0.5)
    raise socket.error("Service is unavailable: %s:%i" % addr)
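# A quick self-check sketch: grab a port nothing listens on, then show that
# wait_for_service() raises once the (shortened, for illustration) timeout
# elapses.
if __name__ == '__main__':
    port = get_free_port()
    try:
        wait_for_service(("127.0.0.1", port), timeout=2)
    except socket.error:
        print("nothing listening on 127.0.0.1:%i, as expected" % port)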
|
rd, database
def _conn_key(self, instance):
''' Return a key to use for the connection cache
'''
host, username, password, database = self._get_access_info(instance)
return '%s:%s:%s:%s' % (host, username, password, database)
def _conn_string(self, instance):
''' Return a connection string to use with adodbapi
'''
host, username, password, database = self._get_access_info(instance)
conn_str = 'Provider=SQLOLEDB;Data Source=%s;Initial Catalog=%s;' \
% (host, database)
if username:
conn_str += 'User ID=%s;' % (username)
if password:
conn_str += 'Password=%s;' % (password)
if not username and not password:
conn_str += 'Integrated Security=SSPI;'
return conn_str
def get_cursor(self, instance, cache_failure=False):
'''
Return a cursor to execute query against the db
Cursor are cached in the self.connections dict
'''
conn_key = self._conn_key(instance)
host = instance.get('host')
database = instance.get('database')
service_check_tags = [
'host:%s' % host,
'db:%s' % database
]
if conn_key in self.failed_connections:
raise self.failed_connections[conn_key]
if conn_key not in self.connections:
try:
conn = adodbapi.connect(
self._conn_string(instance),
timeout=int(instance.get('command_timeout',
self.DEFAULT_COMMAND_TIMEOUT))
)
self.connections[conn_key] = conn
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)
except Exception:
cx = "%s - %s" % (host, database)
message = "Unable to connect to SQL Server for instance %s." % cx
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=message)
password = instance.get('password')
tracebk = traceback.format_exc()
if password is not None:
tracebk = tracebk.replace(password, "*" * 6)
# Avoid multiple connection timeouts (too slow):
# save the exception, re-raise it when needed
cxn_failure_exp = SQLConnectionError("%s \n %s" % (message, tracebk))
if cache_failure:
self.failed_connections[conn_key] = cxn_failure_exp
raise cxn_failure_exp
conn = self.connections[conn_key]
cursor = conn.cursor()
return cursor
def get_sql_type(self, instance, counter_name):
'''
Return the type of the performance counter so that we can report it to
Datadog correctly
If the sql_type is one that needs a base (PERF_RAW_LARGE_FRACTION and
PERF_AVERAGE_BULK), the name of the base counter will also be returned
'''
cursor = self.get_cursor(instance, cache_failure=True)
cursor.execute(COUNTER_TYPE_QUERY, (counter_name,))
(sql_type,) = cursor.fetchone()
if sql_type == PERF_LARGE_RAW_BASE:
self.log.warning("Metric %s is of type Base and shouldn't be reported this way",
counter_name)
base_name = None
if sql_type in [PERF_AVERAGE_BULK, PERF_RAW_LARGE_FRACTION]:
# This is an ugly hack. For certains type of metric (PERF_RAW_LARGE_FRACTION
# and PERF_AVERAGE_BULK), we need two metrics: the metrics specified and
# a base metrics to get the ratio. There is no unique schema so we generate
# the possible candidates and we look at which ones exist in the db.
candidates = (counter_name + " base",
counter_name.replace("(ms)", "base"),
counter_name.replace("Avg ", "") + " base"
)
try:
cursor.execute(BASE_NAME_QUERY, candidates)
base_name = cursor.fetchone().counter_name.strip()
self.log.debug("Got base metric: %s for metric: %s", base_name, counter_name)
except Exception, e:
self.log.warning("Could not get counter_name of base for metric: %s", e)
self.close_cursor(cursor)
return sql_type, base_name
def check(self, instance):
"""
Fetch the metrics from the sys.dm_os_performance_counters table
"""
cursor = self.get_cursor(instance)
custom_tags = instance.get('tags', [])
instance_key = self._conn_key(instance)
metrics_to_collect = self.instances_metrics[instance_key]
for metric in metrics_to_collect:
try:
metric.fetch_metric(cursor, custom_tags)
except Exception, e:
self.log.warning("Could not fetch metric %s: %s" % (metric.datadog_name, e))
self.close_cursor(cursor)
def close_cursor(self, cursor):
"""
We close the cursor explicitly b/c we had proven memory leaks
We handle any exception from closing, although according to the doc:
"in adodbapi, it is NOT an error to re-close a closed cursor"
"""
try:
cursor.close()
except Exception as e:
self.log.warning("Could not close adodbapi cursor\n{0}".format(e))
class SqlServerMetric(object):
'''General class for common methods, should never be instantiated directly
'''
def __init__(self, datadog_name, sql_name, base_name,
report_function, instance, tag_by, logger):
self.datadog_name = datadog_name
self.sql_name = sql_name
self.base_name = base_name
self.report_function = report_function
self.instance = instance
self.tag_by = tag_by
self.instances = None
self.past_values = {}
self.log = logger
def fetch_metric(self, cursor, tags):
raise NotImplementedError
class SqlSimpleMetric(SqlServerMetric):
def fetch_metric(self, cursor, tags):
query_base = '''
select instance_name, cntr_value
from sys.dm_os_performance_counters
where counter_name = ?
'''
if self.instance == ALL_INSTANCES:
query = query_base + "and instance_name!= '_Total'"
query_content = (self.sql_name,)
else:
query = query_base + "and instance_name=?"
query_content = (self.sql_name, self.instance)
cursor.execute(query, query_content)
rows = cursor.fetchall()
for instance_name, cntr_value in rows:
metric_tags = tags
if self.instance == ALL_INSTANCES:
metric_tags = metric_tags + ['%s:%s' % (self.tag_by, instance_name.strip())]
self.report_function(self.datadog_name, cntr_value,
tags=metric_tags)
class SqlFractionMetric(SqlServerMetric):
def set_instances(self, cursor):
if self.instance == ALL_INSTANCES:
cursor.execute(INSTANCES_QUERY, (self.sql_name,))
self.instances = [row.instance_name for row in cursor.fetchall()]
else:
self.instances = [self.instance]
def fetch_metric(self, cursor, tags):
'''
Because we need to query the metrics by matching pairs, we can't query
all of them together without having to perform some matching based on
the name afterwards so instead we query instance by instance.
We cache the list of instance so that we don't have to look it up every time
'''
if self.instances is None:
self.set_instances(cursor)
for instance in self.instances:
cursor.execute(VALUE_AND_BASE_QUERY, (self.sql_name, self.base_name, instance))
rows = cursor.fetchall()
if len(rows) != 2:
                sel
if discovery_info is not None:
host = urlparse(discovery_info[1]).hostname
else:
host = config.get(CONF_HOST)
if host is None:
_LOGGER.error("No TV found in configuration file or with discovery")
return False
# Only act if we are not already configuring this host
if host in _CONFIGURING:
return
mac = config.get(CONF_MAC)
name = config.get(CONF_NAME)
customize = config.get(CONF_CUSTOMIZE)
config = hass.config.path(config.get(CONF_FILENAME))
setup_tv(host, mac, name, customize, config, hass, add_devices)
def setup_tv(host, mac, name, customize, config, hass, add_devices):
"""Set up a LG WebOS TV based on host parameter."""
from pylgtv import WebOsClient
from pylgtv import PyLGTVPairException
from websockets.exceptions import ConnectionClosed
client = WebOsClient(host, config)
if not client.is_registered():
if host in _CONFIGURING:
# Try to pair.
try:
client.register()
except PyLGTVPairException:
_LOGGER.warning(
"Connected to LG webOS TV %s but not paired", host)
return
except (OSError, ConnectionClosed, TypeError,
asyncio.TimeoutError):
_LOGGER.error("Unable to connect to host %s", host)
return
else:
# Not registered, request configuration.
_LOGGER.warning("LG webOS TV %s needs to be paired", host)
request_configuration(
host, mac, name, customize, config, hass, add_devices)
return
    # If we got here while configuring this host, mark the request as done.
if client.is_registered() and host in _CONFIGURING:
request_id = _CONFIGURING.pop(host)
configurator = get_component('configurator')
configurator.request_done(request_id)
add_devices([LgWebOSDevice(host, mac, name, customize, config)], True)
def request_configuration(
host, mac, name, customize, config, hass, add_devices):
"""Request configuration steps from the user."""
    configurator = get_component('configurator')
    # We get an error if this method is called while we are configuring.
if host in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING[host], 'Failed to pair, please try again.')
return
# pylint: disable=unused-argument
def lgtv_configuration_callback(data):
"""Handle configuration changes."""
setup_tv(host, mac, name, customize, config, hass, add_devices)
_CONFIGURING[host] = configurator.request_config(
hass, name, lgtv_configuration_callback,
description='Click start and accept the pairing request on your TV.',
description_image='/static/images/config_webos.png',
submit_caption='Start pairing request'
)
class LgWebOSDevice(MediaPlayerDevice):
"""Representation of a LG WebOS TV."""
def __init__(self, host, mac, name, customize, config):
"""Initialize the webos device."""
from pylgtv import WebOsClient
from wakeonlan import wol
self._client = WebOsClient(host, config)
self._wol = wol
self._mac = mac
self._customize = customize
self._name = name
# Assume that the TV is not muted
self._muted = False
# Assume that the TV is in Play mode
self._playing = True
self._volume = 0
self._current_source = None
self._current_source_id = None
self._state = STATE_UNKNOWN
self._source_list = {}
self._app_list = {}
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update(self):
"""Retrieve the latest data."""
from websockets.exceptions import ConnectionClosed
try:
current_input = self._client.get_input()
if current_input is not None:
self._current_source_id = current_input
if self._state in (STATE_UNKNOWN, STATE_OFF):
self._state = STATE_PLAYING
else:
self._state = STATE_OFF
self._current_source = None
self._current_source_id = None
            if self._state != STATE_OFF:
self._muted = self._client.get_muted()
self._volume = self._client.get_volume()
self._source_list = {}
self._app_list = {}
conf_sources = self._customize.get(CONF_SOURCES, [])
for app in self._client.get_apps():
self._app_list[app['id']] = app
if conf_sources:
if app['id'] == self._current_source_id:
self._current_source = app['title']
self._source_list[app['title']] = app
elif (app['id'] in conf_sources or
any(word in app['title']
for word in conf_sources) or
any(word in app['id']
for word in conf_sources)):
self._source_list[app['title']] = app
else:
self._current_source = app['title']
self._source_list[app['title']] = app
for source in self._client.get_inputs():
if conf_sources:
if source['id'] == self._current_source_id:
self._source_list[source['label']] = source
elif (source['label'] in conf_sources or
any(source['label'].find(word) != -1
for word in conf_sources)):
self._source_list[source['label']] = source
else:
self._source_list[source['label']] = source
except (OSError, ConnectionClosed, TypeError,
asyncio.TimeoutError):
self._state = STATE_OFF
self._current_source = None
self._current_source_id = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume / 100.0
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def source_list(self):
"""List of available input sources."""
return sorted(self._source_list.keys())
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_CHANNEL
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._current_source_id in self._app_list:
icon = self._app_list[self._current_source_id]['largeIcon']
if not icon.startswith('http'):
icon = self._app_list[self._current_source_id]['icon']
return icon
return None
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._mac:
return SUPPORT_WEBOSTV | SUPPORT_TURN_ON
return SUPPORT_WEBOSTV
def turn_off(self):
"""Turn off media player."""
from websockets.exceptions import ConnectionClosed
self._state = STATE_OFF
try:
self._client.power_off()
except (OSError, ConnectionClosed, TypeError,
asyncio.TimeoutError):
pass
def turn_on(self):
"""Turn on the media player."""
if self._mac:
self._wol.send_magic_packet(self._mac)
    def volume_up(s
# -*- coding: utf-8 -*-
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'django',
'USER': 'django',
        'PASSWORD': 'PUTPASSWORDHERE',
        'HOST': '127.0.0.1',
'PORT': '5432',
}
}
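# A minimal hardening sketch (an assumption, not part of the original snippet):
# the placeholder credentials above would normally be read from the environment
# so that nothing sensitive is committed; the variable names here are illustrative.
import os

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': os.environ.get('DB_NAME', 'django'),
        'USER': os.environ.get('DB_USER', 'django'),
        'PASSWORD': os.environ.get('DB_PASSWORD', ''),
        'HOST': os.environ.get('DB_HOST', '127.0.0.1'),
        'PORT': os.environ.get('DB_PORT', '5432'),
    }
}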
in Ok completely surround all sigmas.
    Returns either (True, xlist) with xlist a list of all the a+t needed,
    or (False, s) where s is the first sigma found not to be surrounded.
"""
xlist = []
for s in sigmas:
if s.is_infinity():
continue
ok, xlist1 = is_sigma_surrounded(s, alist, debug)
if not ok:
if debug:
print("{} is not surrounded".format(s))
return False, s
if debug:
print("{} is surrounded by {}".format(s, xlist1))
for a in xlist1:
if a not in xlist:
xlist.append(a)
if debug:
print("All sigmas are surrounded, by {}".format(xlist))
return True, xlist
def tri_det(a1, a2, a3):
return Matrix(3,3,[a1,a2,a3, a1.conjugate(), a2.conjugate(), a3.conjugate(), 1, 1, 1]).det()
def intersection_points_in_k(a1,a2):
"""Given principal cusps a1,a2 returns a list of 0, 1 or 2 points (in
k) where the circles S_a1, S_a2 intersect.
"""
k = nf(a1)
alist = [a1,a2]
# Check the cusps are principal, not infinity, and with unit ideal
assert all((not a.is_infinity()) and (a.ideal()==1) for a in alist)
# Define the square radii and centres
r1sq, r2sq = [radius_squared(a) for a in alist]
al1, al2 = [to_k(a, k) for a in alist]
delta = al2-al1
n = delta.norm()
d1 = n - (r1sq + r2sq)
d2 = d1**2 - 4*r1sq*r2sq
if d2 > 0:
return []
z = ((al1+al2) + (r1sq-r2sq)/delta.conjugate())/2
return [z + r/(2*delta.conjugate()) for r in k(d2).sqrt(all=True, extend=False)]
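# Hedged worked example (illustrative numbers, same algebra as above): for
# circles of squared radius 1/2 centred at al1 = 0 and al2 = 1 we get
# delta = 1, n = 1, d1 = 1 - (1/2 + 1/2) = 0 and d2 = 0 - 4*(1/2)*(1/2) = -1 < 0,
# so z = ((0 + 1) + 0)/2 = 1/2, and the square roots r = ±i of d2 give the two
# intersection points 1/2 ± i/2, which lie on both circles since
# |1/2 ± i/2|^2 = |1/2 ± i/2 - 1|^2 = 1/2.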
def intersection_points_in_CC(a1,a2):
"""Given principal cusps a1,a2 returns a list of 0, 1 or 2 points (in
CC) where the circles S_a1, S_a2 intersect.
"""
k = nf(a1)
emb = next(e for e in k.embeddings(CC) if e(k.gen()).imag()>0)
alist = [a1,a2]
# Check the cusps are principal, not infinity, and with unit ideal
assert all((not a.is_infinity()) and (a.ideal()==1) for a in alist)
# Define the square radii and centres
r1sq, r2sq = [radius_squared(a) for a in alist]
al1, al2 = [to_k(a, k) for a in alist]
delta = al2-al1
n = delta.norm()
d1 = n - (r1sq + r2sq)
d2 = d1**2 - 4*r1sq*r2sq
if d2 > 0:
return []
z = emb(((al1+al2) + (r1sq-r2sq)/delta.conjugate())/2)
if d2 == 0:
return [z]
rd2 = CC(d2).sqrt() # pure imaginary
z1 = z + rd2/(2*emb(delta.conjugate()))
z2 = 2*z-z1 # = z - rd2/(2*emb(delta.conjugate()))
return [z1,z2]
def show_intersection(a1,a2):
zz = intersection_points_in_CC(a1,a2)
if len(zz)==2:
zz.append((zz[0]+zz[1])/2)
points = [list(z) for z in zz]
k = nf(a1)
emb = next(e for e in k.embeddings(CC) if e(k.gen()).imag()>0)
A = [list(emb(to_k(a, k))) for a in [a1,a2]]
R = [RR(radius_squared(a)).sqrt() for a in [a1,a2]]
circles = [(c,r) for c,r in zip(A,R)]
return plot_circles_and_points(circles, points, fill=True)
def are_intersection_points_covered_by_one(a1, a2, a, plot=False):
"""Given principal cusps a1, a2, a such that the circles S_a1 and
S_a2 intersect in distinct points, test whether S_a covers either
or both.
Returns 0 if neither, 2 if both, +1 or -1 if just one. The signs
are consistent so that if a returns +1 and a' returns -1 then each
intersection point is covered by either S_a or S_a'.
"""
k = nf(a1)
w = k.gen()
emb = next(e for e in k.embeddings(CC) if e(w).imag()>0)
alist = [a1,a2,a]
# Check the cusps are principal, not infinity, and with unit ideal
assert all((not a.is_infinity()) and (a.ideal()==1) for a in alist)
# Define the square radii and centres
r1sq, r2sq, rsq = [radius_squared(a) for a in alist]
al1, al2, al = [to_k(a, k) for a in alist]
n1, n2 = [a.norm() for a in [al1, al2]]
#
delta = al2-al1
n = delta.norm()
z0 = ((al1+al2) + (r1sq-r2sq)/delta.conjugate())/2
d1 = n - (r1sq + r2sq)
d2 = d1**2 - 4*r1sq*r2sq
if d2 >= 0:
        raise RuntimeError("cusps {} and {} have non-intersecting circles".format(a1, a2))
if plot:
points = [list(z) for z in intersection_points_in_CC(a1,a2)]
circle = (list(emb(to_k(a, k))), RR(radius_squared(a)).sqrt())
pic = plot_circles([a1,a2], fill=False) + plot_circles_and_points([circle], points, fill=True)
pic.show()
input("press Enter...")
T = 2 * n * (rsq - (z0-al).norm()) + d2/2 # rational
T2 = T**2
D = tri_det(al, al2, al1) # pure imaginary
D2 = QQ(D**2) # negative rational
d2D2 = d2*D2 # positive rational
# the covering condition is \pm sqrt(d2)*D < T
#print("T = {}, D = {}, d2 = {}".format(T,D,d2))
if d2D2 < T2:
return 2 if T>0 else 0 if T<0 else '?'
if d2D2 > T2:
u = QQ(D/(w-w.conjugate()))
return -1 if u>0 else +1 if u<0 else 0
return 0
def is_singular(s, sigmas):
from utils import sigma_index_with_translation
return sigma_index_with_translation(s, sigmas)[0]!=-1
def translates(a):
w = nf(a).gen()
return [translate_cusp(a,t) for t in [-w-1,-w,-w+1,-1,0,1,w-1,w,w+1]]
def is_inside_one(z, alist):
"""Test whether the cusp z is strictly inside at least one S_a for a
in alist. If so return True, a; otherwise return False, None.
"""
try:
a = next(a for a in alist if is_inside(z, a, strict=True))
return True, a
except StopIteration:
return False, None
def are_intersection_points_covered(a0, a1, alist, sigmas, debug=False):
"""Given principal cusps a0, a1 whose circles S_a0, S_a1 intersect,
and a list of principal cusps alist each of whose circles S_a also
intersects S_a0, test whether each of the two intersection points
of S_a0 and S_a1 is either singular or strictly inside one of the
S_a.
We treat as a special case when the two intersection points are in
k. If not, the code still uses exact arithmetic.
"""
k = nf(a0)
z_in_k = intersection_points_in_k(a0,a1)
if z_in_k:
zz = [cusp(z, k) for z in z_in_k]
if debug:
print("intersection points in k: {}".format(z_in_k))
# check that each is *either* singular *or* contained in some S_a2
for z in zz:
if is_singular(z, sigmas):
if debug:
print("{} is ok: singular".format(z))
else:
ok, a1 = is_inside_one(z, alist)
if ok:
if debug:
print("{} is ok: inside S_{}".format(z, a1))
else:
return False
return True
# Now the intersection points are not in k. Check that either one
# S_a covers both, or two cover one each:
t = 0 # will hold +1 or -1 if we have covered only one of the two
for a2 in alist:
if a2 == a1:
continue
t2 = are_intersection_points_covered_by_one(a0, a1, a2, plot=False)
if debug:
print("a0={}, a1={}, a2={}: t2={}, t={}".format(a0, a1,a2,t2,t))
if t2: # it is 2, +1 or -1
            assert t2 in [-1,1,2]
if debug:
are_intersection_points_covered_by_one(a0, a1, a2, plot=True)
if t2==2 or ([t,t2] in [[1,-1],[-1,1]]):
if debug:
                    print("t={}, t2={}, about to return True".format(t,t2))
return True
assert t2 in [-1,1] and t in [0,t2]
if debug:
print("t={}, t2={}, setting t to {}".format(t,t2,t2))
t = t2
return False
def is_alpha_surrounded(a0, alist, sigmas, pairs_ok=[], debug=False, plot=False):
"""Given a principal cusp a0, a candidate list of principal cusps
alist, tests whether the boundary of the disc S_a0 is contained in
the union of the translates S_{b+t} for b in alist, apart from any
singular points on the boundary. It suffices to consider all b+t
such that S_{b+t} intersects S_a in two points and check that each
of the points is either singular or contained in some other
S_{b+t}. This is simplest when the intersection points are in k;
    if not the
#!/usr/bin/env python
import roslib
roslib.load_manifest('camera_controller')
import rospy
import tf
if __name__ == '__main__':
rospy.init_node('frame_broadcaster')
    br = tf.TransformBroadcaster()
rate = rospy.Rate(10.0)
target_frame = rospy.get_param("~target_frame")
# Camera position
# Translation
x = rospy.get_param("~x",0)
y = rospy.get_param("~y",0)
z = rospy.get_param("~z",0)
# Pose quaternion
qm = rospy.get_param("~qm",0)
qx = rospy.get_param("~qx",0)
qy = rospy.get_param("~qy",0)
qz = rospy.get_param("~qz",1)
    while not rospy.is_shutdown():
br.sendTransform((x,y,z), (qm, qx, qy, qz), rospy.Time.now(), target_frame, "world")
rate.sleep()
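# Hedged usage sketch (parameter values are assumptions): private ~params are
# passed to rosrun with a leading underscore, e.g.
#
#   rosrun camera_controller frame_broadcaster.py _target_frame:=camera _x:=1.0 _z:=2.5
#
# Note that tf's sendTransform expects the rotation quaternion in (x, y, z, w)
# order, so the (qm, qx, qy, qz) tuple above defaults to the identity rotation.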
# Copyright (C) 2013 Statoil ASA, Norway.
#
# The file 'test_run.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
import random
import os.path
import subprocess
import argparse
from .test_area import TestAreaContext
def path_exists( path ):
if os.path.exists( path ):
return (True , "Path:%s exists" % path)
else:
return (False , "ERROR: Path:%s does not exist" % path)
class TestRun(object):
default_ert_cmd = "ert"
default_ert_version = "stable"
default_path_prefix = None
def __init__(self , config_file , args = [] , name = None):
if os.path.exists( config_file ) and os.path.isfile( config_file ):
self.parseArgs(args)
self.__ert_cmd = TestRun.default_ert_cmd
self.path_prefix = TestRun.default_path_prefix
self.config_file = config_file
self.check_list = []
self.workflows = []
if name:
self.name = name
else:
                self.name = config_file.replace("/" , ".").lstrip(".")
self.name += "/%08d" % random.randint(0,100000000)
else:
raise IOError("No such config file: %s" % config_file)
def parseArgs(self , args):
parser = argparse.ArgumentParser()
parser.add_argument("-v" , "--version" , default = self.default_ert_version)
parser.add_argument("args" , nargs="*")
result = parser.parse_args(args)
self.ert_version = result.version
self.args = result.args
def get_config_file(self):
return self.__config_file
def set_config_file(self , input_config_file):
self.__config_file = os.path.basename( input_config_file )
self.abs_config_file = os.path.abspath( input_config_file )
config_file = property( get_config_file , set_config_file )
#-----------------------------------------------------------------
def set_path_prefix(self , path_prefix):
self.__path_prefix = path_prefix
def get_path_prefix(self):
return self.__path_prefix
path_prefix = property( get_path_prefix , set_path_prefix )
#-----------------------------------------------------------------
def get_ert_cmd(self):
return self.__ert_cmd
def set_ert_cmd(self , cmd):
self.__ert_cmd = cmd
ert_cmd = property( get_ert_cmd , set_ert_cmd)
#-----------------------------------------------------------------
def get_workflows(self):
return self.workflows
def add_workflow(self , workflow):
self.workflows.append( workflow )
#------ | -----------------------------------------------------------
def get_args(self):
return self.args
#-----------------------------------------------------------------
def add_check( self , check_func , arg):
if callable(check_func):
self.check_list.append( (check_func , arg) )
else:
raise Exception("The checker:%s is not callable" % check_func )
#-----------------------------------------------------------------
def __run(self , work_area ):
argList = [ self.ert_cmd , "-v" , self.ert_version ]
for arg in self.args:
argList.append( arg )
argList.append( self.config_file )
for wf in self.workflows:
argList.append( wf )
status = subprocess.call( argList )
if status == 0:
return (True , "ert has run successfully")
else:
            return (False , "ERROR: ert exited with status code:%s" % status)
def run(self):
if len(self.workflows):
with TestAreaContext(self.name , prefix = self.path_prefix , store_area = False) as work_area:
test_cwd = work_area.get_cwd()
work_area.copy_parent_content( self.abs_config_file )
status = self.__run( work_area )
global_status = status[0]
status_list = [ status ]
if status[0]:
for (check_func , arg) in self.check_list:
status = check_func( arg )
status_list.append( status )
if not status[0]:
global_status = False
if not global_status:
work_area.set_store( True )
return (global_status , test_cwd , status_list)
else:
            raise Exception("Must have added workflows before invoking run()")
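# Hedged usage sketch (the config path, workflow and check target are
# assumptions, not from the source):
#
#   run = TestRun("path/to/config.ert", args=["--version", "stable"])
#   run.add_workflow("SOME_WORKFLOW")
#   run.add_check(path_exists, "expected/output/path")
#   global_status, test_cwd, status_list = run.run()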
from equity import EquityPricer
class FuturePricer(EquityPricer):
def __init__(self):
        super(FuturePricer, self).__init__()
ile.delete()
os.system('rm -rf %s/genomes/%s/%s' % (settings.BASE_DIR, username, individual_id))
self.object.delete()
# response = JSONResponse(True, {}, response_mimetype(self.request))
# response['Content-Disposition'] = 'inline; filename=files.json'
# return response
        messages.add_message(request, messages.INFO, "Individual deleted successfully!")
#return redirect('individuals_list')
return redirect('individuals_list')
def view(request, individual_id):
individual = get_object_or_404(Individual, pk=individual_id)
variant_list = Variant.objects.filter(individual=individual)
# snpeff = SnpeffAnnotation.objects.filter(individual=individual)
individual.n_variants = variant_list.count()
    individual.novel_variants = variant_list.filter(variant_id='.').count()
individual.summary = []
#get calculated values from database
summary_item = {
'type': 'Total SNVs',
'total': variant_list.values('genotype').count(),
'discrete': variant_list.values('genotype').annotate(total=Count('genotype'))
}
individual.summary.append(summary_item)
summary_item = {
'type': 'Total Gene-associated SNVs',
'total': variant_list.values('gene').exclude(gene="").count(),
'discrete': variant_list.exclude(gene="").values('genotype').annotate(total=Count('genotype'))
}
individual.summary.append(summary_item)
individual.snp_eff = variant_list.values('snpeff_effect').annotate(Count('snpeff_effect')).order_by('snpeff_effect')
# print 'individual.snp_eff', individual.snp_eff
# variant_list.values('snpeff__effect').annotate(Count('snpeff__effect')).order_by('snpeff__effect')
#
individual.functional_class = variant_list.values('snpeff_func_class').annotate(Count('snpeff_func_class')).order_by('snpeff_func_class')
individual.impact_variants = variant_list.values('snpeff_impact').annotate(Count('snpeff_impact')).order_by('snpeff_impact')
individual.filter_variants = variant_list.values('filter').annotate(Count('filter')).order_by('filter')
individual.quality = variant_list.aggregate(Avg('qual'), Max('qual'), Min('qual'))
individual.read_depth = variant_list.aggregate(Avg('read_depth'), Max('read_depth'), Min('read_depth'))
individual.clinvar_clnsig = variant_list.values('clinvar_clnsig').annotate(total=Count('clinvar_clnsig'))
individual.chromossome = variant_list.values('chr').annotate(total=Count('chr')).order_by('chr')
# variants_with_snpid = variant_list.values('variant_id').exclude(variant_id=".")
#print variants_with_snpid
# fields = Variant._meta.get_all_field_names()
paginator = Paginator(variant_list, 25) # Show 25 contacts per page
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
variants = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
variants = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
variants = paginator.page(paginator.num_pages)
#'fields':fields
return render(request, 'individuals/view.html', {'individual': individual, 'variants':variants})
@login_required
def browse(request, individual_id):
query_string = request.META['QUERY_STRING']
individual = get_object_or_404(Individual, pk=individual_id)
query = {}
# DEFAULT_SORT = 'pk'
# sort_key = request.GET.get('sort', DEFAULT_SORT)
# tags = ['genotype', 'snpeffannotation__effect']#, 'func_class', 'impact', 'cln_omim', 'chr'
# for tag in tags:
# criteria = request.GET.get(tag, '')
# if criteria:
# query[tag] = criteria
if request.method == 'GET':
form = BrowserForm(request.GET)
if form.is_valid():
print('form is valid')
#chr
chr = request.GET.get('chr', '')
if chr != '':
query['chr'] = chr
#pos
pos = request.GET.get('pos', '')
if pos != '':
query['pos'] = pos
effect = request.GET.get('effect', '')
if effect != '':
print('effect', effect)
query['snpeff_effect'] = effect
#snp_id
# snp_id = request.GET.get('snp_id', '')
# if snp_id != '':
# query['variant_id'] = snp_id
# snp_list = request.GET.get('snp_list', '')
# snp_list = snp_list.split('\r\n')
# if snp_list[0] != u'':
# query['variant_id__in'] = snp_list
# snp_eff = request.GET.getlist('effect')
# if len(snp_eff) > 0:
# query['snp_eff__in'] = snp_eff
# func_class = request.GET.getlist('func_class')
# if len(func_class) > 0:
# query['snp_eff_functional_class__in'] = func_class
# gene = request.GET.get('gene', '')
# if gene != '':
# query['gene_name'] = gene
# gene_list = request.GET.get('gene_list', '')
# gene_list = gene_list.split('\r\n')
# if gene_list[0] != u'':
# query['gene_name__in'] = gene_list
# cln = request.GET.get('cln_omim', '')
# print 'clnomim', cln
# if cln == 'on':
# query['cln_omim'] != ''
variants = Variant.objects.filter(individual=individual, **query)
# snpeff_annotations = SnpeffAnnotation.objects.filter(variant__in=variants)
# #b.entry_set.filter(headline__contains='Lennon')
# print 'snpeff_annotations', len(snpeff_annotations)
# for variant in variants:
# print variant.entry_set.all()
# variant.snpeff=
else:
form = BrowserForm(request.GET)
variants = Variant.objects.filter(individual=individual, **query)
#Pagination
paginator = Paginator(variants, 25) # Show 25 contacts per page
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
variants = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
variants = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
variants = paginator.page(paginator.num_pages)
return render(request, 'variants/variants.html', {'individual': individual, 'variants':variants, 'form':form, 'query_string':query_string})
@login_required
def list(request):
if request.method == 'POST':
individuals = request.POST.getlist('individuals')
print(individuals)
individuals = [int(x) for x in individuals]
print(individuals)
if request.POST['selectionField'] == "Show":
for individual_id in individuals:
individual = get_object_or_404(Individual, pk=individual_id)
individual.is_featured = True
individual.save()
if request.POST['selectionField'] == "Hide":
for individual_id in individuals:
individual = get_object_or_404(Individual, pk=individual_id)
individual.is_featured = False
individual.save()
if request.POST['selectionField'] == "Delete":
for individual_id in individuals:
individual = get_object_or_404(Individual, pk=individual_id)
individual_id = individual.id
username = individual.user.username
#delete files
if individual.vcf_file:
individual.vcf_file.delete()
# if individual.strs_file:
# individual.strs_file.delete()
# if individual.cnvs_file:
from __future__ import (absolute_import, division, print_function)
import unittest
import mantid
import os
import numpy as np
from sans.test_helper.test_director import TestDirector
from sans.state.wavelength_and_pixel_adjustment import get_wavelength_and_pixel_adjustment_builder
from sans.common.enums import (RebinType, RangeStepType, DetectorType)
from sans.common.general_functions import (create_unmanaged_algorithm)
from sans.common.constants import EMPTY_NAME
class SANSCalculateTransmissionTest(unittest.TestCase):
@staticmethod
def _create_test_wavelength_adjustment_file(file_name):
test_file = (" Tue 24-MAR-2015 00:02 Workspace: directbeam_new_hist\n"
"\n"
" 6 0 0 0 1 6 0\n"
" 0 0 0 0\n"
" 3 (F12.5,2E16.6)\n"
" 1.00000 5.000000e-01 5.000000e-01\n"
" 3.00000 5.000000e-01 5.000000e-01\n"
" 5.00000 5.000000e-01 5.000000e-01\n"
" 7.00000 5.000000e-01 5.000000e-01\n"
" 9.00000 5.000000e-01 5.000000e-01\n"
" 11.00000 5.000000e-01 5.000000e-01\n")
full_file_path = os.path.join(mantid.config.getString('defaultsave.directory'), file_name)
if os.path.exists(full_file_path):
os.remove(full_file_path)
with open(full_file_path, 'w') as f:
f.write(test_file)
return full_file_path
@staticmethod
def _remove_test_file(file_name):
if os.path.exists(file_name):
os.remove(file_name)
    @staticmethod
def _get_state(lab_pixel_file=None, hab_pixel_file=None, lab_wavelength_file=None, hab_wavelength_file=None,
wavelength_low=None, wavelength_high=None, wavelength_step=None,
wavelength_step_type=None):
test_director = TestDirector()
state = test_director.construct()
data_state = state.data
        wavelength_and_pixel_builder = get_wavelength_and_pixel_adjustment_builder(data_state)
if lab_pixel_file:
wavelength_and_pixel_builder.set_LAB_pixel_adjustment_file(lab_pixel_file)
if hab_pixel_file:
wavelength_and_pixel_builder.set_HAB_pixel_adjustment_file(hab_pixel_file)
if lab_wavelength_file:
wavelength_and_pixel_builder.set_LAB_wavelength_adjustment_file(lab_wavelength_file)
if hab_wavelength_file:
wavelength_and_pixel_builder.set_HAB_wavelength_adjustment_file(hab_wavelength_file)
if wavelength_step_type:
wavelength_and_pixel_builder.set_wavelength_step_type(wavelength_step_type)
if wavelength_low:
wavelength_and_pixel_builder.set_wavelength_low(wavelength_low)
if wavelength_high:
wavelength_and_pixel_builder.set_wavelength_high(wavelength_high)
if wavelength_step:
wavelength_and_pixel_builder.set_wavelength_step(wavelength_step)
wavelength_and_pixel_state = wavelength_and_pixel_builder.build()
state.adjustment.wavelength_and_pixel_adjustment = wavelength_and_pixel_state
return state.property_manager
@staticmethod
def _get_workspace(data):
create_name = "CreateSampleWorkspace"
create_options = {"NumBanks": 1,
"BankPixelWidth": 1,
"XMin": 1,
"XMax": 11,
"BinWidth": 2,
"XUnit": "Wavelength",
"OutputWorkspace": EMPTY_NAME}
create_alg = create_unmanaged_algorithm(create_name, **create_options)
create_alg.execute()
workspace = create_alg.getProperty("OutputWorkspace").value
data_y = workspace.dataY(0)
for index in range(len(data_y)):
data_y[index] = data[index]
return workspace
@staticmethod
def _run_test(transmission_workspace, norm_workspace, state, is_lab=True):
adjust_name = "SANSCreateWavelengthAndPixelAdjustment"
adjust_options = {"TransmissionWorkspace": transmission_workspace,
"NormalizeToMonitorWorkspace": norm_workspace,
"SANSState": state,
"OutputWorkspaceWavelengthAdjustment": "out_wavelength",
"OutputWorkspacePixelAdjustment": "out_pixels"}
if is_lab:
adjust_options.update({"Component": DetectorType.to_string(DetectorType.LAB)})
else:
adjust_options.update({"Component": DetectorType.to_string(DetectorType.HAB)})
adjust_alg = create_unmanaged_algorithm(adjust_name, **adjust_options)
adjust_alg.execute()
wavelength_adjustment = adjust_alg.getProperty("OutputWorkspaceWavelengthAdjustment").value
pixel_adjustment = adjust_alg.getProperty("OutputWorkspacePixelAdjustment").value
return wavelength_adjustment, pixel_adjustment
def test_that_gets_wavelength_workspace_when_no_files_are_specified(self):
# Arrange
data_trans = [3., 4., 5., 7., 3.]
data_norm = [9., 3., 8., 3., 1.]
transmission_workspace = SANSCalculateTransmissionTest._get_workspace(data_trans)
norm_workspace = SANSCalculateTransmissionTest._get_workspace(data_norm)
state = SANSCalculateTransmissionTest._get_state(wavelength_low=1., wavelength_high=11., wavelength_step=2.,
wavelength_step_type=RangeStepType.Lin)
# Act
wavelength_adjustment, pixel_adjustment = SANSCalculateTransmissionTest._run_test(transmission_workspace,
norm_workspace, state, True)
# Assert
        self.assertIsNone(pixel_adjustment)
        self.assertEqual(wavelength_adjustment.getNumberHistograms(), 1)
expected = np.array(data_trans)*np.array(data_norm)
data_y = wavelength_adjustment.dataY(0)
        for e1, e2 in zip(expected, data_y):
            self.assertEqual(e1, e2)
def test_that_gets_adjustment_workspace_if_files_are_specified(self):
# Arrange
data_trans = [3., 4., 5., 7., 3.]
data_norm = [9., 3., 8., 3., 1.]
expected_direct_file_workspace = [0.5, 0.5, 0.5, 0.5, 0.5]
transmission_workspace = SANSCalculateTransmissionTest._get_workspace(data_trans)
norm_workspace = SANSCalculateTransmissionTest._get_workspace(data_norm)
direct_file_name = "DIRECT_test.txt"
direct_file_name = SANSCalculateTransmissionTest._create_test_wavelength_adjustment_file(direct_file_name)
state = SANSCalculateTransmissionTest._get_state(hab_wavelength_file=direct_file_name,
wavelength_low=1., wavelength_high=11., wavelength_step=2.,
wavelength_step_type=RangeStepType.Lin)
# Act
wavelength_adjustment, pixel_adjustment = SANSCalculateTransmissionTest._run_test(transmission_workspace,
norm_workspace, state, False)
# Assert
        self.assertIsNone(pixel_adjustment)
        self.assertEqual(wavelength_adjustment.getNumberHistograms(), 1)
expected = np.array(data_trans)*np.array(data_norm)*np.array(expected_direct_file_workspace)
data_y = wavelength_adjustment.dataY(0)
        for e1, e2 in zip(expected, data_y):
            self.assertEqual(e1, e2)
# Clean up
SANSCalculateTransmissionTest._remove_test_file(direct_file_name)
if __name__ == '__main__':
unittest.main()
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from django.core import urlresolvers
from gasistafelice.rest.views.blocks.base import BlockSSDataTables, ResourceBlockAction
from gasistafelice.consts import CREATE, EDIT, EDIT_MULTIPLE, VIEW
from gasistafelice.lib.shortcuts import render_to_xml_response, render_to_context_response
from gasistafelice.supplier.models import Supplier
from gasistafelice.gas.models import GASMemberOrder
from gasistafelice.gas.forms.order.gmo import SingleGASMemberOrderForm
from gasistafelice.lib.formsets import BaseFormSetWithRequest
from django.forms.formsets import formset_factory
import logging
log = logging.getLogger(__name__)
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
class Block(BlockSSDataTables):
# COMMENT fero: name of this block should be
# something different from "order" (p.e: "make_order")
# because usually we refer to "order" for GASSupplierOrder
BLOCK_NAME = "order"
BLOCK_DESCRIPTION = _("Order")
BLOCK_VALID_RESOURCE_TYPES = ["gasmember"]
COLUMN_INDEX_NAME_MAP = {
0: 'pk',
1: 'gasstock__stock__supplier__name',
2: 'gasstock__stock__product__name',
3: 'order_price',
4: 'tot_amount',
5: 'tot_price',
6: '',
7: 'gasstock__stock__product__category__name',
}
# 3: 'gasstock__stock__product__description',
def _get_resource_list(self, request):
selected_orders = request.GET.getlist('gfCP_order')
rv = request.resource.orderable_products
if (selected_orders):
rv = rv.filter(order__pk__in=map(int, selected_orders))
return rv
def options_response(self, request, resource_type, resource_id):
"""Get options for orders block. Check GAS configuration.
WARNING: call to this method doesn't pass through get_response
so you have to reset self.request and self.resource attribute if you want
"""
log.debug("order options_response")
self.request = request
self.resource = request.resource
gas = self.resource.gas
orders = gas.orders.open()
field_type = "checkbox"
if gas.config.order_show_only_next_delivery:
orders = orders.order_by('-delivery__date')
            if orders[0].delivery:
                orders = orders.filter(delivery__date=orders[0].delivery.date)
            else:
                orders = orders.filter(delivery__date__isnull=True)
if gas.config.order_show_only_one_at_a_time:
field_type = "radio"
fields = []
for i,open_order in enumerate(orders):
if field_type == "radio":
selected = i == 0
else:
selected = True
fields.append({
'field_type' : field_type,
'field_label' : open_order,
'field_name' : 'order',
'field_values' : [{ 'value' : open_order.pk, 'selected' : selected}]
})
ctx = {
'block_name' : self.description,
'fields': fields,
}
return render_to_xml_response('options.xml', ctx)
def _get_edit_multiple_form_class(self):
qs = self._get_resource_list(self.request)
return formset_factory(
form=SingleGASMemberOrderForm,
formset=BaseFormSetWithRequest,
extra=qs.count() - self.__get_gmos(qs).count()
)
def __get_gmos(self, gsop):
log.debug("order block __get_gmos (%s)" % (self.request.resource.gasmember))
return GASMemberOrder.objects.filter(
ordered_product__in=gsop,
purchaser=self.request.resource.gasmember
)
def _get_records(self, request, querySet):
"""Return records of rendered table fields."""
# [:] forces evaluation of the querySet
#FIXME: filtering by purchaser not ok --> return all orders for all gasmembers
gmos = self.__get_gmos(querySet)[:]
data = {}
i = 0
c = querySet.count()
        # Store a mapping between GSSOP id and the needed info: formset_index and ordered_total
gmo_info = { }
gmo_lint = GASMemberOrder()
for i,el in enumerate(querySet):
try:
#TODO: to be improved in performance
gmo = el.gasmember_order_set.get(
purchaser=self.request.resource.gasmember
)
except GASMemberOrder.DoesNotExist:
gmo=gmo_lint
key_prefix = 'form-%d' % i
data.update({
'%s-id' % key_prefix : gmo.pk,
'%s-ordered_amount' % key_prefix : gmo.ordered_amount or 0,
            '%s-ordered_price' % key_prefix : el.gasstock.price, # displayed as hidden field
            '%s-gsop_id' % key_prefix : el.pk, # displayed as hidden field
'%s-note' % key_prefix : gmo.note,
})
gmo_info[el.pk] = {
'formset_index' : i,
'ordered_total' : (el.gasstock.price or 0)*(gmo.ordered_amount or 0), # This is the total computed NOW (with ordered_product.price)
}
data['form-TOTAL_FORMS'] = c
data['form-INITIAL_FORMS'] = gmos.count()
data['form-MAX_NUM_FORMS'] = 0
formset = self._get_edit_multiple_form_class()(request, data)
records = []
for i,el in enumerate(querySet):
#log.debug("order ordered_amount (%s)" % (i))
try:
form = formset[gmo_info[el.pk]['formset_index']]
total = gmo_info[el.pk]['ordered_total']
except KeyError:
# GASMember has not ordered this product: build an empty form
form = SingleGASMemberOrderForm(self.request)
total = 0
#try:
form.fields['ordered_amount'].widget.attrs = {
'class' : 'amount',
'step' : el.gasstock.step or 1,
'minimum_amount' : el.gasstock.minimum_amount or 1,
's_url' : el.supplier.urn,
'p_url' : el.gasstock.stock.urn,
}
#'p_url' : el.product.urn,
records.append({
'id' : "%s %s %s %s" % (el.pk, form['id'], form['gsop_id'], form['ordered_price']),
'supplier' : el.supplier,
'product' : el.gasstock,
'price' : el.gasstock.price,
            'ordered_amount' : form['ordered_amount'], # field initialized with the minimum amount, carrying the step attribute
'ordered_total' : total,
'note' : form['note'],
'category' : el.product.category
})
#'description' : el.product.description,
#except KeyError:
# log.debug("order ordered_amount (%s %s)" % (el.pk, i))
return formset, records, {}
from simulator.sensors.SimSensor import SimSensor
from environment.SensoryData import SensoryData
class SimAudioSensor(SimSensor):
def __init__(self, parentBot, name):
super().__init__('Audio', parentBot, name)
def receiveAudio(self, audio):
        return SensoryData(self.name, 'Audio', audio)
# coding=latin-1
from flask import request, g
from flask import abort, flash
from functools import wraps
def checa_permissao(permissao):
def decorator(f):
@wraps(f)
def inner(*args, **kwargs):
if g.user and g.user.checa_permissao(permissao):
                return f(*args, **kwargs)
else:
                # English: "Warning: you do not have the permission: %s. If this is not right, please get in touch to request it."
                flash(u'Atenção você não possui a permissão: %s. Se isto não estiver correto, entre em contato solicitando esta permissão.' % permissao.upper(), u'notice')
abort(401)
return inner
return decorator
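# Hedged usage sketch (route, view and permission names are assumptions):
#
#   @app.route('/admin')
#   @checa_permissao('admin')
#   def admin_panel():
#       return 'ok'
#
# g.user must already be set (e.g. in a before_request hook) when the check runs.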
#!/usr/bin/env python
"""
@package mi.dataset.driver.velpt_ab.dcl
@file mi-dataset/mi/dataset/driver/velpt_ab/dcl/velpt_ab_dcl_recovered_driver.py
@author Joe Padula
@brief Recovered driver for the velpt_ab_dcl instrument
Release notes:
Initial Release
"""
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.velpt_ab_dcl import VelptAbDclParser, \
VelptAbDclParticleClassKey
from mi.dataset.parser.velpt_ab_dcl_particles import VelptAbDclDiagnosticsDataParticleRecovered, \
VelptAbDclInstrumentDataParticleRecovered, \
VelptAbDclDiagnosticsHeaderParticleRecovered
from mi.core.versioning import version
@version("15.7.0")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
"""
This is the method called by Uframe
:param basePythonCodePath This is the file system location of mi-dataset
:param sourceFilePath This is the full path and filename of the file to be parsed
:param particleDataHdlrObj Java Object to consume the output of the parser
:return particleDataHdlrObj
"""
with open(sourceFilePath, 'rb') as stream_handle:
# create and instance of the concrete driver class defined below
driver = VelptAbDclRecoveredDriver(basePythonCodePath, stream_handle, particleDataHdlrObj)
driver.processFileStream()
return particleDataHdlrObj
class VelptAbDclRecoveredDriver(SimpleDatasetDriver):
"""
The velpt_ab_dcl driver class extends the SimpleDatasetDriver.
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_dcl_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
                VelptAbDclParticleClassKey.METADATA_PARTICLE_CLASS: VelptAbDclDiagnosticsHeaderParticleRecovered,
VelptAbDclParticleClassKey.DIAGNOSTICS_PARTICLE_CLASS: VelptAbDclDiagnosticsDataParticleRecovered,
VelptAbDclParticleClassKey.INSTRUMENT_PARTICLE_CLASS: VelptAbDclInstrumentDataParticleRecovered
}
}
parser = VelptAbDclParser(parser_config,
stream_handle,
self._exception_callback)
return parser
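# Hedged usage sketch (both paths are assumptions):
#
#   particleDataHdlrObj = parse('/path/to/mi-dataset',
#                               '/path/to/recovered_velpt_ab_dcl_input.log',
#                               particleDataHdlrObj)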
backs(cb, eb)
def test_loseConnection(self):
"""
Verify that a protocol connected to L{StandardIO} can disconnect
itself using C{transport.loseConnection}.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_loseconn', errorLogFile)
def processEnded(reason):
# Copy the child's log to ours so it's more visible.
with open(errorLogFile, 'r') as f:
for line in f:
log.msg("Child logged: " + line.rstrip())
self.failIfIn(1, p.data)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_readConnectionLost(self):
"""
When stdin is closed and the protocol connected to it implements
L{IHalfCloseableProtocol}, the protocol's C{readConnectionLost} method
is called.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
p.onDataReceived = defer.Deferred()
def cbBytes(ignored):
d = p.onCompletion
p.transport.closeStdin()
return d
p.onDataReceived.addCallback(cbBytes)
def processEnded(reason):
reason.trap(error.ProcessDone)
d = self._requireFailure(p.onDataReceived, processEnded)
self._spawnProcess(
p, b'stdio_test_halfclose', errorLogFile)
return d
def test_lastWriteReceived(self):
"""
Verify that a write made directly to stdout using L{os.write}
after StandardIO has finished is reliably received by the
process reading that stdout.
"""
p = StandardIOTestProcessProtocol()
# Note: the OS X bug which prompted the addition of this test
# is an apparent race condition involving non-blocking PTYs.
# Delaying the parent process significantly increases the
# likelihood of the race going the wrong way. If you need to
# fiddle with this code at all, uncommenting the next line
# will likely make your life much easier. It is commented out
# because it makes the test quite slow.
# p.onConnection.addCallback(lambda ign: __import__('time').sleep(5))
try:
self._spawnProcess(
p, b'stdio_test_lastwrite', UNIQUE_LAST_WRITE_STRING,
usePTY=True)
except ValueError as e:
# Some platforms don't work with usePTY=True
raise unittest.SkipTest(str(e))
def processEnded(reason):
"""
Asserts that the parent received the bytes written by the child
immediately after the child starts.
"""
self.assertTrue(
p.data[1].endswith(UNIQUE_LAST_WRITE_STRING),
"Received %r from child, did not find expected bytes." % (
p.data,))
reason.trap(error.ProcessDone)
return self._requireFailure(p.onCompletion, processEnded)
def test_hostAndPeer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
has C{getHost} and C{getPeer} methods.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_hostpeer')
def processEnded(reason):
host, peer = p.data[1].splitlines()
self.assertTrue(host)
self.assertTrue(peer)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_write(self):
"""
Verify that the C{write} method of the transport of a protocol
connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_write')
def processEnded(reason):
self.assertEqual(p.data[1], b'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_writeSequence(self):
"""
Verify that the C{writeSequence} method of the transport of a
protocol connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_writeseq')
def processEnded(reason):
self.assertEqual(p.data[1], b'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def _junkPath(self):
junkPath = self.mktemp()
with open(junkPath, 'wb') as junkFile:
for i in xrange(1024):
junkFile.write(intToBytes(i) + b'\n')
return junkPath
def test_producer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IProducer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
written = []
toWrite = list(range(100))
def connectionMade(ign):
if toWrite:
written.append(intToBytes(toWrite.pop()) + b"\n")
proc.write(written[-1])
reactor.callLater(0.01, connectionMade, None)
proc = self._spawnProcess(p, b'stdio_test_producer')
p.onConnection.addCallback(connectionMade)
def processEnded(reason):
self.assertEqual(p.data[1], b''.join(written))
self.assertFalse(
toWrite,
"Connection lost with %d writes left to go." % (len(toWrite),))
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_consumer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IConsumer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
junkPath = self._junkPath()
self._spawnProcess(p, b'stdio_test_consumer', junkPath)
def processEnded(reason):
with open(junkPath, 'rb') as f:
self.assertEqual(p.data[1], f.read())
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_normalFileStandardOut(self):
"""
If L{StandardIO} is created with a file descriptor which refers to a
normal file (ie, a file from the filesystem), L{StandardIO.write}
writes bytes to that file. In particular, it does not immediately
consider the file closed or call its protocol's C{connectionLost}
method.
"""
onConnLost = defer.Deferred()
proto = ConnectionLostNotifyingProtocol(onConnLost)
path = filepath.FilePath(self.mktemp())
self.normal = normal = path.open('wb')
        self.addCleanup(normal.close)
kwargs = dict(stdout=normal.fileno())
if not platform.isWindows():
# Make a fake stdin so that StandardIO doesn't mess with the *real*
# stdin.
            r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
kwargs['stdin'] = r
connection = stdio.StandardIO(proto, **kwargs)
# The reactor needs to spin a bit before it might have incorrectly
# decided stdout is closed. Use this counter to keep track of how
# much we've let it spin. If it closes before we expected, this
# counter will have a value that's too small and we'll know.
howMany = 5
count = itertools.count()
def spin():
for value in count:
if value == howMany:
connection.loseConnection()
return
connection.write(intToBytes(value))
break
reactor.callLater(0, spin)
        rea
from lexer import lang
from ..tree import Node
class Integer(Node):
datatype = lang.SEMANTIC_INT_TYPE
"""docstring for Integer."""
def __init__(self, symbol, token):
super().__init__(symbol, token)
def generate_code(self, **cond):
array, line = Node.assignated_array()
        Node.array_append(array, f'{line} LIT {self.symbol}, 0')
# import multiprocessing to avoid this bug (http://bugs.python.org/issue15881#msg170215)
import multiprocessing
assert multiprocessing
import re
from setuptools import setup, find_packages
def get_version():
"""
Extracts the version number from the version.py file.
"""
VERSION_FILE = 'tour/version.py'
    with open(VERSION_FILE, 'rt') as f:
        mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', f.read(), re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
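# Illustrative example: a tour/version.py containing the single line
#   __version__ = '1.0.3'
# makes get_version() return '1.0.3'.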
setup(
name='django-tour',
version=get_version(),
description='Require the django user to complete a series of steps with custom logic',
long_description=open('README.md').read(),
url='https://github.com/ambitioninc/django-tour',
author='Wes Okes',
author_email='wes.okes@gmail.com',
keywords='',
packages=find_packages(),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
        'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Framework :: Django',
        'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
],
license='MIT',
install_requires=[
'Django>=1.7',
'djangorestframework>=2.3.13',
'django-manager-utils>=0.8.2',
'django_filter>=0.7',
],
tests_require=[
'psycopg2',
'django-nose>=1.4',
'mock==1.0.1',
'django_dynamic_fixture',
],
test_suite='run_tests.run_tests',
include_package_data=True,
)