repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
Antiun/project | project_categ_issue/__openerp__.py | 16 | 1500 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Daniel Reis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for project_categ_issue.
{
    # Fixed typo: "Categorie" -> "Categories"
    'name': 'Per Project Configurable Categories on Issues',
    'summary': 'Projects Issues can have an allowed category list',
    'version': '8.0.0.1.0',
    # Quote style unified with the rest of the manifest
    'category': 'Project Management',
    # Fixed typo: "Proeject's" -> "Project's"
    'description': """\
Adds to Issues the ability to limit selectable Categories to a Project's
specific list.
""",
    'author': "Daniel Reis,Odoo Community Association (OCA)",
    'license': 'AGPL-3',
    'depends': [
        'project_issue',
        'project_categ',
    ],
    'data': [
        'project_categ_view.xml',
    ],
    'installable': True,
    'auto_install': True,
}
| agpl-3.0 |
baslr/ArangoDB | 3rdParty/V8/V8-5.0.71.39/tools/swarming_client/third_party/requests/packages/urllib3/packages/six.py | 2375 | 11628 | """Utilities for writing code that runs on Python 2 and 3"""
#Copyright (c) 2010-2011 Benjamin Peterson
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0"  # Revision 41c74fef2ded

# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3

if PY3:
    # On Python 3 text is str, binary data is bytes.
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    # On Python 2 text may be str or unicode; long is a separate int type.
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe for the platform's Py_ssize_t limit: len() raises
        # OverflowError when __len__ returns more than Py_ssize_t can hold.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module whose name differs between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # A missing "new" name means the Python 3 module shares "name".
            self.mod = name if new is None else new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved between Python 2 and 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Fall back first to the old attribute name, then to "name".
            if new_attr is None:
                new_attr = name if old_attr is None else old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            self.attr = name if old_attr is None else old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects"""
# Registry of every attribute and module that moved between Python 2 and 3.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),

    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("winreg", "_winreg"),
]
# Install each lazy descriptor on the _MovedItems pseudo-module class.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
del attr

# Expose the registry as the importable pseudo-module "<package>.moves".
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
    """Add an item to six.moves."""
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # The move may have been resolved already and live only in the
        # pseudo-module's __dict__ rather than on the class.
        if name not in moves.__dict__:
            raise AttributeError("no such move, %r" % (name,))
        del moves.__dict__[name]
# Names of function/method internals and dict iteration methods, which
# differ between Python 2 and 3; used by the accessor helpers below.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_code = "__code__"
    _func_defaults = "__defaults__"

    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_code = "func_code"
    _func_defaults = "func_defaults"

    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"

try:
    advance_iterator = next
except NameError:
    # Python < 2.6 has no next() builtin; emulate it.
    def advance_iterator(it):
        return it.next()
next = advance_iterator
if PY3:
    # Python 3 has no unbound methods; functions are returned as-is.
    def get_unbound_function(unbound):
        return unbound

    Iterator = object

    # Python 3.0/3.1 removed callable(); reimplement via the MRO.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    # Base class giving Python-2 classes a py2-style next() that delegates
    # to the py3-style __next__.
    class Iterator(object):

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Version-neutral accessors for method/function internals, built from the
# attribute names selected above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
    """Return an iterator over the keys of a dictionary."""
    keys_method = getattr(d, _iterkeys)
    return iter(keys_method())
def itervalues(d):
    """Return an iterator over the values of a dictionary."""
    values_method = getattr(d, _itervalues)
    return iter(values_method())
def iteritems(d):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    items_method = getattr(d, _iteritems)
    return iter(items_method())
if PY3:
    # Byte/text literal helpers: on Python 3 b() must encode.
    def b(s):
        return s.encode("latin-1")
    def u(s):
        return s
    if sys.version_info[1] <= 1:
        # bytes((i,)) works everywhere but is slower.
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    # On Python 2 byte strings are the default; u() must decode escapes.
    def b(s):
        return s
    def u(s):
        return unicode(s, "unicode_escape")
    int2byte = chr
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
    # exec and print are real functions on Python 3; reuse the builtins.
    import builtins
    exec_ = getattr(builtins, "exec")

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    print_ = getattr(builtins, "print")
    del builtins

else:
    # On Python 2 exec is a statement; wrap it so it can be called.
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # Default to the caller's namespace.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")

    # The 3-argument raise is a syntax error on Python 3, so it must be
    # compiled lazily via exec_.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")

    def print_(*args, **kwargs):
        """The new-style print function."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)

        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        # If any argument is unicode, the separators must be too, or the
        # implicit joins would fail/coerce badly on Python 2.
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)

_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
    """Create a base class with a metaclass.

    Returns a throwaway class named ``NewBase`` whose metaclass is *meta*
    and whose only parent is *base*; inheriting from the result gives
    subclasses the desired metaclass on both Python 2 and 3.
    """
    bases = (base,)
    return meta("NewBase", bases, {})
| apache-2.0 |
ChanderG/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model

# we create 20 points: 10 shifted to (1, 1) labelled +1, 10 at the origin
# labelled -1
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10

# plot the weighted data points (marker size encodes the weight)
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
            cmap=plt.cm.bone)

# fit the unweighted model
# NOTE(review): n_iter was renamed to max_iter in scikit-learn >= 0.19 —
# confirm the targeted scikit-learn version.
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])

# fit the weighted model and draw its decision boundary dashed
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])

plt.legend([no_weights.collections[0], samples_weights.collections[0]],
           ["no weights", "with weights"], loc="lower left")

plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
glove747/liberty-neutron | neutron/tests/unit/agent/l3/test_legacy_router.py | 13 | 3281 | # Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from neutron.agent.l3 import legacy_router
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.tests import base
_uuid = uuidutils.generate_uuid
class BasicRouterTestCaseFramework(base.BaseTestCase):
    """Shared fixture that builds a LegacyRouter wired to mock collaborators."""

    def _create_router(self, router=None, **kwargs):
        # A falsy router (None, empty dict) gets replaced by a MagicMock.
        router = router or mock.MagicMock()
        self.agent_conf = mock.Mock()
        self.driver = mock.Mock()
        self.router_id = _uuid()
        return legacy_router.LegacyRouter(
            self.router_id, router, self.agent_conf, self.driver, **kwargs)
class TestBasicRouterOperations(BasicRouterTestCaseFramework):
    """Checks that address removals also clean up conntrack state."""

    def _assert_conntrack_cleanup(self, method_name, cidr):
        # Both removal paths must delegate to delete_addr_and_conntrack_state.
        router = self._create_router(mock.MagicMock())
        device = mock.Mock()
        getattr(router, method_name)(device, cidr)
        device.delete_addr_and_conntrack_state.assert_called_once_with(cidr)

    def test_remove_floating_ip(self):
        self._assert_conntrack_cleanup('remove_floating_ip', '15.1.2.3/32')

    def test_remove_external_gateway_ip(self):
        self._assert_conntrack_cleanup('remove_external_gateway_ip',
                                       '172.16.0.0/24')
@mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
class TestAddFloatingIpWithMockGarp(BasicRouterTestCaseFramework):
    """add_floating_ip must send a gratuitous ARP only on success."""

    def test_add_floating_ip(self, send_ip_addr_adv_notif):
        router = self._create_router()
        router._add_fip_addr_to_device = mock.Mock(return_value=True)
        fip = '15.1.2.3'
        status = router.add_floating_ip({'floating_ip_address': fip},
                                        mock.sentinel.interface_name,
                                        mock.sentinel.device)
        # The patched notifier is the same object as ip_lib's attribute.
        send_ip_addr_adv_notif.assert_called_once_with(
            router.ns_name,
            mock.sentinel.interface_name,
            fip,
            self.agent_conf)
        self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, status)

    def test_add_floating_ip_error(self, send_ip_addr_adv_notif):
        router = self._create_router()
        router._add_fip_addr_to_device = mock.Mock(return_value=False)
        status = router.add_floating_ip({'floating_ip_address': '15.1.2.3'},
                                        mock.sentinel.interface_name,
                                        mock.sentinel.device)
        # No ARP advertisement when the address could not be configured.
        self.assertFalse(send_ip_addr_adv_notif.called)
        self.assertEqual(l3_constants.FLOATINGIP_STATUS_ERROR, status)
| apache-2.0 |
broferek/ansible | lib/ansible/modules/network/f5/bigip_iapp_service.py | 38 | 31847 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible metadata describing this module's interface stability and support.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_iapp_service
short_description: Manages TCL iApp services on a BIG-IP
description:
- Manages TCL iApp services on a BIG-IP.
- If you are looking for the API that is communicated with on the BIG-IP,
the one that is used is C(/mgmt/tm/sys/application/service/).
version_added: 2.4
options:
name:
description:
- The name of the iApp service that you want to deploy.
type: str
required: True
template:
description:
- The iApp template from which to instantiate a new service. This
template must exist on your BIG-IP before you can successfully
create a service.
- When creating a new service, this parameter is required.
type: str
parameters:
description:
- A hash of all the required template variables for the iApp template.
If your parameters are stored in a file (the more common scenario)
it is recommended you use either the C(file) or C(template) lookups
to supply the expected parameters.
- These parameters typically consist of the C(lists), C(tables), and
C(variables) fields.
type: dict
force:
description:
- Forces the updating of an iApp service even if the parameters to the
service have not changed. This option is of particular importance if
the iApp template that underlies the service has been updated in-place.
This option is equivalent to re-configuring the iApp if that template
has changed.
type: bool
default: no
state:
description:
- When C(present), ensures that the iApp service is created and running.
When C(absent), ensures that the iApp service has been removed.
type: str
choices:
- present
- absent
default: present
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
version_added: 2.5
strict_updates:
description:
- Indicates whether the application service is tied to the template,
so when the template is updated, the application service changes to
reflect the updates.
- When C(yes), disallows any updates to the resources that the iApp
service has created, if they are not updated directly through the
iApp.
- When C(no), allows updates outside of the iApp.
- If this option is specified in the Ansible task, it will take precedence
over any similar setting in the iApp Service payload that you provide in
the C(parameters) field.
type: bool
default: yes
version_added: 2.5
traffic_group:
description:
- The traffic group for the iApp service. When creating a new service, if
this value is not specified, the default of C(/Common/traffic-group-1)
will be used.
- If this option is specified in the Ansible task, it will take precedence
over any similar setting in the iApp Service payload that you provide in
the C(parameters) field.
type: str
version_added: 2.5
metadata:
description:
- Metadata associated with the iApp service.
- If this option is specified in the Ansible task, it will take precedence
over any similar setting in the iApp Service payload that you provide in
the C(parameters) field.
type: list
version_added: 2.7
description:
description:
- Description of the iApp service.
- If this option is specified in the Ansible task, it will take precedence
over any similar setting in the iApp Service payload that you provide in
the C(parameters) field.
type: str
version_added: 2.7
device_group:
description:
- The device group for the iApp service.
- If this option is specified in the Ansible task, it will take precedence
over any similar setting in the iApp Service payload that you provide in
the C(parameters) field.
type: str
version_added: 2.7
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create HTTP iApp service from iApp template
bigip_iapp_service:
name: foo-service
template: f5.http
parameters: "{{ lookup('file', 'f5.http.parameters.json') }}"
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Upgrade foo-service to v1.2.0rc4 of the f5.http template
bigip_iapp_service:
name: foo-service
template: f5.http.v1.2.0rc4
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Configure a service using parameters in YAML
bigip_iapp_service:
name: tests
template: web_frontends
state: present
parameters:
variables:
- name: var__vs_address
value: 1.1.1.1
- name: pm__apache_servers_for_http
value: 2.2.2.1:80
- name: pm__apache_servers_for_https
value: 2.2.2.2:80
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Re-configure a service whose underlying iApp was updated in place
bigip_iapp_service:
name: tests
template: web_frontends
force: yes
state: present
parameters:
variables:
- name: var__vs_address
value: 1.1.1.1
- name: pm__apache_servers_for_http
value: 2.2.2.1:80
- name: pm__apache_servers_for_https
value: 2.2.2.2:80
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Try to remove the iApp template before the associated Service is removed
bigip_iapp_template:
name: web_frontends
state: absent
provider:
user: admin
password: secret
server: lb.mydomain.com
register: result
failed_when:
- result is not success
- "'referenced by one or more applications' not in result.msg"
- name: Configure a service using more complicated parameters
bigip_iapp_service:
name: tests
template: web_frontends
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
parameters:
variables:
- name: var__vs_address
value: 1.1.1.1
- name: pm__apache_servers_for_http
value: 2.2.2.1:80
- name: pm__apache_servers_for_https
value: 2.2.2.2:80
lists:
- name: irules__irules
value:
- foo
- bar
tables:
- name: basic__snatpool_members
- name: net__snatpool_members
- name: optimizations__hosts
- name: pool__hosts
columnNames:
- name
rows:
- row:
- internal.company.bar
- name: pool__members
columnNames:
- addr
- port
- connection_limit
rows:
- row:
- "none"
- 80
- 0
- name: server_pools__servers
delegate_to: localhost
- name: Override metadata that may or may not exist in parameters
bigip_iapp_service:
name: foo-service
template: f5.http
parameters: "{{ lookup('file', 'f5.http.parameters.json') }}"
metadata:
- persist: yes
name: data 1
- persist: yes
name: data 2
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
# Documented return values; this module adds nothing beyond the common fields.
RETURN = r'''
# only common fields returned
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.urls import build_service_uri
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.urls import build_service_uri
class Parameters(AnsibleF5Parameters):
    """Shared parameter handling for the bigip_iapp_service module.

    Maps REST API attribute names to module argument names and provides
    normalization helpers so that API-side and module-side representations
    of tables, variables, lists, and metadata compare reliably.
    """

    api_map = {
        'strictUpdates': 'strict_updates',
        'trafficGroup': 'traffic_group',
        'deviceGroup': 'device_group',
    }

    returnables = [
        'tables',
        'variables',
        'lists',
        'strict_updates',
        'traffic_group',
        'device_group',
        'metadata',
        'template',
        'description',
    ]

    api_attributes = [
        'tables',
        'variables',
        'template',
        'lists',
        'deviceGroup',
        'inheritedDevicegroup',
        'inheritedTrafficGroup',
        'trafficGroup',
        'strictUpdates',
        # 'metadata',
        'description',
    ]

    updatables = [
        'tables',
        'variables',
        'lists',
        'strict_updates',
        'device_group',
        'traffic_group',
        'metadata',
        'description',
    ]

    def to_return(self):
        """Return a dict of the returnable attributes, unset values filtered out."""
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
        return result

    @staticmethod
    def _coerce_yes_no(value):
        """Normalize an 'encrypted' flag to the canonical strings 'yes'/'no'.

        BIG-IP and user input may express the flag as the strings
        'True'/'False' or as real booleans; anything else is returned
        unchanged. Extracted here because the same coercion was previously
        duplicated in normalize_variables and normalize_list.
        """
        if value == 'True':
            return 'yes'
        if value == 'False':
            return 'no'
        if isinstance(value, bool):
            return 'yes' if value else 'no'
        return value

    def normalize_tables(self, tables):
        """Return *tables* as a name-sorted list of plain dicts of strings.

        Raises F5ModuleError when a table entry has no 'name' key.
        """
        result = []
        for table in tables:
            tmp = dict()
            name = table.get('name', None)
            if name is None:
                raise F5ModuleError(
                    "One of the provided tables does not have a name"
                )
            tmp['name'] = str(name)
            columns = table.get('columnNames', None)
            if columns:
                tmp['columnNames'] = [str(x) for x in columns]
                # You cannot have rows without columns
                rows = table.get('rows', None)
                if rows:
                    tmp['rows'] = []
                    for row in rows:
                        tmp['rows'].append(dict(row=[str(x) for x in row['row']]))
            result.append(tmp)
        result = sorted(result, key=lambda k: k['name'])
        return result

    def normalize_variables(self, variables):
        """Return *variables* as a name-sorted list of string-valued dicts,
        guaranteeing each entry has canonical 'encrypted' and 'value' keys.
        """
        result = []
        for variable in variables:
            tmp = dict((str(k), str(v)) for k, v in iteritems(variable))
            if 'encrypted' not in tmp:
                # BIG-IP will inject an 'encrypted' key if you don't provide
                # one, so supply the default 'no' up front.
                tmp['encrypted'] = 'no'
            if 'value' not in tmp:
                tmp['value'] = ''
            elif tmp['value'] == 'none':
                # This seems to happen only on 12.0.0
                tmp['value'] = ''
            elif tmp['value'] == 'True':
                tmp['value'] = 'yes'
            elif tmp['value'] == 'False':
                tmp['value'] = 'no'
            elif isinstance(tmp['value'], bool):
                tmp['value'] = 'yes' if tmp['value'] is True else 'no'
            tmp['encrypted'] = self._coerce_yes_no(tmp['encrypted'])
            result.append(tmp)
        result = sorted(result, key=lambda k: k['name'])
        return result

    def normalize_list(self, lists):
        """Return *lists* as a name-sorted list of normalized dicts."""
        result = []
        # "item" instead of the previous loop variable "list", which
        # shadowed the builtin.
        for item in lists:
            # 'value' is handled separately below because it is itself a list.
            tmp = dict((str(k), str(v)) for k, v in iteritems(item) if k != 'value')
            if 'encrypted' not in item:
                # BIG-IP will inject an 'encrypted' key if you don't provide
                # one, so supply the default 'no' up front.
                tmp['encrypted'] = 'no'
            if 'value' in item:
                if len(item['value']) > 0:
                    # BIG-IP removes empty values entries, so mimic this
                    # behavior for user-supplied values.
                    tmp['value'] = [str(x) for x in item['value']]
            tmp['encrypted'] = self._coerce_yes_no(tmp['encrypted'])
            result.append(tmp)
        result = sorted(result, key=lambda k: k['name'])
        return result

    def normalize_metadata(self, metadata):
        """Return *metadata* entries as {'name', 'persist'} dicts where
        persist is the string 'true' or 'false' expected by the API.
        """
        result = []
        for item in metadata:
            name = item.get('name', None)
            persist = flatten_boolean(item.get('persist', "no"))
            persist = "true" if persist == "yes" else "false"
            result.append({
                "name": name,
                "persist": persist
            })
        return result
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def metadata(self):
        # API metadata is passed through untouched; None means "not set".
        if self._values['metadata'] is None:
            return None
        return self._values['metadata']

    @property
    def tables(self):
        raw = self._values['tables']
        return None if raw is None else self.normalize_tables(raw)

    @property
    def lists(self):
        raw = self._values['lists']
        return None if raw is None else self.normalize_list(raw)

    @property
    def variables(self):
        raw = self._values['variables']
        return None if raw is None else self.normalize_variables(raw)

    @property
    def device_group(self):
        raw = self._values['device_group']
        # The API reports the literal string 'none' for an unset group.
        if raw in (None, 'none'):
            return None
        return raw
class ModuleParameters(Parameters):
    """Parameters as supplied in the Ansible task.

    Task-level options take precedence over the same settings inside the
    raw 'parameters' payload; the param_* properties expose the payload
    values so each public property can fall back to them.
    """

    def _raw_param(self, key):
        """Return *key* from the raw 'parameters' payload, or None.

        Extracted helper replacing the identical None-check/get pattern
        previously repeated in every param_* property.
        """
        if self._values['parameters'] is None:
            return None
        return self._values['parameters'].get(key, None)

    @property
    def param_lists(self):
        return self._raw_param('lists')

    @property
    def param_tables(self):
        return self._raw_param('tables')

    @property
    def param_variables(self):
        return self._raw_param('variables')

    @property
    def param_metadata(self):
        return self._raw_param('metadata')

    @property
    def param_description(self):
        return self._raw_param('description')

    @property
    def param_traffic_group(self):
        result = self._raw_param('trafficGroup')
        if not result:
            return result
        return fq_name(self.partition, result)

    @property
    def param_device_group(self):
        result = self._raw_param('deviceGroup')
        if not result:
            return result
        return fq_name(self.partition, result)

    @property
    def param_strict_updates(self):
        # Distinguish "no parameters at all" (None) from a missing key,
        # which flatten_boolean would otherwise normalize.
        if self._values['parameters'] is None:
            return None
        return flatten_boolean(self._raw_param('strictUpdates'))

    @property
    def tables(self):
        if self._values['tables']:
            return self.normalize_tables(self._values['tables'])
        elif self.param_tables:
            return self.normalize_tables(self.param_tables)
        return None

    @property
    def lists(self):
        if self._values['lists']:
            return self.normalize_list(self._values['lists'])
        elif self.param_lists:
            return self.normalize_list(self.param_lists)
        return None

    @property
    def variables(self):
        if self._values['variables']:
            return self.normalize_variables(self._values['variables'])
        elif self.param_variables:
            return self.normalize_variables(self.param_variables)
        return None

    @property
    def metadata(self):
        if self._values['metadata']:
            return self.normalize_metadata(self._values['metadata'])
        elif self.param_metadata:
            return self.normalize_metadata(self.param_metadata)
        return None

    @property
    def template(self):
        if self._values['template'] is None:
            return None
        return fq_name(self.partition, self._values['template'])

    @property
    def device_group(self):
        if self._values['device_group'] not in [None, 'none']:
            result = fq_name(self.partition, self._values['device_group'])
        elif self.param_device_group not in [None, 'none']:
            result = self.param_device_group
        else:
            return None
        if not result.startswith('/Common/'):
            raise F5ModuleError(
                "Device groups can only exist in /Common"
            )
        return result

    @property
    def traffic_group(self):
        if self._values['traffic_group']:
            result = fq_name(self.partition, self._values['traffic_group'])
        elif self.param_traffic_group:
            result = self.param_traffic_group
        else:
            return None
        if not result.startswith('/Common/'):
            raise F5ModuleError(
                "Traffic groups can only exist in /Common"
            )
        return result

    @property
    def strict_updates(self):
        if self._values['strict_updates'] is not None:
            result = flatten_boolean(self._values['strict_updates'])
        elif self.param_strict_updates is not None:
            result = flatten_boolean(self.param_strict_updates)
        else:
            return None
        # The REST API expects 'enabled'/'disabled', not 'yes'/'no'.
        return 'enabled' if result == 'yes' else 'disabled'

    @property
    def description(self):
        if self._values['description']:
            return self._values['description']
        elif self.param_description:
            return self.param_description
        return None
class Changes(Parameters):
    """Base container for values that differ between want and have."""
    pass
class UsableChanges(Changes):
    """Changes shaped for submission to the BIG-IP REST API."""
    pass
class ReportableChanges(Changes):
    """Changes shaped for reporting back to the Ansible user."""
    pass
class Difference(object):
    """Computes which attributes differ between desired and current state."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for *param*, or None when unchanged.

        A property named *param* defined on this class takes precedence
        over the generic attribute comparison.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        wanted = getattr(self.want, param)
        try:
            if wanted != getattr(self.have, param):
                return wanted
        except AttributeError:
            # "have" has no such attribute, so the wanted value is a change.
            return wanted

    @property
    def metadata(self):
        if self.want.metadata is None:
            return None
        if self.have.metadata is None:
            return self.want.metadata
        # Compare as sets of flattened (key, value) pairs so ordering of
        # the metadata entries does not matter.
        desired = [(k, v) for d in self.want.metadata for k, v in iteritems(d)]
        current = [(k, v) for d in self.have.metadata for k, v in iteritems(d)]
        if set(desired) != set(current):
            return dict(
                metadata=self.want.metadata
            )
class ModuleManager(object):
    """Drives the module: compares desired state (want) against device
    state (have) and issues the REST calls needed to converge them.

    The repeated response-decoding/error-raising pattern and the service
    URI construction are consolidated into the private helpers
    ``_parse_response`` and ``_service_uri``.
    """
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.have = None
        self.want = ModuleParameters(params=self.module.params)
        self.changes = UsableChanges()

    def _service_uri(self):
        # Every CRUD call on the service itself uses the same resource URI.
        base_uri = "https://{0}:{1}/mgmt/tm/sys/application/service/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        return build_service_uri(base_uri, self.want.partition, self.want.name)

    def _parse_response(self, resp, error_codes=(400,)):
        """Decode a REST response, raising F5ModuleError on failures.

        Returns the decoded JSON payload when the response code is not one
        of ``error_codes``.
        """
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in error_codes:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response

    def _set_changed_options(self):
        # Seed self.changes with every 'want' value the user supplied.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # Diff want vs have; returns True when anything needs updating.
        diff = Difference(self.want, self.have)
        changed = dict()
        for k in Parameters.updatables:
            change = diff.compare(k)
            if change is None:
                continue
            if isinstance(change, dict):
                changed.update(change)
            else:
                changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def exec_module(self):
        """Entry point: apply the requested state, report what changed."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result

    def present(self):
        if self.exists():
            return self.update()
        return self.create()

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def create(self):
        self._set_changed_options()
        if self.want.traffic_group is None:
            # Match the BIG-IP default when the user specified nothing.
            self.want.update({'traffic_group': '/Common/traffic-group-1'})
        if not self.template_exists():
            raise F5ModuleError(
                "The specified template does not exist in the provided partition."
            )
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the iApp service")
        return True

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update() and not self.want.force:
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def should_update(self):
        return self._update_changed_options()

    def exists(self):
        """Return True when the iApp service exists on the device."""
        resp = self.client.api.get(self._service_uri())
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def update_on_device(self):
        params = self.changes.api_params()
        uri = self._service_uri()
        if params:
            # 'execute-action' re-runs the template definition so that the
            # changed values actually take effect on the device.
            params['execute-action'] = 'definition'
            self._parse_response(self.client.api.patch(uri, json=params))
        if self.changes.metadata:
            # Metadata is patched in a separate, follow-up request.
            params = dict(metadata=self.changes.metadata)
            params.update({'execute-action': 'definition'})
            self._parse_response(self.client.api.patch(uri, json=params))

    def read_current_from_device(self):
        resp = self.client.api.get(self._service_uri())
        response = self._parse_response(resp)
        return ApiParameters(params=response)

    def template_exists(self):
        """Return True when the referenced iApp template exists."""
        name = fq_name(self.want.partition, self.want.template)
        parts = name.split('/')
        uri = "https://{0}:{1}/mgmt/tm/sys/application/template/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(parts[1], parts[2])
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def create_on_device(self):
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/sys/application/service/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        # Creation may also be rejected with 403 in addition to 400.
        self._parse_response(resp, error_codes=(400, 403))
        if self.changes.metadata:
            # Metadata is not accepted on create; patch it on afterwards.
            payload = dict(metadata=self.changes.metadata)
            self._parse_response(
                self.client.api.patch(self._service_uri(), json=payload)
            )

    def remove_from_device(self):
        uri = self._service_uri()
        # Metadata needs to be zero'd before the service is removed because
        # otherwise, the API will error out saying that "configuration items"
        # currently exist.
        #
        # In other words, the REST API is not able to delete a service while
        # there is existing metadata
        self._parse_response(self.client.api.patch(uri, json=dict(metadata=[])))
        resp = self.client.api.delete(uri)
        if resp.status == 200:
            return True
        raise F5ModuleError(resp.content)
class ArgumentSpec(object):
    """Builds the AnsibleModule argument specification for this module."""
    def __init__(self):
        self.supports_check_mode = True
        spec = {
            'name': dict(required=True),
            'template': dict(),
            'description': dict(),
            'device_group': dict(),
            'parameters': dict(type='dict'),
            'state': dict(
                default='present',
                choices=['absent', 'present']
            ),
            'force': dict(default='no', type='bool'),
            'strict_updates': dict(type='bool', default='yes'),
            'metadata': dict(type='list'),
            'traffic_group': dict(),
            'partition': dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
        }
        # The shared F5 arguments come first; module-specific ones override.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(spec)
def main():
    """Module entry point: build the spec, run the manager, report back."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    try:
        results = ModuleManager(module=module).exec_module()
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
    else:
        module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
vadimtk/chrome4sdp | third_party/cython/src/Cython/Distutils/build_ext.py | 94 | 12955 | """Cython.Distutils.build_ext
Implements a version of the Distutils 'build_ext' command, for
building Cython extension modules."""
# This module should be kept compatible with Python 2.3.
__revision__ = "$Id:$"
import sys
import os
import re
from distutils.core import Command
from distutils.errors import DistutilsPlatformError
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.dep_util import newer, newer_group
from distutils import log
from distutils.dir_util import mkpath
from distutils.command import build_ext as _build_ext
from distutils import sysconfig
extension_name_re = _build_ext.extension_name_re
show_compilers = _build_ext.show_compilers
class Optimization(object):
    """Temporarily strips -O1/-O2/-O3 from the distutils compiler flags.

    Used for --cython-gdb builds so the extension is compiled without
    optimization; the original flag values are remembered and can be
    restored with :meth:`restore_state`.
    """
    def __init__(self):
        self.flags = (
            'OPT',
            'CFLAGS',
            'CPPFLAGS',
            'EXTRA_CFLAGS',
            'BASECFLAGS',
            'PY_CFLAGS',
        )
        # Snapshot the original values so they can be restored later.
        self.state = sysconfig.get_config_vars(*self.flags)
        self.config_vars = sysconfig.get_config_vars()

    def disable_optimization(self):
        "disable optimization for the C or C++ compiler"
        badoptions = ('-O1', '-O2', '-O3')
        for flag, option in zip(self.flags, self.state):
            if option is None:
                continue
            kept = [opt for opt in option.split() if opt not in badoptions]
            self.config_vars[flag] = ' '.join(kept)

    def restore_state(self):
        "restore the original state"
        for flag, option in zip(self.flags, self.state):
            if option is not None:
                self.config_vars[flag] = option
optimization = Optimization()
class build_ext(_build_ext.build_ext):
    """distutils 'build_ext' command that first translates Cython sources
    (.pyx / .py) to C or C++, then defers to the standard C build.

    Legacy 'pyrex_*' option/attribute spellings are accepted as aliases
    for the 'cython_*' ones.
    """
    description = "build C/C++ and Cython extensions (compile/link to build directory)"
    sep_by = _build_ext.build_ext.sep_by
    user_options = _build_ext.build_ext.user_options
    boolean_options = _build_ext.build_ext.boolean_options
    help_options = _build_ext.build_ext.help_options
    # Add the pyrex specific data.
    user_options.extend([
        ('cython-cplus', None,
         "generate C++ source files"),
        ('cython-create-listing', None,
         "write errors to a listing file"),
        ('cython-line-directives', None,
         "emit source line directives"),
        ('cython-include-dirs=', None,
         "path to the Cython include files" + sep_by),
        ('cython-c-in-temp', None,
         "put generated C files in temp directory"),
        ('cython-gen-pxi', None,
         "generate .pxi file for public declarations"),
        ('cython-directives=', None,
         "compiler directive overrides"),
        ('cython-gdb', None,
         "generate debug information for cygdb"),
        ('cython-compile-time-env', None,
         "cython compile time environment"),
        # For backwards compatibility.
        ('pyrex-cplus', None,
         "generate C++ source files"),
        ('pyrex-create-listing', None,
         "write errors to a listing file"),
        ('pyrex-line-directives', None,
         "emit source line directives"),
        ('pyrex-include-dirs=', None,
         "path to the Cython include files" + sep_by),
        ('pyrex-c-in-temp', None,
         "put generated C files in temp directory"),
        ('pyrex-gen-pxi', None,
         "generate .pxi file for public declarations"),
        ('pyrex-directives=', None,
         "compiler directive overrides"),
        ('pyrex-gdb', None,
         "generate debug information for cygdb"),
    ])
    boolean_options.extend([
        'cython-cplus', 'cython-create-listing', 'cython-line-directives',
        'cython-c-in-temp', 'cython-gdb',
        # For backwards compatibility.
        'pyrex-cplus', 'pyrex-create-listing', 'pyrex-line-directives',
        'pyrex-c-in-temp', 'pyrex-gdb',
    ])
    def initialize_options(self):
        """Set every Cython-specific option to its default value."""
        _build_ext.build_ext.initialize_options(self)
        self.cython_cplus = 0
        self.cython_create_listing = 0
        self.cython_line_directives = 0
        self.cython_include_dirs = None
        self.cython_directives = None
        self.cython_c_in_temp = 0
        self.cython_gen_pxi = 0
        self.cython_gdb = False
        self.no_c_in_traceback = 0
        self.cython_compile_time_env = None
    def __getattr__(self, name):
        # Transparently alias legacy 'pyrex_*' attribute reads to 'cython_*'.
        if name[:6] == 'pyrex_':
            return getattr(self, 'cython_' + name[6:])
        else:
            return _build_ext.build_ext.__getattr__(self, name)
    def __setattr__(self, name, value):
        # Transparently alias legacy 'pyrex_*' attribute writes to 'cython_*'.
        if name[:6] == 'pyrex_':
            return setattr(self, 'cython_' + name[6:], value)
        else:
            # _build_ext.build_ext.__setattr__(self, name, value)
            self.__dict__[name] = value
    def finalize_options (self):
        """Normalize option values (split include path, default directives)."""
        _build_ext.build_ext.finalize_options(self)
        if self.cython_include_dirs is None:
            self.cython_include_dirs = []
        elif isinstance(self.cython_include_dirs, basestring):
            # NOTE: 'basestring' — this module targets Python 2.
            self.cython_include_dirs = \
                self.cython_include_dirs.split(os.pathsep)
        if self.cython_directives is None:
            self.cython_directives = {}
    # finalize_options ()
    def run(self):
        """Run the build, disabling C optimization first if gdb debugging
        was requested anywhere."""
        # We have one shot at this before build_ext initializes the compiler.
        # If --pyrex-gdb is in effect as a command line option or as option
        # of any Extension module, disable optimization for the C or C++
        # compiler.
        if self.cython_gdb or [1 for ext in self.extensions
                               if getattr(ext, 'cython_gdb', False)]:
            optimization.disable_optimization()
        _build_ext.build_ext.run(self)
    def build_extensions(self):
        """Cythonize each extension's sources, then build it normally."""
        # First, sanity-check the 'extensions' list
        self.check_extensions_list(self.extensions)
        for ext in self.extensions:
            ext.sources = self.cython_sources(ext.sources, ext)
            self.build_extension(ext)
    def cython_sources(self, sources, extension):
        """
        Walk the list of source files in 'sources', looking for Cython
        source files (.pyx and .py).  Run Cython on all that are
        found, and return a modified 'sources' list with Cython source
        files replaced by the generated C (or C++) files.
        """
        try:
            from Cython.Compiler.Main \
                import CompilationOptions, \
                       default_options as cython_default_options, \
                       compile as cython_compile
            from Cython.Compiler.Errors import PyrexError
        except ImportError:
            e = sys.exc_info()[1]
            print("failed to import Cython: %s" % e)
            raise DistutilsPlatformError("Cython does not appear to be installed")
        new_sources = []
        cython_sources = []
        cython_targets = {}
        # Setup create_list and cplus from the extension options if
        # Cython.Distutils.extension.Extension is used, otherwise just
        # use what was parsed from the command-line or the configuration file.
        # cplus will also be set to true is extension.language is equal to
        # 'C++' or 'c++'.
        #try:
        #    create_listing = self.cython_create_listing or \
        #        extension.cython_create_listing
        #    cplus = self.cython_cplus or \
        #        extension.cython_cplus or \
        #        (extension.language != None and \
        #            extension.language.lower() == 'c++')
        #except AttributeError:
        #    create_listing = self.cython_create_listing
        #    cplus = self.cython_cplus or \
        #        (extension.language != None and \
        #            extension.language.lower() == 'c++')
        # Command-line options win unless the Extension object carries its
        # own Cython settings.
        create_listing = self.cython_create_listing or \
            getattr(extension, 'cython_create_listing', 0)
        line_directives = self.cython_line_directives or \
            getattr(extension, 'cython_line_directives', 0)
        no_c_in_traceback = self.no_c_in_traceback or \
            getattr(extension, 'no_c_in_traceback', 0)
        cplus = self.cython_cplus or getattr(extension, 'cython_cplus', 0) or \
                (extension.language and extension.language.lower() == 'c++')
        cython_gen_pxi = self.cython_gen_pxi or getattr(extension, 'cython_gen_pxi', 0)
        cython_gdb = self.cython_gdb or getattr(extension, 'cython_gdb', False)
        cython_compile_time_env = self.cython_compile_time_env or \
            getattr(extension, 'cython_compile_time_env', None)
        # Set up the include_path for the Cython compiler:
        #    1.    Start with the command line option.
        #    2.    Add in any (unique) paths from the extension
        #        cython_include_dirs (if Cython.Distutils.extension is used).
        #    3.    Add in any (unique) paths from the extension include_dirs
        includes = self.cython_include_dirs
        try:
            for i in extension.cython_include_dirs:
                if not i in includes:
                    includes.append(i)
        except AttributeError:
            pass
        for i in extension.include_dirs:
            if not i in includes:
                includes.append(i)
        # Set up Cython compiler directives:
        #    1. Start with the command line option.
        #    2. Add in any (unique) entries from the extension
        #         cython_directives (if Cython.Distutils.extension is used).
        directives = self.cython_directives
        if hasattr(extension, "cython_directives"):
            directives.update(extension.cython_directives)
        # Set the target_ext to '.c'.  Cython will change this to '.cpp' if
        # needed.
        if cplus:
            target_ext = '.cpp'
        else:
            target_ext = '.c'
        # Decide whether to drop the generated C files into the temp dir
        # or the source tree.
        if not self.inplace and (self.cython_c_in_temp
                                 or getattr(extension, 'cython_c_in_temp', 0)):
            target_dir = os.path.join(self.build_temp, "pyrex")
            for package_name in extension.name.split('.')[:-1]:
                target_dir = os.path.join(target_dir, package_name)
        else:
            target_dir = None
        newest_dependency = None
        for source in sources:
            (base, ext) = os.path.splitext(os.path.basename(source))
            if ext == ".py":
                # FIXME: we might want to special case this some more
                ext = '.pyx'
            if ext == ".pyx":              # Cython source file
                output_dir = target_dir or os.path.dirname(source)
                new_sources.append(os.path.join(output_dir, base + target_ext))
                cython_sources.append(source)
                cython_targets[source] = new_sources[-1]
            elif ext == '.pxi' or ext == '.pxd':
                # Header-like dependencies: track the newest so targets are
                # rebuilt when any of them changes.
                if newest_dependency is None \
                        or newer(source, newest_dependency):
                    newest_dependency = source
            else:
                new_sources.append(source)
        if not cython_sources:
            return new_sources
        module_name = extension.name
        for source in cython_sources:
            target = cython_targets[source]
            depends = [source] + list(extension.depends or ())
            if(source[-4:].lower()==".pyx" and os.path.isfile(source[:-3]+"pxd")):
                depends += [source[:-3]+"pxd"]
            rebuild = self.force or newer_group(depends, target, 'newer')
            if not rebuild and newest_dependency is not None:
                rebuild = newer(newest_dependency, target)
            if rebuild:
                log.info("cythoning %s to %s", source, target)
                self.mkpath(os.path.dirname(target))
                if self.inplace:
                    output_dir = os.curdir
                else:
                    output_dir = self.build_lib
                options = CompilationOptions(cython_default_options,
                    use_listing_file = create_listing,
                    include_path = includes,
                    compiler_directives = directives,
                    output_file = target,
                    cplus = cplus,
                    emit_linenums = line_directives,
                    c_line_in_traceback = not no_c_in_traceback,
                    generate_pxi = cython_gen_pxi,
                    output_dir = output_dir,
                    gdb_debug = cython_gdb,
                    compile_time_env = cython_compile_time_env)
                result = cython_compile(source, options=options,
                                        full_module_name=module_name)
            else:
                log.info("skipping '%s' Cython extension (up-to-date)", target)
        return new_sources
    # cython_sources ()
# class build_ext
| bsd-3-clause |
antotodd/project2 | lib/markupsafe/_native.py | 1243 | 1187 | # -*- coding: utf-8 -*-
"""
markupsafe._native
~~~~~~~~~~~~~~~~~~
Native Python implementation the C module is not compiled.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from markupsafe import Markup
from markupsafe._compat import text_type
def escape(s):
    """Convert the characters &, <, >, ' and " in string s to HTML-safe
    sequences.  Use this if you need to display text that might contain
    such characters in HTML.  Marks return value as markup string.
    """
    if hasattr(s, '__html__'):
        return s.__html__()
    # BUG FIX: the replacement strings had been collapsed to the bare
    # characters themselves (e.g. .replace('&', '&')), so the function
    # escaped nothing.  Restore the HTML entities; '&' must be replaced
    # first so the entities inserted below are not double-escaped.
    return Markup(text_type(s)
        .replace('&', '&amp;')
        .replace('>', '&gt;')
        .replace('<', '&lt;')
        .replace("'", '&#39;')
        .replace('"', '&#34;')
    )
def escape_silent(s):
    """Like :func:`escape` but converts `None` into an empty
    markup string.
    """
    return Markup() if s is None else escape(s)
def soft_unicode(s):
    """Make a string unicode if it isn't already.  That way a markup
    string is not converted back to unicode.
    """
    return s if isinstance(s, text_type) else text_type(s)
| apache-2.0 |
philipan/paparazzi | sw/ground_segment/python/ivytoredis/ivy_to_redis.py | 39 | 2358 | #!/usr/bin/env python
from __future__ import print_function
import redis
import time
import signal
import argparse
import sys
import os
# if PAPARAZZI_SRC not set, then assume the tree containing this
# file is a reasonable substitute
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../../../')))
PPRZ_LIB_PYTHON = os.path.join(PPRZ_SRC, "sw/lib/python")
sys.path.append(PPRZ_LIB_PYTHON)
from ivy_msg_interface import IvyMessagesInterface
server = None
class Ivy2RedisServer():
    """Bridges Ivy bus messages into redis.

    Every received message is both published on a redis channel and stored
    under a key of the same name, so subscribers get live updates and late
    readers can fetch the most recent value.
    """
    def __init__(self, redishost, redisport, verbose=False):
        self.verbose = verbose
        self.interface = IvyMessagesInterface(self.message_recv)
        self.r = redis.StrictRedis(host=redishost, port=redisport, db=0)
        self.keep_running = True
        print("Connected to redis server %s on port %i" % (redishost, redisport))

    def message_recv(self, ac_id, msg):
        # Telemetry from an aircraft (non-zero ac_id) includes the id in the
        # key; ground messages (ac_id 0) do not.
        if ac_id:
            key = "{0}.{1}.{2}".format(msg.msg_class, msg.name, ac_id)
        else:
            key = "{0}.{1}".format(msg.msg_class, msg.name)
        payload = msg.to_json(payload_only=True)
        if self.verbose:
            print("received message, key=%s, msg=%s" % (key, payload))
            sys.stdout.flush()
        self.r.publish(key, payload)
        self.r.set(key, payload)

    def run(self):
        # Idle loop; the Ivy interface delivers messages on its own threads.
        while self.keep_running:
            time.sleep(0.1)

    def stop(self):
        self.keep_running = False
        self.interface.shutdown()
def signal_handler(signal, frame):
    # SIGINT handler: ask the global server's run() loop to exit cleanly.
    global server
    server.stop()
def _parse_args():
    """Build and evaluate the command line parser."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--server", help="hostname here redis runs", default="localhost")
    parser.add_argument("-p", "--port", help="port used by redis", type=int, default=6379)
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true")
    return parser.parse_args()


def main():
    """Start the Ivy-to-redis bridge and block until interrupted."""
    global server
    args = _parse_args()
    server = Ivy2RedisServer(args.server, args.port, args.verbose)
    # Install the SIGINT handler before entering the wait loop so Ctrl-C
    # shuts the bridge down cleanly.
    signal.signal(signal.SIGINT, signal_handler)
    server.run()


if __name__ == '__main__':
    main()
| gpl-2.0 |
ehashman/oh-mainline | vendor/packages/scrapy/scrapy/core/downloader/__init__.py | 19 | 6133 | import random
import warnings
from time import time
from collections import deque
from functools import partial
from twisted.internet import reactor, defer
from twisted.python.failure import Failure
from scrapy.utils.defer import mustbe_deferred
from scrapy.utils.signal import send_catch_log
from scrapy.utils.httpobj import urlparse_cached
from scrapy.resolver import dnscache
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy import signals
from scrapy import log
from .middleware import DownloaderMiddlewareManager
from .handlers import DownloadHandlers
class Slot(object):
    """Downloader slot: per-key (host or IP) download state and limits."""

    def __init__(self, concurrency, delay, settings):
        self.concurrency = concurrency
        self.delay = delay
        self.randomize_delay = settings.getbool('RANDOMIZE_DOWNLOAD_DELAY')
        self.active = set()        # requests in any stage of downloading
        self.queue = deque()       # (request, deferred) awaiting a free slot
        self.transferring = set()  # requests currently being transferred
        self.lastseen = 0          # time of the last transfer in this slot

    def free_transfer_slots(self):
        """How many more requests this slot may transfer right now."""
        return self.concurrency - len(self.transferring)

    def download_delay(self):
        """Delay before the next download; optionally randomized +/-50%."""
        if self.randomize_delay:
            return random.uniform(0.5 * self.delay, 1.5 * self.delay)
        return self.delay
def _get_concurrency_delay(concurrency, spider, settings):
delay = settings.getfloat('DOWNLOAD_DELAY')
if hasattr(spider, 'DOWNLOAD_DELAY'):
warnings.warn("%s.DOWNLOAD_DELAY attribute is deprecated, use %s.download_delay instead" %
(type(spider).__name__, type(spider).__name__))
delay = spider.DOWNLOAD_DELAY
if hasattr(spider, 'download_delay'):
delay = spider.download_delay
# TODO: remove for Scrapy 0.15
c = settings.getint('CONCURRENT_REQUESTS_PER_SPIDER')
if c:
warnings.warn("CONCURRENT_REQUESTS_PER_SPIDER setting is deprecated, " \
"use CONCURRENT_REQUESTS_PER_DOMAIN instead", ScrapyDeprecationWarning)
concurrency = c
# ----------------------------
if hasattr(spider, 'max_concurrent_requests'):
concurrency = spider.max_concurrent_requests
if delay > 0:
concurrency = 1 # force concurrency=1 if download delay required
return concurrency, delay
class Downloader(object):
    """Schedules and executes downloads, enforcing the global concurrency
    limit plus per-domain (or per-IP) limits and download delays via
    per-key Slot objects."""
    def __init__(self, crawler):
        self.settings = crawler.settings
        self.slots = {}           # active slots, keyed by hostname or IP
        self.active = set()       # all requests currently in the downloader
        self.handlers = DownloadHandlers()
        self.total_concurrency = self.settings.getint('CONCURRENT_REQUESTS')
        self.domain_concurrency = self.settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
        self.ip_concurrency = self.settings.getint('CONCURRENT_REQUESTS_PER_IP')
        self.middleware = DownloaderMiddlewareManager.from_crawler(crawler)
        self.inactive_slots = {}  # parked slots kept so their state survives
    def fetch(self, request, spider):
        """Download `request` through the middleware chain; returns a
        Deferred that fires with the response (or failure)."""
        key, slot = self._get_slot(request, spider)
        self.active.add(request)
        slot.active.add(request)
        def _deactivate(response):
            self.active.remove(request)
            slot.active.remove(request)
            if not slot.active: # remove empty slots
                self.inactive_slots[key] = self.slots.pop(key)
            return response
        dlfunc = partial(self._enqueue_request, slot=slot)
        dfd = self.middleware.download(dlfunc, request, spider)
        return dfd.addBoth(_deactivate)
    def needs_backout(self):
        """True when the global concurrency limit has been reached."""
        return len(self.active) >= self.total_concurrency
    def _get_slot(self, request, spider):
        """Return (key, Slot) for this request, creating or reviving the
        slot as needed."""
        key = urlparse_cached(request).hostname or ''
        if self.ip_concurrency:
            # Per-IP limits: key by the cached DNS result instead of the
            # hostname (falling back to the hostname when not resolved yet).
            key = dnscache.get(key, key)
        if key not in self.slots:
            if key in self.inactive_slots:
                self.slots[key] = self.inactive_slots.pop(key)
            else:
                if self.ip_concurrency:
                    concurrency = self.ip_concurrency
                else:
                    concurrency = self.domain_concurrency
                concurrency, delay = _get_concurrency_delay(concurrency, spider, self.settings)
                self.slots[key] = Slot(concurrency, delay, self.settings)
        return key, self.slots[key]
    def _enqueue_request(self, request, spider, slot):
        """Queue the request on its slot and return a Deferred fired once
        the download completes (after the response_downloaded signal)."""
        def _downloaded(response):
            send_catch_log(signal=signals.response_downloaded, \
                    response=response, request=request, spider=spider)
            return response
        deferred = defer.Deferred().addCallback(_downloaded)
        slot.queue.append((request, deferred))
        self._process_queue(spider, slot)
        return deferred
    def _process_queue(self, spider, slot):
        """Start as many queued downloads as the slot's delay and free
        transfer slots allow; reschedules itself when delayed."""
        # Delay queue processing if a download_delay is configured
        now = time()
        delay = slot.download_delay()
        if delay:
            penalty = delay - now + slot.lastseen
            if penalty > 0 and slot.free_transfer_slots():
                d = defer.Deferred()
                d.addCallback(self._process_queue, slot)
                reactor.callLater(penalty, d.callback, spider)
                return
            slot.lastseen = now
        # Process enqueued requests if there are free slots to transfer for this slot
        while slot.queue and slot.free_transfer_slots() > 0:
            request, deferred = slot.queue.popleft()
            dfd = self._download(slot, request, spider)
            dfd.chainDeferred(deferred)
    def _download(self, slot, request, spider):
        # The order is very important for the following deferreds. Do not change!

        # 1. Create the download deferred
        dfd = mustbe_deferred(self.handlers.download_request, request, spider)

        # 2. After response arrives,  remove the request from transferring
        # state to free up the transferring slot so it can be used by the
        # following requests (perhaps those which came from the downloader
        # middleware itself)
        slot.transferring.add(request)
        def finish_transferring(_):
            slot.transferring.remove(request)
            self._process_queue(spider, slot)
            return _
        return dfd.addBoth(finish_transferring)
    def is_idle(self):
        """True when no slot has any work left."""
        return not self.slots
| agpl-3.0 |
bonitadecker77/python-for-android | python-build/python-libs/gdata/src/gdata/apps/__init__.py | 285 | 21144 | #!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains objects used with Google Apps."""
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
import atom
import gdata
# XML namespaces which are often used in Google Apps entity.
APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
class EmailList(atom.AtomBase):
  """The Google Apps EmailList element.

  Maps the apps:emailList XML element; its only attribute is ``name``.
  """

  _tag = 'emailList'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['name'] = 'name'

  def __init__(self, name=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.name = name
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
def EmailListFromString(xml_string):
  """Deserialize an apps:emailList XML string into an EmailList object."""
  return atom.CreateClassFromXMLString(EmailList, xml_string)
class Who(atom.AtomBase):
  """The Google Apps Who element (gd:who).

  Carries a relationship type (``rel``) and an ``email`` address.
  """

  _tag = 'who'
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['rel'] = 'rel'
  _attributes['email'] = 'email'

  def __init__(self, rel=None, email=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.rel = rel
    self.email = email
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
def WhoFromString(xml_string):
  """Deserialize a gd:who XML string into a Who object."""
  return atom.CreateClassFromXMLString(Who, xml_string)
class Login(atom.AtomBase):
  """The Google Apps Login element.

  Holds the credentials and account status flags of a provisioned user.
  """

  _tag = 'login'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['userName'] = 'user_name'
  _attributes['password'] = 'password'
  _attributes['suspended'] = 'suspended'
  _attributes['admin'] = 'admin'
  _attributes['changePasswordAtNextLogin'] = 'change_password'
  _attributes['agreedToTerms'] = 'agreed_to_terms'
  _attributes['ipWhitelisted'] = 'ip_whitelisted'
  _attributes['hashFunctionName'] = 'hash_function_name'

  def __init__(self, user_name=None, password=None, suspended=None,
               ip_whitelisted=None, hash_function_name=None,
               admin=None, change_password=None, agreed_to_terms=None,
               extension_elements=None, extension_attributes=None,
               text=None):
    self.user_name = user_name
    self.password = password
    self.suspended = suspended
    self.admin = admin
    self.change_password = change_password
    self.agreed_to_terms = agreed_to_terms
    self.ip_whitelisted = ip_whitelisted
    self.hash_function_name = hash_function_name
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
def LoginFromString(xml_string):
  """Deserialize an apps:login XML string into a Login object."""
  return atom.CreateClassFromXMLString(Login, xml_string)
class Quota(atom.AtomBase):
  """The Google Apps Quota element.

  Maps the apps:quota XML element; its only attribute is ``limit``.
  """

  _tag = 'quota'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['limit'] = 'limit'

  def __init__(self, limit=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.limit = limit
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
def QuotaFromString(xml_string):
  """Deserialize an apps:quota XML string into a Quota object."""
  return atom.CreateClassFromXMLString(Quota, xml_string)
class Name(atom.AtomBase):
  """The Google Apps Name element.

  Maps the apps:name XML element with family and given name attributes.
  """

  _tag = 'name'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['familyName'] = 'family_name'
  _attributes['givenName'] = 'given_name'

  def __init__(self, family_name=None, given_name=None,
               extension_elements=None, extension_attributes=None, text=None):
    self.family_name = family_name
    self.given_name = given_name
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
def NameFromString(xml_string):
  """Deserialize an apps:name XML string into a Name object."""
  return atom.CreateClassFromXMLString(Name, xml_string)
class Nickname(atom.AtomBase):
  """The Google Apps Nickname element.

  Maps the apps:nickname XML element; its only attribute is ``name``.
  """

  _tag = 'nickname'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['name'] = 'name'

  def __init__(self, name=None,
               extension_elements=None, extension_attributes=None, text=None):
    self.name = name
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
def NicknameFromString(xml_string):
  """Deserialize an apps:nickname XML string into a Nickname object."""
  return atom.CreateClassFromXMLString(Nickname, xml_string)
class NicknameEntry(gdata.GDataEntry):
    """Atom entry carrying a Google Apps nickname and its login element."""

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _children['{%s}login' % APPS_NAMESPACE] = ('login', Login)
    _children['{%s}nickname' % APPS_NAMESPACE] = ('nickname', Nickname)

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None,
                 login=None, nickname=None,
                 extended_property=None,
                 extension_elements=None, extension_attributes=None, text=None):
        """Initialise the generic Atom fields, then the apps-specific ones."""
        atom_fields = dict(author=author, category=category, content=content,
                           atom_id=atom_id, link=link, published=published,
                           title=title, updated=updated)
        gdata.GDataEntry.__init__(self, **atom_fields)
        self.login = login
        self.nickname = nickname
        self.extended_property = extended_property if extended_property else []
        self.text = text
        self.extension_elements = extension_elements if extension_elements else []
        self.extension_attributes = extension_attributes if extension_attributes else {}


def NicknameEntryFromString(xml_string):
    """Deserialize *xml_string* into a NicknameEntry."""
    return atom.CreateClassFromXMLString(NicknameEntry, xml_string)
class NicknameFeed(gdata.GDataFeed, gdata.LinkFinder):
    """Atom feed whose entries are parsed as NicknameEntry objects."""

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [NicknameEntry])

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None,
                 entry=None, total_results=None, start_index=None,
                 items_per_page=None, extension_elements=None,
                 extension_attributes=None, text=None):
        """Forward every field straight to the GDataFeed initialiser."""
        feed_fields = dict(author=author, category=category,
                           contributor=contributor, generator=generator,
                           icon=icon, atom_id=atom_id, link=link, logo=logo,
                           rights=rights, subtitle=subtitle, title=title,
                           updated=updated, entry=entry,
                           total_results=total_results,
                           start_index=start_index,
                           items_per_page=items_per_page,
                           extension_elements=extension_elements,
                           extension_attributes=extension_attributes,
                           text=text)
        gdata.GDataFeed.__init__(self, **feed_fields)


def NicknameFeedFromString(xml_string):
    """Deserialize *xml_string* into a NicknameFeed."""
    return atom.CreateClassFromXMLString(NicknameFeed, xml_string)
class UserEntry(gdata.GDataEntry):
    """Atom entry describing a Google Apps user account."""

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _children['{%s}login' % APPS_NAMESPACE] = ('login', Login)
    _children['{%s}name' % APPS_NAMESPACE] = ('name', Name)
    _children['{%s}quota' % APPS_NAMESPACE] = ('quota', Quota)
    # This child may already be defined in GDataEntry, confirm before removing.
    _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                         [gdata.FeedLink])
    _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who)

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None,
                 login=None, name=None, quota=None, who=None, feed_link=None,
                 extended_property=None,
                 extension_elements=None, extension_attributes=None, text=None):
        """Initialise the generic Atom fields, then the apps-specific ones."""
        atom_fields = dict(author=author, category=category, content=content,
                           atom_id=atom_id, link=link, published=published,
                           title=title, updated=updated)
        gdata.GDataEntry.__init__(self, **atom_fields)
        self.login = login
        self.name = name
        self.quota = quota
        self.who = who
        self.feed_link = feed_link if feed_link else []
        self.extended_property = extended_property if extended_property else []
        self.text = text
        self.extension_elements = extension_elements if extension_elements else []
        self.extension_attributes = extension_attributes if extension_attributes else {}


def UserEntryFromString(xml_string):
    """Deserialize *xml_string* into a UserEntry."""
    return atom.CreateClassFromXMLString(UserEntry, xml_string)
class UserFeed(gdata.GDataFeed, gdata.LinkFinder):
    """Atom feed whose entries are parsed as UserEntry objects."""

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [UserEntry])

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None,
                 entry=None, total_results=None, start_index=None,
                 items_per_page=None, extension_elements=None,
                 extension_attributes=None, text=None):
        """Forward every field straight to the GDataFeed initialiser."""
        feed_fields = dict(author=author, category=category,
                           contributor=contributor, generator=generator,
                           icon=icon, atom_id=atom_id, link=link, logo=logo,
                           rights=rights, subtitle=subtitle, title=title,
                           updated=updated, entry=entry,
                           total_results=total_results,
                           start_index=start_index,
                           items_per_page=items_per_page,
                           extension_elements=extension_elements,
                           extension_attributes=extension_attributes,
                           text=text)
        gdata.GDataFeed.__init__(self, **feed_fields)


def UserFeedFromString(xml_string):
    """Deserialize *xml_string* into a UserFeed."""
    return atom.CreateClassFromXMLString(UserFeed, xml_string)
class EmailListEntry(gdata.GDataEntry):
    """Atom entry describing a Google Apps email list."""

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _children['{%s}emailList' % APPS_NAMESPACE] = ('email_list', EmailList)
    # Might be able to remove this _children entry.
    _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                         [gdata.FeedLink])

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None,
                 email_list=None, feed_link=None,
                 extended_property=None,
                 extension_elements=None, extension_attributes=None, text=None):
        """Initialise the generic Atom fields, then the apps-specific ones."""
        atom_fields = dict(author=author, category=category, content=content,
                           atom_id=atom_id, link=link, published=published,
                           title=title, updated=updated)
        gdata.GDataEntry.__init__(self, **atom_fields)
        self.email_list = email_list
        self.feed_link = feed_link if feed_link else []
        self.extended_property = extended_property if extended_property else []
        self.text = text
        self.extension_elements = extension_elements if extension_elements else []
        self.extension_attributes = extension_attributes if extension_attributes else {}


def EmailListEntryFromString(xml_string):
    """Deserialize *xml_string* into an EmailListEntry."""
    return atom.CreateClassFromXMLString(EmailListEntry, xml_string)
class EmailListFeed(gdata.GDataFeed, gdata.LinkFinder):
    """Atom feed whose entries are parsed as EmailListEntry objects."""

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [EmailListEntry])

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None,
                 entry=None, total_results=None, start_index=None,
                 items_per_page=None, extension_elements=None,
                 extension_attributes=None, text=None):
        """Forward every field straight to the GDataFeed initialiser."""
        feed_fields = dict(author=author, category=category,
                           contributor=contributor, generator=generator,
                           icon=icon, atom_id=atom_id, link=link, logo=logo,
                           rights=rights, subtitle=subtitle, title=title,
                           updated=updated, entry=entry,
                           total_results=total_results,
                           start_index=start_index,
                           items_per_page=items_per_page,
                           extension_elements=extension_elements,
                           extension_attributes=extension_attributes,
                           text=text)
        gdata.GDataFeed.__init__(self, **feed_fields)


def EmailListFeedFromString(xml_string):
    """Deserialize *xml_string* into an EmailListFeed."""
    return atom.CreateClassFromXMLString(EmailListFeed, xml_string)
class EmailListRecipientEntry(gdata.GDataEntry):
    """Atom entry describing a single recipient of an email list."""

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who)

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None,
                 who=None,
                 extended_property=None,
                 extension_elements=None, extension_attributes=None, text=None):
        """Initialise the generic Atom fields, then the recipient element."""
        atom_fields = dict(author=author, category=category, content=content,
                           atom_id=atom_id, link=link, published=published,
                           title=title, updated=updated)
        gdata.GDataEntry.__init__(self, **atom_fields)
        self.who = who
        self.extended_property = extended_property if extended_property else []
        self.text = text
        self.extension_elements = extension_elements if extension_elements else []
        self.extension_attributes = extension_attributes if extension_attributes else {}


def EmailListRecipientEntryFromString(xml_string):
    """Deserialize *xml_string* into an EmailListRecipientEntry."""
    return atom.CreateClassFromXMLString(EmailListRecipientEntry, xml_string)
class EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder):
    """Atom feed whose entries are parsed as EmailListRecipientEntry objects."""

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = (
        'entry', [EmailListRecipientEntry])

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None,
                 entry=None, total_results=None, start_index=None,
                 items_per_page=None, extension_elements=None,
                 extension_attributes=None, text=None):
        """Forward every field straight to the GDataFeed initialiser."""
        feed_fields = dict(author=author, category=category,
                           contributor=contributor, generator=generator,
                           icon=icon, atom_id=atom_id, link=link, logo=logo,
                           rights=rights, subtitle=subtitle, title=title,
                           updated=updated, entry=entry,
                           total_results=total_results,
                           start_index=start_index,
                           items_per_page=items_per_page,
                           extension_elements=extension_elements,
                           extension_attributes=extension_attributes,
                           text=text)
        gdata.GDataFeed.__init__(self, **feed_fields)


def EmailListRecipientFeedFromString(xml_string):
    """Deserialize *xml_string* into an EmailListRecipientFeed."""
    return atom.CreateClassFromXMLString(EmailListRecipientFeed, xml_string)
class Property(atom.AtomBase):
    """Wrapper for the Google Apps ``<apps:property>`` name/value element."""

    _tag = 'property'
    _namespace = APPS_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['name'] = 'name'
    _attributes['value'] = 'value'

    def __init__(self, name=None, value=None, extension_elements=None,
                 extension_attributes=None, text=None):
        """Record the name/value pair plus any unrecognised XML extensions."""
        self.name = name
        self.value = value
        self.text = text
        self.extension_elements = extension_elements if extension_elements else []
        self.extension_attributes = extension_attributes if extension_attributes else {}


def PropertyFromString(xml_string):
    """Deserialize *xml_string* into a Property element."""
    return atom.CreateClassFromXMLString(Property, xml_string)
class PropertyEntry(gdata.GDataEntry):
    """Atom entry carrying a list of Google Apps Property elements."""

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _children['{%s}property' % APPS_NAMESPACE] = ('property', [Property])

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None,
                 property=None,
                 extended_property=None,
                 extension_elements=None, extension_attributes=None, text=None):
        """Initialise the generic Atom fields, then the property list."""
        atom_fields = dict(author=author, category=category, content=content,
                           atom_id=atom_id, link=link, published=published,
                           title=title, updated=updated)
        gdata.GDataEntry.__init__(self, **atom_fields)
        # NOTE: the parameter name shadows the builtin but is part of the
        # public signature, so it is kept as-is.
        self.property = property
        self.extended_property = extended_property if extended_property else []
        self.text = text
        self.extension_elements = extension_elements if extension_elements else []
        self.extension_attributes = extension_attributes if extension_attributes else {}


def PropertyEntryFromString(xml_string):
    """Deserialize *xml_string* into a PropertyEntry."""
    return atom.CreateClassFromXMLString(PropertyEntry, xml_string)
class PropertyFeed(gdata.GDataFeed, gdata.LinkFinder):
    """Atom feed whose entries are parsed as PropertyEntry objects."""

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PropertyEntry])

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None,
                 entry=None, total_results=None, start_index=None,
                 items_per_page=None, extension_elements=None,
                 extension_attributes=None, text=None):
        """Forward every field straight to the GDataFeed initialiser."""
        feed_fields = dict(author=author, category=category,
                           contributor=contributor, generator=generator,
                           icon=icon, atom_id=atom_id, link=link, logo=logo,
                           rights=rights, subtitle=subtitle, title=title,
                           updated=updated, entry=entry,
                           total_results=total_results,
                           start_index=start_index,
                           items_per_page=items_per_page,
                           extension_elements=extension_elements,
                           extension_attributes=extension_attributes,
                           text=text)
        gdata.GDataFeed.__init__(self, **feed_fields)


def PropertyFeedFromString(xml_string):
    """Deserialize *xml_string* into a PropertyFeed."""
    return atom.CreateClassFromXMLString(PropertyFeed, xml_string)
| apache-2.0 |
eblume/Tiger-Lily | tigerlily/grc/genome_test.py | 1 | 3280 | # genome_test.py - unit tests for genome.py
# Authors:
# * Erich Blume <blume.erich@gmail.com>
#
# Copyright 2011 Erich Blume <blume.erich@gmail.com>
#
# This file is part of Tiger Lily.
#
# Tiger Lily is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tiger Lily is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tiger Lily. If not, see <http://www.gnu.org/licenses/>.
#
"""This module provides unit tests for the ``tigerlily.grc.genome``
module.
As with all unit test modules, the tests it contains can be executed in many
ways, but most easily by going to the project root dir and executing
``python3 setup.py nosetests``.
"""
import unittest
import tempfile
import os
import shutil
import tigerlily.grc.genome as gg
class GRCGenomeTester(unittest.TestCase):
    """Test harness for ``tigerlily.grc.genome.GRCGenome`` class.

    At this time, the tests specifically target downloaded genomes.
    """

    def setUp(self):
        """Create the testing environment"""
        # Each test runs inside a throwaway directory so stored assembly
        # files never pollute the real working tree.
        self._sandbox = tempfile.mkdtemp()
        self._previous_cwd = os.getcwd()
        os.chdir(self._sandbox)

    def tearDown(self):
        """Remove the testing environment"""
        os.chdir(self._previous_cwd)
        shutil.rmtree(self._sandbox)

    def test_NoDigest_NoRetry(self):
        """genome.py: Test local genome 'download', no digest no retry"""
        gg.GRCGenome.download('test_nodigest', store=True)
        self.assertTrue(os.path.isfile('test_nodigest.assembly'))

    def test_GoodDigest_NoRetry(self):
        """genome.py: Test local genome 'download', good digest no retry"""
        gg.GRCGenome.download('test_digest', store=True)
        self.assertTrue(os.path.isfile('test_digest.assembly'))

    def test_BadDigest_NoRetry(self):
        """genome.py: Test local genome 'download', bad digest no retry"""
        with self.assertRaises(EnvironmentError):
            gg.GRCGenome.download('test_baddigest', store=True)
        self.assertFalse(os.path.isfile('test_baddigest.assembly'))

    def test_GoodDigest_FourRetry(self):
        """genome.py: Test local genome 'download', good digest 4 retry"""
        # Note that we wouldn't expect the retries to make a difference since
        # a local file download shouldn't ever fail digest verification, but
        # the test is still valid even if we don't EXPECT it to fail.
        gg.GRCGenome.download('test_digest', store=True, retries=4)
        self.assertTrue(os.path.isfile('test_digest.assembly'))

    def test_BadDigest_FourRetry(self):
        """genome.py: Test local genome 'download', bad digest 4 retry"""
        with self.assertRaises(EnvironmentError):
            gg.GRCGenome.download('test_baddigest', store=True, retries=4)
        self.assertFalse(os.path.isfile('test_baddigest.assembly'))
| gpl-3.0 |
0sm0s1z/subterfuge | utilities/notification.py | 5 | 1228 | #!/usr/bin/python
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import time
import os
import re
import sys
import datetime
sys.path.append('/usr/share/subterfuge')
#Ignore Deprication Warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from django.conf import settings
settings.configure(
DATABASES = {
'default' : {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': "/usr/share/subterfuge/db",
'USER': '',
'PASSWORD': '',
'HOST': '',
}
}
)
from django.db import models
from main.models import *
def main():
    """Create one ``notification`` row from the command-line arguments.

    Usage: notification.py <title> <message> [troubleshoot] [status]

    ``title`` and ``message`` are required.  ``troubleshoot`` defaults to an
    empty string; ``status`` defaults to "new" (pass "hidden" to log the
    entry without showing it in the GUI).
    """
    # The original code read sys.argv inside bare ``except: pass`` blocks,
    # which silently swallowed *every* error (including programming errors).
    # Explicit length checks keep the same optional-argument behaviour.
    if len(sys.argv) < 3:
        sys.stderr.write(
            "usage: notification.py <title> <message> [troubleshoot] [status]\n")
        sys.exit(1)
    title = sys.argv[1]
    message = sys.argv[2]
    troubleshoot = sys.argv[3] if len(sys.argv) > 3 else ""
    # Status = hidden to hide from GUI, but log
    status = sys.argv[4] if len(sys.argv) > 4 else "new"
    now = datetime.datetime.now()
    date = now.strftime("%d-%m-%Y %H:%M")
    logmessage = notification(status=status, title=title, message=message,
                              troubleshoot=troubleshoot, date=date)
    logmessage.save()
if __name__ == '__main__':
main()
| gpl-3.0 |
mila-udem/fuel | tests/__init__.py | 15 | 1392 | from importlib import import_module
from unittest.case import SkipTest
from fuel.utils import find_in_data_path
from fuel import config
def skip_if_not_available(modules=None, datasets=None, configurations=None):
    """Raises a SkipTest exception when requirements are not met.

    Parameters
    ----------
    modules : list
        A list of strings of module names. If one of the modules fails to
        import, the test will be skipped.
    datasets : list
        A list of strings of folder names. If the data path is not
        configured, or the folder does not exist, the test is skipped.
    configurations : list
        A list of strings of configuration names. If this configuration
        is not set and does not have a default, the test will be skipped.

    """
    for module in modules or []:
        try:
            import_module(module)
        except Exception:
            # Any failure (ImportError or a module broken at import time)
            # means the requirement is unavailable.  Attach the module name
            # so the skip reason shows up in the test report.
            raise SkipTest('module "{}" is not importable'.format(module))
    if datasets and not hasattr(config, 'data_path'):
        raise SkipTest('fuel.config.data_path is not configured')
    for dataset in datasets or []:
        try:
            find_in_data_path(dataset)
        except IOError:
            raise SkipTest('dataset "{}" not found in the data path'
                           .format(dataset))
    for configuration in configurations or []:
        if not hasattr(config, configuration):
            raise SkipTest('configuration "{}" is not set'.format(configuration))
| mit |
912/M-new | virtualenvironment/tourism_industry/lib/python2.7/site-packages/setuptools/tests/test_packageindex.py | 377 | 7625 | """Package Index Tests
"""
import sys
import os
import unittest
import pkg_resources
from setuptools.compat import urllib2, httplib, HTTPError, unicode, pathname2url
import distutils.errors
import setuptools.package_index
from setuptools.tests.server import IndexServer
class TestPackageIndex(unittest.TestCase):
    """Behavioural tests for ``setuptools.package_index.PackageIndex``.

    The exception handling below deliberately uses ``sys.exc_info()``
    instead of ``except ... as e`` so the same source runs on both
    Python 2 and Python 3.
    """

    def test_bad_url_bad_port(self):
        # Port 0 is never connectable; open_url must either raise an error
        # mentioning the URL or hand back an HTTPError object.
        index = setuptools.package_index.PackageIndex()
        url = 'http://127.0.0.1:0/nonesuch/test_package_index'
        try:
            v = index.open_url(url)
        except Exception:
            v = sys.exc_info()[1]
            self.assertTrue(url in str(v))
        else:
            self.assertTrue(isinstance(v, HTTPError))

    def test_bad_url_typo(self):
        # issue 16
        # easy_install inquant.contentmirror.plone breaks because of a typo
        # in its home URL
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
        try:
            v = index.open_url(url)
        except Exception:
            v = sys.exc_info()[1]
            self.assertTrue(url in str(v))
        else:
            self.assertTrue(isinstance(v, HTTPError))

    def test_bad_url_bad_status_line(self):
        # A server replying with a garbage status line must surface a
        # readable error, not an unhandled httplib exception.
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        def _urlopen(*args):
            # Stub opener that simulates the malformed response.
            raise httplib.BadStatusLine('line')

        index.opener = _urlopen
        url = 'http://example.com'
        try:
            v = index.open_url(url)
        except Exception:
            v = sys.exc_info()[1]
            self.assertTrue('line' in str(v))
        else:
            raise AssertionError('Should have raise here!')

    def test_bad_url_double_scheme(self):
        """
        A bad URL with a double scheme should raise a DistutilsError.
        """
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        # issue 20
        url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
        try:
            index.open_url(url)
        except distutils.errors.DistutilsError:
            error = sys.exc_info()[1]
            msg = unicode(error)
            # The exact message differs by platform/resolver, so accept any
            # of the known variants.
            assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg
            return
        raise RuntimeError("Did not raise")

    def test_bad_url_screwy_href(self):
        # A malformed href in an index page must not crash the link scanner.
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        # issue #160
        if sys.version_info[0] == 2 and sys.version_info[1] == 7:
            # this should not fail
            url = 'http://example.com'
            page = ('<a href="http://www.famfamfam.com]('
                    'http://www.famfamfam.com/">')
            index.process_index(url, page)

    def test_url_ok(self):
        # file: URLs are always acceptable regardless of the hosts filter.
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )
        url = 'file:///tmp/test_package_index'
        self.assertTrue(index.url_ok(url, True))

    def test_links_priority(self):
        """
        Download links from the pypi simple index should be used before
        external download links.
        https://bitbucket.org/tarek/distribute/issue/163

        Usecase :
        - someone uploads a package on pypi, a md5 is generated
        - someone manually copies this link (with the md5 in the url) onto an
          external page accessible from the package page.
        - someone reuploads the package (with a different md5)
        - while easy_installing, an MD5 error occurs because the external
          link is used
        -> Setuptools should use the link from pypi, not the external one.
        """
        if sys.platform.startswith('java'):
            # Skip this test on jython because binding to :0 fails
            return

        # start an index server
        server = IndexServer()
        server.start()
        index_url = server.base_url() + 'test_links_priority/simple/'

        # scan a test index
        pi = setuptools.package_index.PackageIndex(index_url)
        requirement = pkg_resources.Requirement.parse('foobar')
        pi.find_packages(requirement)
        server.stop()

        # the distribution has been found
        self.assertTrue('foobar' in pi)
        # we have only one link, because links are compared without md5
        self.assertTrue(len(pi['foobar'])==1)
        # the link should be from the index
        self.assertTrue('correct_md5' in pi['foobar'][0].location)

    def test_parse_bdist_wininst(self):
        # (name, python version or None, platform) triples for the four
        # bdist_wininst filename shapes.
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win32-py2.4.exe'), ('reportlab-2.5', '2.4', 'win32'))
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win32.exe'), ('reportlab-2.5', None, 'win32'))
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win-amd64-py2.7.exe'), ('reportlab-2.5', '2.7', 'win-amd64'))
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win-amd64.exe'), ('reportlab-2.5', None, 'win-amd64'))

    def test__vcs_split_rev_from_url(self):
        """
        Test the basic usage of _vcs_split_rev_from_url
        """
        vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url
        url, rev = vsrfu('https://example.com/bar@2995')
        self.assertEqual(url, 'https://example.com/bar')
        self.assertEqual(rev, '2995')

    def test_local_index(self):
        """
        local_open should be able to read an index from the file system.
        """
        f = open('index.html', 'w')
        f.write('<div>content</div>')
        f.close()
        try:
            url = 'file:' + pathname2url(os.getcwd()) + '/'
            res = setuptools.package_index.local_open(url)
        finally:
            # Always clean up the scratch file, even if local_open raises.
            os.remove('index.html')
        assert 'content' in res.read()
class TestContentCheckers(unittest.TestCase):
    """Exercise HashChecker's handling of ``#md5=...`` URL fragments."""

    @staticmethod
    def _from_url(url):
        # Shared shorthand for building a checker from a download URL.
        return setuptools.package_index.HashChecker.from_url(url)

    def test_md5(self):
        hc = self._from_url(
            'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
        hc.feed('You should probably not be using MD5'.encode('ascii'))
        self.assertEqual(hc.hash.hexdigest(),
                         'f12895fdffbd45007040d2e44df98478')
        self.assertTrue(hc.is_valid())

    def test_other_fragment(self):
        "Content checks should succeed silently if no hash is present"
        hc = self._from_url(
            'http://foo/bar#something%20completely%20different')
        hc.feed('anything'.encode('ascii'))
        self.assertTrue(hc.is_valid())

    def test_blank_md5(self):
        "Content checks should succeed if a hash is empty"
        hc = self._from_url('http://foo/bar#md5=')
        hc.feed('anything'.encode('ascii'))
        self.assertTrue(hc.is_valid())

    def test_get_hash_name_md5(self):
        hc = self._from_url(
            'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
        self.assertEqual(hc.hash_name, 'md5')

    def test_report(self):
        hc = self._from_url(
            'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
        message = hc.report(lambda x: x, 'My message about %s')
        self.assertEqual(message, 'My message about md5')
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/psrc/parcel/number_of_surveyed_households.py | 2 | 2619 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from urbansim.functions import attribute_label
from variable_functions import my_attribute_label
class number_of_surveyed_households(Variable):
    """Number of surveyed households associated with each parcel."""
    _return_type = "int32"
    # Household ids at or above this value identify surveyed households.
    surveyed_households_starting_id = 5000000

    def dependencies(self):
        return [attribute_label("household", "parcel_id"),
                my_attribute_label("parcel_id")]

    def compute(self, dataset_pool):
        hh = dataset_pool.get_dataset('household')
        surveyed_flags = (hh.get_attribute("household_id")
                          >= self.surveyed_households_starting_id)
        # Summing the boolean flags per parcel yields the surveyed count.
        return self.get_dataset().sum_dataset_over_ids(hh, constant=surveyed_flags)

    def post_check(self, values, dataset_pool):
        n_households = dataset_pool.get_dataset('household').size()
        self.do_check("x >= 0 and x <= " + str(n_households), values)
from opus_core.tests import opus_unittest
from urbansim.variable_test_toolbox import VariableTestToolbox
from numpy import array
from numpy import ma
from psrc.datasets.parcel_dataset import ParcelDataset
from opus_core.storage_factory import StorageFactory
class Tests(opus_unittest.OpusTestCase):
    variable_name = "psrc.parcel.number_of_surveyed_households"

    def test(self):
        """Surveyed households land in parcels 1, 2 and 4; parcel 3 gets none."""
        store = StorageFactory().get_storage('dict_storage')
        table = 'parcels'
        store.write_table(
            table_name=table,
            table_data={'parcel_id': array([1, 2, 3, 4])},
        )
        parcels = ParcelDataset(in_storage=store, in_table_name=table)
        start_id = number_of_surveyed_households.surveyed_households_starting_id
        computed = VariableTestToolbox().compute_variable(
            self.variable_name,
            data_dictionary={
                'parcel': parcels,
                'household': {
                    'parcel_id': array([1, 2, 3, 4, 2, -1]),
                    'household_id': array([start_id + 1, 1, start_id - 5,
                                           start_id, start_id + 100,
                                           start_id + 11]),
                },
            },
            dataset='parcel',
        )
        expected = array([1, 1, 0, 1])
        self.assert_(ma.allclose(computed, expected, rtol=1e-20),
                     'Error in ' + self.variable_name)
if __name__=='__main__':
opus_unittest.main() | gpl-2.0 |
MeshCollider/bitcoin | test/functional/p2p_timeouts.py | 25 | 3447 | #!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various net timeouts.
- Create three bitcoind nodes:
no_verack_node - we never send a verack in response to their version
no_version_node - we never send a version (only a ping)
no_send_node - we never send any P2P message.
- Start all three nodes
- Wait 1 second
- Assert that we're connected
- Send a ping to no_verack_node and no_version_node
- Wait 1 second
- Assert that we're still connected
- Send a ping to no_verack_node and no_version_node
- Wait 2 seconds
- Assert that we're no longer connected (timeout to receive version/verack is 3 seconds)
"""
from time import sleep
from test_framework.messages import msg_ping
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
class TestP2PConn(P2PInterface):
    """A peer that never acknowledges the node's version message."""

    def on_version(self, message):
        # Deliberately withhold the verack so the handshake never completes.
        return
class TimeoutsTest(BitcoinTestFramework):
    """Drive three half-connected peers into the node's handshake timeout."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # set timeout to receive version/verack to 3 seconds
        self.extra_args = [["-peertimeout=3"]]

    def run_test(self):
        # Setup the p2p connections:
        # peer=0 sends version but never veracks; peer=1 and peer=2 send no
        # version at all (peer=1 will later send a ping, peer=2 stays silent).
        no_verack_node = self.nodes[0].add_p2p_connection(TestP2PConn(), wait_for_verack=False)
        no_version_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False, wait_for_verack=False)
        no_send_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False, wait_for_verack=False)

        # Wait until we got the verack in response to the version. Though, don't wait for the other node to receive the
        # verack, since we never sent one
        no_verack_node.wait_for_verack()

        sleep(1)

        # One second in: still within the 3-second peertimeout window.
        assert no_verack_node.is_connected
        assert no_version_node.is_connected
        assert no_send_node.is_connected

        with self.nodes[0].assert_debug_log(['Unsupported message "ping" prior to verack from peer=0']):
            no_verack_node.send_message(msg_ping())
        with self.nodes[0].assert_debug_log(['non-version message before version handshake. Message "ping" from peer=1']):
            no_version_node.send_message(msg_ping())

        sleep(1)

        # Two seconds in: pings were logged but did not cause a disconnect.
        assert "version" in no_verack_node.last_message
        assert no_verack_node.is_connected
        assert no_version_node.is_connected
        assert no_send_node.is_connected

        no_verack_node.send_message(msg_ping())
        no_version_node.send_message(msg_ping())

        expected_timeout_logs = [
            "version handshake timeout peer=0",
            "socket no message in first 3 seconds, 1 0 peer=1",
            "socket no message in first 3 seconds, 0 0 peer=2",
        ]

        with self.nodes[0].assert_debug_log(expected_msgs=expected_timeout_logs):
            sleep(3)
            # By now, we waited a total of 5 seconds. Off-by-two for two
            # reasons:
            # * The internal precision is one second
            # * Account for network delay

        # All three peers must have been dropped by the timeout.
        assert not no_verack_node.is_connected
        assert not no_version_node.is_connected
        assert not no_send_node.is_connected
if __name__ == '__main__':
TimeoutsTest().main()
| mit |
xxsergzzxx/python-for-android | python-modules/twisted/twisted/words/test/test_ircsupport.py | 49 | 2302 | # Copyright (c) 2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.im.ircsupport}.
"""
from twisted.trial.unittest import TestCase
from twisted.test.proto_helpers import StringTransport
from twisted.words.im.basechat import Conversation, ChatUI
from twisted.words.im.ircsupport import IRCAccount, IRCProto
class StubConversation(Conversation):
    """Conversation stand-in that suppresses window display during tests."""

    def show(self):
        """No-op: tests never render a UI."""
class StubChatUI(ChatUI):
    """ChatUI whose group conversations default to L{StubConversation}."""

    def getGroupConversation(self, group, Class=StubConversation, stayHidden=0):
        """Delegate to ChatUI, but with the stub conversation class."""
        return ChatUI.getGroupConversation(self, group, Class, stayHidden)
class IRCProtoTests(TestCase):
    """
    Tests for L{IRCProto}.
    """
    def setUp(self):
        self.account = IRCAccount(
            "Some account", False, "alice", None, "example.com", 6667)
        self.proto = IRCProto(self.account, StubChatUI(), None)

    def _connect(self):
        # Attach the protocol to a fresh in-memory transport and return the
        # transport so tests can inspect what was written.
        tport = StringTransport()
        self.proto.makeConnection(tport)
        return tport

    def test_login(self):
        """
        When L{IRCProto} is connected to a transport, it sends I{NICK} and
        I{USER} commands with the username from the account object.
        """
        tport = self._connect()
        expected = (
            "NICK alice\r\n"
            "USER alice foo bar :Twisted-IM user\r\n")
        self.assertEquals(tport.value(), expected)

    def test_authenticate(self):
        """
        If created with an account with a password, L{IRCProto} sends a
        I{PASS} command before the I{NICK} and I{USER} commands.
        """
        self.account.password = "secret"
        tport = self._connect()
        expected = (
            "PASS :secret\r\n"
            "NICK alice\r\n"
            "USER alice foo bar :Twisted-IM user\r\n")
        self.assertEquals(tport.value(), expected)

    def test_channels(self):
        """
        If created with an account with a list of channels, L{IRCProto}
        joins each of those channels after registering.
        """
        self.account.channels = ['#foo', '#bar']
        tport = self._connect()
        expected = (
            "NICK alice\r\n"
            "USER alice foo bar :Twisted-IM user\r\n"
            "JOIN #foo\r\n"
            "JOIN #bar\r\n")
        self.assertEquals(tport.value(), expected)
| apache-2.0 |
petricm/DIRAC | AccountingSystem/Service/DataStoreHandler.py | 6 | 8068 | """ DataStore is the service for inserting accounting reports (rows) in the Accounting DB
This service CAN be duplicated iff the first is a "master" and all the others are slaves.
"""
import datetime
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.AccountingSystem.DB.MultiAccountingDB import MultiAccountingDB
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.DISET.RequestHandler import RequestHandler, getServiceOption
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.DISET.RPCClient import RPCClient
__RCSID__ = "$Id$"
class DataStoreHandler(RequestHandler):
""" DISET implementation of service for inserting records in accountingDB.
"""
__acDB = None
@classmethod
def initializeHandler(cls, svcInfoDict):
multiPath = PathFinder.getDatabaseSection("Accounting/MultiDB")
cls.__acDB = MultiAccountingDB(multiPath)
# we can run multiple services in read only mode. In that case we do not bucket
cls.runBucketing = getServiceOption(svcInfoDict, 'RunBucketing', True)
if cls.runBucketing:
cls.__acDB.autoCompactDB() # pylint: disable=no-member
result = cls.__acDB.markAllPendingRecordsAsNotTaken() # pylint: disable=no-member
if not result['OK']:
return result
gThreadScheduler.addPeriodicTask(60, cls.__acDB.loadPendingRecords) # pylint: disable=no-member
return S_OK()
types_registerType = [basestring, list, list, list]
def export_registerType(self, typeName, definitionKeyFields, definitionAccountingFields, bucketsLength):
"""
Register a new type. (Only for all powerful admins)
(Bow before me for I am admin! :)
"""
retVal = gConfig.getSections("/DIRAC/Setups")
if not retVal['OK']:
return retVal
errorsList = []
for setup in retVal['Value']:
retVal = self.__acDB.registerType( # pylint: disable=no-member
setup,
typeName,
definitionKeyFields,
definitionAccountingFields,
bucketsLength)
if not retVal['OK']:
errorsList.append(retVal['Message'])
if errorsList:
return S_ERROR("Error while registering type:\n %s" % "\n ".join(errorsList))
return S_OK()
types_setBucketsLength = [basestring, list]
def export_setBucketsLength(self, typeName, bucketsLength):
"""
Change the buckets Length. (Only for all powerful admins)
(Bow before me for I am admin! :)
"""
retVal = gConfig.getSections("/DIRAC/Setups")
if not retVal['OK']:
return retVal
errorsList = []
for setup in retVal['Value']:
retVal = self.__acDB.changeBucketsLength( # pylint: disable=no-member
setup, typeName, bucketsLength)
if not retVal['OK']:
errorsList.append(retVal['Message'])
if errorsList:
return S_ERROR("Error while changing bucketsLength type:\n %s" % "\n ".join(errorsList))
return S_OK()
types_regenerateBuckets = [basestring]
def export_regenerateBuckets(self, typeName):
"""
Recalculate buckets. (Only for all powerful admins)
(Bow before me for I am admin! :)
"""
retVal = gConfig.getSections("/DIRAC/Setups")
if not retVal['OK']:
return retVal
errorsList = []
for setup in retVal['Value']:
retVal = self.__acDB.regenerateBuckets(setup, typeName) # pylint: disable=no-member
if not retVal['OK']:
errorsList.append(retVal['Message'])
if errorsList:
return S_ERROR("Error while recalculating buckets for type:\n %s" % "\n ".join(errorsList))
return S_OK()
types_getRegisteredTypes = []
def export_getRegisteredTypes(self):
"""
Get a list of registered types (Only for all powerful admins)
(Bow before me for I am admin! :)
"""
return self.__acDB.getRegisteredTypes() # pylint: disable=no-member
types_deleteType = [basestring]
def export_deleteType(self, typeName):
"""
Delete accounting type and ALL its contents. VERY DANGEROUS! (Only for all powerful admins)
(Bow before me for I am admin! :)
"""
retVal = gConfig.getSections("/DIRAC/Setups")
if not retVal['OK']:
return retVal
errorsList = []
for setup in retVal['Value']:
retVal = self.__acDB.deleteType(setup, typeName) # pylint: disable=too-many-function-args,no-member
if not retVal['OK']:
errorsList.append(retVal['Message'])
if errorsList:
return S_ERROR("Error while deleting type:\n %s" % "\n ".join(errorsList))
return S_OK()
types_commit = [basestring, datetime.datetime, datetime.datetime, list]
def export_commit(self, typeName, startTime, endTime, valuesList):
"""
Add a record for a type
"""
setup = self.serviceInfoDict['clientSetup']
startTime = int(Time.toEpoch(startTime))
endTime = int(Time.toEpoch(endTime))
return self.__acDB.insertRecordThroughQueue( # pylint: disable=no-member
setup,
typeName,
startTime,
endTime,
valuesList)
types_commitRegisters = [list]
def export_commitRegisters(self, entriesList):
"""
Add a record for a type
"""
setup = self.serviceInfoDict['clientSetup']
expectedTypes = [basestring, datetime.datetime, datetime.datetime, list]
for entry in entriesList:
if len(entry) != 4:
return S_ERROR("Invalid records")
for i in range(len(entry)):
if not isinstance(entry[i], expectedTypes[i]):
gLogger.error("Unexpected type in report",
": field %d in the records should be %s (and it is %s)" % (i, expectedTypes[i],
type(entry[i])))
return S_ERROR("Unexpected type in report")
records = []
for entry in entriesList:
startTime = int(Time.toEpoch(entry[1]))
endTime = int(Time.toEpoch(entry[2]))
records.append((setup, entry[0], startTime, endTime, entry[3]))
return self.__acDB.insertRecordBundleThroughQueue(records)
types_compactDB = []
def export_compactDB(self):
"""
Compact the db by grouping buckets
"""
# if we are running slaves (not only one service) we can redirect the request to the master
# For more information please read the Administrative guide Accounting part!
# ADVICE: If you want to trigger the bucketing, please make sure the bucketing is not running!!!!
if self.runBucketing:
return self.__acDB.compactBuckets() # pylint: disable=no-member
return RPCClient('Accounting/DataStoreMaster').compactDB()
types_remove = [basestring, datetime.datetime, datetime.datetime, list]
def export_remove(self, typeName, startTime, endTime, valuesList):
"""
Remove a record for a type
"""
setup = self.serviceInfoDict['clientSetup']
startTime = int(Time.toEpoch(startTime))
endTime = int(Time.toEpoch(endTime))
return self.__acDB.deleteRecord( # pylint: disable=no-member
setup,
typeName,
startTime,
endTime,
valuesList)
types_removeRegisters = [list]
def export_removeRegisters(self, entriesList):
"""
Remove a record for a type
"""
setup = self.serviceInfoDict['clientSetup']
expectedTypes = [basestring, datetime.datetime, datetime.datetime, list]
for entry in entriesList:
if len(entry) != 4:
return S_ERROR("Invalid records")
for i in xrange(len(entry)):
if not isinstance(entry[i], expectedTypes[i]):
return S_ERROR("%s field in the records should be %s" % (i, expectedTypes[i]))
ok = 0
for entry in entriesList:
startTime = int(Time.toEpoch(entry[1]))
endTime = int(Time.toEpoch(entry[2]))
record = entry[3]
result = self.__acDB.deleteRecord( # pylint: disable=no-member
setup,
entry[0],
startTime,
endTime,
record)
if not result['OK']:
return S_OK(ok)
ok += 1
return S_OK(ok)
| gpl-3.0 |
40223137/150601 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_loader.py | 738 | 49593 | import sys
import types
import unittest
class Test_TestLoader(unittest.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest.TestCase):
def foo_bar(self): pass
empty_suite = unittest.TestSuite()
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest.TestSuite):
pass
loader = unittest.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
loader = unittest.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(load_tests_args, [loader, suite, None])
load_tests_args = []
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEqual(load_tests_args, [])
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
test = list(suite)[0]
self.assertRaisesRegex(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromName('abc () //')
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf')
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('unittest.sdasfasfasdf')
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf', unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('', unittest)
except AttributeError as e:
pass
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromName('abc () //', unittest)
except ValueError:
pass
except AttributeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignorning the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1.testfoo', m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
loader.suiteClass=SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName(module_name)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([], unittest)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromNames(['abc () //'])
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'])
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''], unittest)
except AttributeError:
pass
else:
self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromNames(['abc () //'], unittest)
except AttributeError:
pass
except ValueError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1.testfoo'], m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
m = types.ModuleType('m')
class Test1(unittest.TestCase):
def test(self):
pass
testcase_1 = Test1('test')
class Foo(unittest.TestCase):
@staticmethod
def foo():
return testcase_1
m.Foo = Foo
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['Foo.foo'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames([module_name])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [unittest.TestSuite()])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest.TestCase):
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
tests_2 = unittest.TestSuite([tests_2])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest.TestLoader()
self.assertEqual(loader.testMethodPrefix, 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
# Since cmp is now defunct, we simply verify that the results
# occur in the same order as they would with the default sort.
def test_sortTestMethodsUsing__default_value(self):
loader = unittest.TestLoader()
class Foo(unittest.TestCase):
def test_2(self): pass
def test_3(self): pass
def test_1(self): pass
test_names = ['test_2', 'test_3', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), sorted(test_names))
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest.TestLoader()
self.assertTrue(loader.suiteClass is unittest.TestSuite)
| agpl-3.0 |
asobrien/carlae | setup.py | 1 | 2182 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------
# Setup script for carlae package
import sys
from setuptools import setup, find_packages
### CONFIGURE BUILD VARIABLES
# Single source of truth for the release version (also passed to setup()).
VERSION = "0.0.2"
### END OF CONFIGURATION
# Get requirements from file
# Every line of requirements.txt becomes an install_requires entry.
with open('requirements.txt') as f:
    required_packages = f.read().splitlines()
# Check for required Python packages
# from: http://www.pytables.org/trac-bck/browser/trunk/db.py?rev=4149
def check_import(pkgname, pkgver):
    """Verify that *pkgname* is importable and at least version *pkgver*.

    On success the module is announced on stdout and published into the
    module globals under its own name; on failure the build aborts with
    an explanatory message.

    NOTE: the version comparison is a plain string comparison, as in the
    original code — it is only reliable for single-digit components.
    """
    def _exit_with_error(*messages):
        # Bug fix: the original called ``exit_with_error``, which is not
        # defined anywhere and would raise NameError. Implement the
        # intended behaviour locally: report the problem and abort.
        print('\n'.join(messages), file=sys.stderr)
        sys.exit(1)

    try:
        mod = __import__(pkgname)
    except ImportError:
        _exit_with_error(
            "Can't find a local %s Python installation." % pkgname,
            "Please read carefully the ``README`` file "
            "and remember that Carlae needs the %s package "
            "to compile and run." % pkgname)
    else:
        if mod.__version__ < pkgver:
            _exit_with_error(
                "You need %(pkgname)s %(pkgver)s or greater to run Carlae!"
                % {'pkgname': pkgname, 'pkgver': pkgver})
        print("* Found %(pkgname)s %(pkgver)s package installed."
              % {'pkgname': pkgname, 'pkgver': mod.__version__})
        # Make the verified module importable by name at module level.
        globals()[pkgname] = mod
# Having the Python version included in the package name makes managing a
# system with multiple versions of Python much easier.
def find_name(base='carlae'):
    """Return the package name, optionally suffixed with "-pyX.Y".

    If "--name-with-python-version" appears on the command line the flag
    is consumed (removed from ``sys.argv``) and the interpreter's
    major.minor version is appended to *base*.
    """
    flag = '--name-with-python-version'
    if flag not in sys.argv:
        return base
    sys.argv.remove(flag)
    major, minor = sys.version_info[0], sys.version_info[1]
    return '%s-py%i.%i' % (base, major, minor)
# NAME may carry a "-pyX.Y" suffix if requested on the command line.
NAME = find_name()
#----------------------------------------------------------------------
setup(name=NAME,
      version=VERSION, # EDIT
      description='A simple URL shortener for your domain',
      author="Anthony O'Brien",
      author_email='anthony@bearonis.com',
      url='https://github.com/asobrien/carlae',
      license='MIT',
      install_requires=required_packages,
      include_package_data=True,
      zip_safe=False,
      packages=find_packages()
) | mit |
sylnsfar/qrcode | amzqr/mylibs/matrix.py | 1 | 6123 | # -*- coding: utf-8 -*-
from amzqr.mylibs.constant import alig_location, format_info_str, version_info_str, lindex
def get_qrmatrix(ver, ecl, bits):
    """Assemble the final QR matrix for version *ver*, error-correction
    level *ecl* and the encoded data *bits*.

    Returns a size x size list of 0/1 rows, size = (ver - 1) * 4 + 21.
    """
    size = (ver - 1) * 4 + 21
    qrmatrix = [[None] * size for _ in range(size)]
    # Fixed patterns first: finders + separators, alignment, timing.
    add_finder_and_separator(qrmatrix)
    add_alignment(ver, qrmatrix)
    add_timing(qrmatrix)
    # Dark module plus the reserved format/version areas.
    add_dark_and_reserving(ver, qrmatrix)
    # Snapshot the reservation map before data placement; the mask
    # patterns must only cover data cells.
    maskmatrix = [row[:] for row in qrmatrix]
    place_bits(bits, qrmatrix)
    mask_num, qrmatrix = mask(maskmatrix, qrmatrix)
    add_format_and_version_string(ver, ecl, mask_num, qrmatrix)
    return qrmatrix
def add_finder_and_separator(m):
    """Draw the three finder patterns together with their separators.

    Fills the 8x8 corner blocks at top-left, bottom-left and top-right:
    dark (1) on the 7x7 finder ring and its 3x3 core, light (0) on the
    separator strip (row/column 7 of each block).
    """
    for row in range(8):
        for col in range(8):
            if row == 7 or col == 7:
                value = 0          # separator strip
            elif row in (0, 6) or col in (0, 6):
                value = 1          # outer ring of the finder
            elif 2 <= row <= 4 and 2 <= col <= 4:
                value = 1          # 3x3 core
            else:
                value = 0          # light ring between core and border
            m[row][col] = m[-row-1][col] = m[row][-col-1] = value
def add_alignment(ver, m):
    """Place alignment patterns for version 2 and above.

    Centres come from the standard table; any centre already covered by
    a finder pattern (cell no longer None) is skipped.
    """
    if ver < 2:
        return
    centres = alig_location[ver - 2]
    for row in centres:
        for col in centres:
            if m[row][col] is None:
                add_an_alignment(row, col, m)
def add_an_alignment(row, column, m):
    """Stamp one 5x5 alignment pattern centred on (row, column):
    dark border, light interior, dark centre cell."""
    for r in range(row - 2, row + 3):
        for c in range(column - 2, column + 3):
            on_border = r in (row - 2, row + 2) or c in (column - 2, column + 2)
            m[r][c] = 1 if on_border else 0
    m[row][column] = 1
def add_timing(m):
    """Draw the horizontal and vertical timing tracks (row and column 6),
    alternating dark/light between the finder patterns."""
    for pos in range(8, len(m) - 8):
        bit = 1 if pos % 2 == 0 else 0
        m[pos][6] = m[6][pos] = bit
def add_dark_and_reserving(ver, m):
    """Reserve the format-information cells, set the dark module, and for
    version 7+ reserve the two 6x3 version-information areas."""
    for k in range(8):
        m[8][k] = m[8][-k-1] = m[k][8] = m[-k-1][8] = 0
    m[8][8] = 0
    # Keep the timing-track crossings dark; m[-8][8] is the fixed dark module.
    m[8][6] = m[6][8] = m[-8][8] = 1
    if ver >= 7:
        for r in range(6):
            for c in (-9, -10, -11):
                m[r][c] = m[c][r] = 0
def place_bits(bits, m):
    """Zig-zag the data bits into every unreserved (None) cell of *m*.

    Columns are consumed two at a time from right to left, hopping over
    the vertical timing column (6), with alternating upward and downward
    passes.
    """
    bit_stream = (int(b) for b in bits)
    upward = True
    for col in range(len(m) - 1, 0, -2):
        if col <= 6:
            col -= 1  # skip the timing column
        rows = range(len(m) - 1, -1, -1) if upward else range(len(m))
        for row in rows:
            for c in (col, col - 1):
                if m[row][c] is None:
                    m[row][c] = next(bit_stream)
        upward = not upward
def mask(mm, m):
    """XOR the data matrix *m* with all eight mask patterns derived from
    the reservation map *mm*, score each result, and return the pair
    (best_pattern_index, best_masked_matrix)."""
    candidates = get_mask_patterns(mm)
    penalties = []
    for candidate in candidates:
        size = len(candidate)
        for r in range(size):
            for c in range(size):
                candidate[r][c] ^= m[r][c]
        penalties.append(compute_score(candidate))
    winner = penalties.index(min(penalties))
    return winner, candidates[winner]
def get_mask_patterns(mm):
    """Return the eight candidate mask matrices for reservation map *mm*.

    Reserved (non-None) cells are forced to 0 so they never toggle data;
    each free cell is 1 exactly where the corresponding mask condition
    holds.  Note that *mm* is modified in place (as in the original).
    """
    conditions = (
        lambda r, c: (r + c) % 2 == 0,
        lambda r, c: r % 2 == 0,
        lambda r, c: c % 3 == 0,
        lambda r, c: (r + c) % 3 == 0,
        lambda r, c: (r // 2 + c // 3) % 2 == 0,
        lambda r, c: (r * c) % 2 + (r * c) % 3 == 0,
        lambda r, c: ((r * c) % 2 + (r * c) % 3) % 2 == 0,
        lambda r, c: ((r + c) % 2 + (r * c) % 3) % 2 == 0,
    )
    # The dark module takes part in masking, so clear its reservation.
    mm[-8][8] = None
    size = len(mm)
    for r in range(size):
        for c in range(size):
            if mm[r][c] is not None:
                mm[r][c] = 0
    patterns = []
    for condition in conditions:
        pattern = [row[:] for row in mm]
        for r in range(size):
            for c in range(size):
                pattern[r][c] = 1 if pattern[r][c] is None and condition(r, c) else 0
        patterns.append(pattern)
    return patterns
def compute_score(m):
    """Return the total mask penalty of matrix *m* (sum of the four
    standard QR penalty rules)."""
    def penalty_runs(matrix):
        # Rule 1: each run of >= 5 equal cells in a row/column costs run-2.
        def line_score(rows):
            total = 0
            for line in rows:
                pos = 0
                while pos < len(line) - 4:
                    run = 4
                    while line[pos:pos + run + 1] in [[1] * (run + 1), [0] * (run + 1)]:
                        run += 1
                    (total, pos) = (total + run - 2, pos + run) if run > 4 else (total, pos + 1)
            return total
        return line_score(matrix) + line_score(list(map(list, zip(*matrix))))

    def penalty_blocks(matrix):
        # Rule 2: every one-coloured 2x2 block costs 3.
        total = 0
        for r in range(len(matrix) - 1):
            for c in range(len(matrix) - 1):
                if matrix[r][c] == matrix[r + 1][c] == matrix[r][c + 1] == matrix[r + 1][c + 1]:
                    total += 3
        return total

    def penalty_finder_like(matrix):
        # Rule 3: finder-like 1011101 runs flanked by four light cells cost 40.
        def line_score(rows):
            total = 0
            for line in rows:
                pos = 0
                while pos < len(line) - 10:
                    if line[pos:pos + 11] == [1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0]:
                        total += 40
                        pos += 7
                    elif line[pos:pos + 11] == [0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1]:
                        total += 40
                        pos += 4
                    else:
                        pos += 1
            return total
        return line_score(matrix) + line_score(list(map(list, zip(*matrix))))

    def penalty_balance(matrix):
        # Rule 4: deviation of the dark-cell ratio from 50%, in 5% steps.
        dark = sum(sum(row) for row in matrix)
        percent = dark / (len(matrix) ** 2) * 100
        step = int((50 - percent) / 5) * 5
        return 2 * step if step >= 0 else -2 * step

    return (penalty_runs(m) + penalty_blocks(m)
            + penalty_finder_like(m) + penalty_balance(m))
def add_format_and_version_string(ver, ecl, mask_num, m):
    """Write the 15-bit format information (placed twice) and, for
    version 7 and above, the two 18-bit version information blocks."""
    format_bits = [int(b) for b in format_info_str[lindex[ecl]][mask_num]]
    for k in range(6):
        m[8][k] = m[-k-1][8] = format_bits[k]
        m[8][-k-1] = m[k][8] = format_bits[-k-1]
    m[8][7] = m[-7][8] = format_bits[6]
    m[8][8] = m[8][-8] = format_bits[7]
    m[7][8] = m[8][-7] = format_bits[8]
    if ver > 6:
        version_bits = (int(b) for b in version_info_str[ver-7])
        for col in range(5, -1, -1):
            for row in (-9, -10, -11):
                m[row][col] = m[col][row] = next(version_bits)
oemof/reegis-hp | reegis_hp/de21/configuration.py | 3 | 15297 | __copyright__ = "Uwe Krien"
__license__ = "GPLv3"
import config as cfg
import os
from oemof.tools import logger
class ConfigurationDe21:
    """Container for de21 configuration dictionaries.

    Instantiating it loads the default and the user-level ini files via
    the project-wide ``config`` module (imported as ``cfg``).
    """
    def __init__(self):
        # Dictionaries filled later by get_configuration().
        self.pattern = dict()
        self.files = dict()
        self.general = dict()
        self.url = dict()
        self.pv = dict()
        here = os.path.dirname(__file__)
        home = os.path.expanduser("~")
        target_ini = [
            os.path.join(here, 'de21_default.ini'),
            os.path.join(here, 'de21_scenario_default.ini'),
            os.path.join(home, '.oemof', 'de21.ini'),
            os.path.join(home, '.oemof', 'de21_scenario.ini'),
        ]
        cfg.load_config(target_ini)
def check_path(pathname):
    """Ensure a usable directory path and return it.

    A ``None`` argument is replaced by the package-local ``data``
    directory; the directory is created when it does not exist yet.
    """
    path = pathname if pathname is not None else os.path.join(
        os.path.dirname(__file__), 'data')
    if not os.path.isdir(path):
        os.makedirs(path)
    return path
def extend_path(ex_path, name):
    """Join *name* onto *ex_path*, create the directory if necessary,
    and return the combined path."""
    combined = os.path.join(ex_path, name)
    return check_path(combined)
def get_list(section, parameter):
    """Read *parameter* from *section* of the config and always return a list.

    A comma separated string is split into stripped items; any value
    without a ``split`` method (e.g. a number) is wrapped into a
    one-element list instead.
    """
    raw = cfg.get(section, parameter)
    try:
        items = [entry.strip() for entry in raw.split(',')]
    except AttributeError:
        items = [raw]
    return items
def create_entries_from_list(dc, section, list_name):
    """Store the configured name list under *list_name* in *dc*, then add
    one entry per listed name holding that name's configured value."""
    names = get_list(section, list_name)
    dc[list_name] = names
    dc.update((entry, cfg.get(section, entry)) for entry in names)
def de21_configuration():
    """Load the de21 ini files and register all de21 data directories.

    Every entry sets ``cfg`` option ``('paths', <target>)`` to an
    existing directory built from a configured base path and
    sub-directory name, creating the directory on demand via
    :func:`extend_path`.  The previous implementation repeated the same
    four-line ``cfg.set(...)`` pattern eighteen times; it is now driven
    by a table, issuing the identical calls in the identical order.
    """
    # initialise de21 configuration
    here = os.path.dirname(__file__)
    home = os.path.expanduser("~")
    target_ini = [
        os.path.join(here, 'de21_default.ini'),
        os.path.join(here, 'de21_scenario_default.ini'),
        os.path.join(home, '.oemof', 'de21.ini'),
        os.path.join(home, '.oemof', 'de21_scenario.ini'),
    ]
    cfg.load_config(target_ini)

    # *************************************************************************
    # ********* set paths *****************************************************
    # *************************************************************************
    # (target key, section holding the options, base-path option, dir option)
    path_table = [
        ('general', 'general_sources', 'path', 'dir'),
        ('weather', 'weather', 'path', 'dir'),
        ('geometry', 'geometry', 'path', 'dir'),
        ('powerplants', 'powerplants', 'path', 'dir'),
        ('conventional', 'conventional', 'path', 'dir'),
        ('renewable', 'renewable', 'path', 'dir'),
        ('static', 'static_sources', 'path', 'dir'),
        # messages keeps its options inside the [paths] section itself
        ('messages', 'paths', 'msg_path', 'msg_dir'),
        ('storages', 'storages', 'path', 'dir'),
        ('transmission', 'transmission', 'path', 'dir'),
        ('commodity', 'commodity_sources', 'path', 'dir'),
        ('time_series', 'time_series', 'path', 'dir'),
        ('demand', 'demand', 'path', 'dir'),
        ('feedin', 'feedin', 'path', 'dir'),
        ('analysis', 'analysis', 'path', 'dir'),
        ('external', 'external', 'path', 'dir'),
        ('plots', 'plots', 'path', 'dir'),
        ('scenario_data', 'scenario_data', 'path', 'dir'),
    ]
    for target, section, path_key, dir_key in path_table:
        cfg.set('paths', target, extend_path(
            cfg.get('paths', cfg.get(section, path_key)),
            cfg.get(section, dir_key)))
def get_configuration():
# initialise class
c = ConfigurationDe21()
# *************************************************************************
# ********* set paths *****************************************************
# *************************************************************************
# general sources
cfg.set('paths', 'general', extend_path(
cfg.get('paths', cfg.get('general_sources', 'path')),
cfg.get('general_sources', 'dir')))
# weather
cfg.set('paths', 'weather', extend_path(
cfg.get('paths', cfg.get('weather', 'path')),
cfg.get('weather', 'dir')))
# geometry
cfg.set('paths', 'geometry', extend_path(
cfg.get('paths', cfg.get('geometry', 'path')),
cfg.get('geometry', 'dir')))
# power plants
cfg.set('paths', 'powerplants', extend_path(
cfg.get('paths', cfg.get('powerplants', 'path')),
cfg.get('powerplants', 'dir')))
cfg.set('paths', 'conventional', extend_path(
cfg.get('paths', cfg.get('conventional', 'path')),
cfg.get('conventional', 'dir')))
cfg.set('paths', 'renewable', extend_path(
cfg.get('paths', cfg.get('renewable', 'path')),
cfg.get('renewable', 'dir')))
# static sources
cfg.set('paths', 'static', extend_path(
cfg.get('paths', cfg.get('static_sources', 'path')),
cfg.get('static_sources', 'dir')))
# messages
cfg.set('paths', 'messages', extend_path(
cfg.get('paths', cfg.get('paths', 'msg_path')),
cfg.get('paths', 'msg_dir')))
# storages
cfg.set('paths', 'storages', extend_path(
cfg.get('paths', cfg.get('storages', 'path')),
cfg.get('storages', 'dir')))
# transmission
cfg.set('paths', 'transmission', extend_path(
cfg.get('paths', cfg.get('transmission', 'path')),
cfg.get('transmission', 'dir')))
# commodity sources
cfg.set('paths', 'commodity', extend_path(
cfg.get('paths', cfg.get('commodity_sources', 'path')),
cfg.get('commodity_sources', 'dir')))
# time series
cfg.set('paths', 'time_series', extend_path(
cfg.get('paths', cfg.get('time_series', 'path')),
cfg.get('time_series', 'dir')))
# demand
cfg.set('paths', 'demand', extend_path(
cfg.get('paths', cfg.get('demand', 'path')),
cfg.get('demand', 'dir')))
# feedin*
cfg.set('paths', 'feedin', extend_path(
cfg.get('paths', cfg.get('feedin', 'path')),
cfg.get('feedin', 'dir')))
# analysis
cfg.set('paths', 'analysis', extend_path(
cfg.get('paths', cfg.get('analysis', 'path')),
cfg.get('analysis', 'dir')))
# external
cfg.set('paths', 'external', extend_path(
cfg.get('paths', cfg.get('external', 'path')),
cfg.get('external', 'dir')))
# plots
cfg.set('paths', 'plots', extend_path(
cfg.get('paths', cfg.get('plots', 'path')),
cfg.get('plots', 'dir')))
# scenario_data
cfg.set('paths', 'scenario_data', extend_path(
cfg.get('paths', cfg.get('scenario_data', 'path')),
cfg.get('scenario_data', 'dir')))
# *************************************************************************
# ********* old stuff *****************************************************
# *************************************************************************
# ********* general ******************************************************
c.general['overwrite'] = cfg.get('general', 'overwrite')
c.general['skip_weather'] = cfg.get('general', 'skip_weather')
c.general['skip_re_power_plants'] = cfg.get('general',
'skip_re_power_plants')
c.general['skip_conv_power_plants'] = cfg.get('general',
'skip_conv_power_plants')
c.general['skip_feedin_weather'] = cfg.get('general', 'skip_feedin_weather')
c.general['skip_feedin_region'] = cfg.get('general', 'skip_feedin_region')
c.general['skip_time_series'] = cfg.get('general', 'skip_time_series')
# ********* download *****************************************************
c.url['conventional_data'] = cfg.get('download', 'url_conventional_data')
c.url['conventional_readme'] = cfg.get('download',
'url_conventional_readme')
c.url['conventional_json'] = cfg.get('download', 'url_conventional_json')
c.url['renewable_data'] = cfg.get('download', 'url_renewable_data')
c.url['renewable_readme'] = cfg.get('download', 'url_renewable_readme')
c.url['renewable_json'] = cfg.get('download', 'url_renewable_json')
c.url['time_series_data'] = cfg.get('download', 'url_timeseries_data')
c.url['time_series_readme'] = cfg.get('download', 'url_timeseries_readme')
c.url['time_series_json'] = cfg.get('download', 'url_timeseries_json')
c.url['bmwi_energiedaten'] = cfg.get('download', 'url_bmwi_energiedaten')
# ********* general sources **********************************************
c.files['bmwi_energiedaten'] = cfg.get(
'general_sources', 'bmwi_energiedaten')
c.files['vg250_ew_shp'] = cfg.get('general_sources', 'vg250_ew_shp')
c.files['vg250_ew_zip'] = cfg.get('general_sources', 'vg250_ew_zip')
# ********* static sources ************************************************
c.files['demand_share'] = cfg.get('static_sources', 'demand_share')
c.files['data_electricity_grid'] = cfg.get('static_sources',
'data_electricity_grid')
c.files['patch_offshore_wind'] = cfg.get('static_sources',
'patch_offshore_wind')
c.files['znes_flens'] = cfg.get('static_sources', 'znes_flens_data')
# ********* weather ******************************************************
c.files['grid_geometry'] = cfg.get('weather', 'grid_polygons')
c.files['region_geometry'] = cfg.get('weather', 'clip_geometry')
c.pattern['weather'] = cfg.get('weather', 'file_pattern')
c.files['average_wind_speed'] = cfg.get('weather', 'avg_wind_speed_file')
# ********* geometry *****************************************************
c.files['federal_states_centroid'] = cfg.get('geometry',
'federalstates_centroid')
c.files['federal_states_polygon'] = cfg.get('geometry',
'federalstates_polygon')
c.files['region_polygons'] = cfg.get('geometry',
'region_polygons')
c.files['region_polygons_simple'] = cfg.get('geometry',
'region_polygons_simple')
c.files['region_labels'] = cfg.get('geometry', 'region_labels')
c.files['powerlines_lines'] = cfg.get('geometry', 'powerlines_lines')
c.files['powerlines_labels'] = cfg.get('geometry', 'powerlines_labels')
c.files['coastdatgrid_centroids'] = cfg.get('geometry',
'coastdatgrid_centroids')
c.files['coastdatgrid_polygons'] = cfg.get('geometry',
'coastdatgrid_polygons')
c.files['postcode'] = cfg.get('geometry', 'postcode_polygons')
# ********* power plants *************************************************
c.pattern['original'] = cfg.get('powerplants', 'original_file_pattern')
c.pattern['fixed'] = cfg.get('powerplants', 'fixed_file_pattern')
c.pattern['prepared'] = cfg.get('powerplants', 'prepared_csv_file_pattern')
c.pattern['grouped'] = cfg.get('powerplants', 'grouped_file_pattern')
c.pattern['readme'] = cfg.get('powerplants', 'readme_file_pattern')
c.pattern['json'] = cfg.get('powerplants', 'json_file_pattern')
c.pattern['shp'] = cfg.get('powerplants', 'shp_file_pattern')
c.files['transformer'] = cfg.get('powerplants', 'transformer_file')
c.files['sources'] = cfg.get('powerplants', 'sources_file')
# ********* storages ******************************************************
c.files['hydro_storages'] = cfg.get('storages', 'hydro_storages_file')
c.files['hydro_storages_de21'] = cfg.get(
'storages', 'grouped_storages_file')
# ********* transmission **************************************************
c.files['transmission_data'] = cfg.get('transmission',
'transmission_data_file')
c.files['transmission_de21'] = cfg.get('transmission',
'transmission_de21_file')
c.general['security_factor'] = cfg.get('transmission', 'security_factor')
c.general['current_max'] = cfg.get('transmission', 'current_max')
# ********* commodity sources *********************************************
c.files['commodity_sources'] = cfg.get('commodity_sources',
'commodity_sources_file')
# ********* feedin ********************************************************
c.pattern['feedin'] = cfg.get('feedin', 'feedin_file_pattern')
c.pattern['feedin_de21'] = cfg.get('feedin', 'feedin_de21_pattern')
c.general['solar_set'] = cfg.get('solar', 'solar_set')
# ******** scenario ******************************************************
c.general['name'] = cfg.get('general', 'name')
c.general['year'] = cfg.get('general', 'year')
c.general['weather_year'] = cfg.get('general', 'weather_year')
c.general['demand_year'] = cfg.get('general', 'demand_year')
c.general['optimisation_target'] = cfg.get('general', 'optimisation_target')
c.general['local_sources'] = get_list('general', 'local_commodity_sources')
c.files['renewable_capacities'] = cfg.get('files', 'renewable_capacities')
create_entries_from_list(c.pv, 'pv', 'module_inverter_types')
create_entries_from_list(c.pv, 'pv', 'orientation_types')
return c
| gpl-3.0 |
AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/python/training/server_lib_multiple_containers_test.py | 133 | 2414 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class MultipleContainersTest(test.TestCase):
  """Checks tf.Session.reset() semantics across resource containers."""

  # Verifies behavior of tf.Session.reset() with multiple containers using
  # tf.container.
  # TODO(b/34465411): Starting multiple servers with different configurations
  # in the same test is flaky. Move this test case back into
  # "server_lib_test.py" when this is no longer the case.
  def testMultipleContainers(self):
    # Two variables with the same name, placed in different containers.
    with ops.container("test0"):
      v0 = variables.Variable(1.0, name="v0")
    with ops.container("test1"):
      v1 = variables.Variable(2.0, name="v0")
    server = server_lib.Server.create_local_server()
    sess = session.Session(server.target)
    sess.run(variables.global_variables_initializer())
    self.assertAllEqual(1.0, sess.run(v0))
    self.assertAllEqual(2.0, sess.run(v1))
    # Resets container. Session aborts.
    session.Session.reset(server.target, ["test0"])
    with self.assertRaises(errors_impl.AbortedError):
      sess.run(v1)
    # Connects to the same target. Device memory for the v0 would have
    # been released, so it will be uninitialized. But v1 should still
    # be valid.
    sess = session.Session(server.target)
    with self.assertRaises(errors_impl.FailedPreconditionError):
      sess.run(v0)
    self.assertAllEqual(2.0, sess.run(v1))
if __name__ == "__main__":
test.main()
| apache-2.0 |
legalsylvain/OpenUpgrade | addons/resource/tests/__init__.py | 121 | 1146 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.resource.tests import test_resource

# Test modules picked up by OpenERP's legacy test runner for this addon.
checks = [
    test_resource,
]

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Eseoghene/bite-project | deps/gdata-python-client/src/gdata/calendar_resource/client.py | 49 | 7210 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CalendarResourceClient simplifies Calendar Resources API calls.
CalendarResourceClient extends gdata.client.GDClient to ease interaction with
the Google Apps Calendar Resources API. These interactions include the ability
to create, retrieve, update, and delete calendar resources in a Google Apps
domain.
"""
__author__ = 'Vic Fryzel <vf@google.com>'
import gdata.calendar_resource.data
import gdata.client
import urllib
# Feed URI template. This must end with a /
# The strings in this template are eventually replaced with the API version
# and Google Apps domain name, respectively.
RESOURCE_FEED_TEMPLATE = '/a/feeds/calendar/resource/%s/%s/'
class CalendarResourceClient(gdata.client.GDClient):
  """Client extension for the Google Calendar Resource API service.

  Attributes:
    host: string The hostname for the Calendar Resource API service.
    api_version: string The version of the Calendar Resource API.
  """

  host = 'apps-apis.google.com'
  api_version = '2.0'
  auth_service = 'apps'
  # Auth scopes are shared with the other Google Apps provisioning APIs.
  auth_scopes = gdata.gauth.AUTH_SCOPES['apps']
  ssl = True  # all requests go over HTTPS

  def __init__(self, domain, auth_token=None, **kwargs):
    """Constructs a new client for the Calendar Resource API.

    Args:
      domain: string The Google Apps domain with Calendar Resources.
      auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
          OAuthToken which authorizes this client to edit the calendar resource
          data.
      kwargs: The other parameters to pass to the gdata.client.GDClient
          constructor.
    """
    gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
    self.domain = domain

  def make_resource_feed_uri(self, resource_id=None, params=None):
    """Creates a resource feed URI for the Calendar Resource API.

    Using this client's Google Apps domain, create a feed URI for calendar
    resources in that domain. If a resource_id is provided, return a URI
    for that specific resource.  If params are provided, append them as GET
    params.

    Args:
      resource_id: string (optional) The ID of the calendar resource for which
          to make a feed URI.
      params: dict (optional) key -> value params to append as GET vars to the
          URI. Example: params={'start': 'my-resource-id'}

    Returns:
      A string giving the URI for calendar resources for this client's Google
      Apps domain.
    """
    uri = RESOURCE_FEED_TEMPLATE % (self.api_version, self.domain)
    if resource_id:
      uri += resource_id
    if params:
      uri += '?' + urllib.urlencode(params)
    return uri

  # Legacy CamelCase alias kept for backwards compatibility.
  MakeResourceFeedUri = make_resource_feed_uri

  def get_resource_feed(self, uri=None, **kwargs):
    """Fetches a ResourceFeed of calendar resources at the given URI.

    Args:
      uri: string The URI of the feed to pull.
      kwargs: The other parameters to pass to gdata.client.GDClient.get_feed().

    Returns:
      A ResourceFeed object representing the feed at the given URI.
    """
    if uri is None:
      uri = self.MakeResourceFeedUri()
    return self.get_feed(
        uri,
        desired_class=gdata.calendar_resource.data.CalendarResourceFeed,
        **kwargs)

  # Legacy CamelCase alias kept for backwards compatibility.
  GetResourceFeed = get_resource_feed

  def get_resource(self, uri=None, resource_id=None, **kwargs):
    """Fetches a single calendar resource by resource ID.

    Args:
      uri: string The base URI of the feed from which to fetch the resource.
      resource_id: string The string ID of the Resource to fetch.
      kwargs: The other parameters to pass to gdata.client.GDClient.get_entry().

    Returns:
      A Resource object representing the calendar resource with the given
      base URI and resource ID.
    """
    if uri is None:
      uri = self.MakeResourceFeedUri(resource_id)
    return self.get_entry(
        uri,
        desired_class=gdata.calendar_resource.data.CalendarResourceEntry,
        **kwargs)

  # Legacy CamelCase alias kept for backwards compatibility.
  GetResource = get_resource

  def create_resource(self, resource_id, resource_common_name=None,
                      resource_description=None, resource_type=None, **kwargs):
    """Creates a calendar resource with the given properties.

    Args:
      resource_id: string The resource ID of the calendar resource.
      resource_common_name: string (optional) The common name of the resource.
      resource_description: string (optional) The description of the resource.
      resource_type: string (optional) The type of the resource.
      kwargs: The other parameters to pass to gdata.client.GDClient.post().

    Returns:
      gdata.calendar_resource.data.CalendarResourceEntry of the new resource.
    """
    new_resource = gdata.calendar_resource.data.CalendarResourceEntry(
        resource_id=resource_id,
        resource_common_name=resource_common_name,
        resource_description=resource_description,
        resource_type=resource_type)
    return self.post(new_resource, self.MakeResourceFeedUri(), **kwargs)

  # Legacy CamelCase alias kept for backwards compatibility.
  CreateResource = create_resource

  def update_resource(self, resource_id, resource_common_name=None,
                      resource_description=None, resource_type=None, **kwargs):
    """Updates the calendar resource with the given resource ID.

    Args:
      resource_id: string The resource ID of the calendar resource to update.
      resource_common_name: string (optional) The common name to give the
          resource.
      resource_description: string (optional) The description to give the
          resource.
      resource_type: string (optional) The type to give the resource.
      kwargs: The other parameters to pass to gdata.client.GDClient.update().

    Returns:
      gdata.calendar_resource.data.CalendarResourceEntry of the updated
      resource.
    """
    new_resource = gdata.calendar_resource.data.CalendarResourceEntry(
        resource_id=resource_id,
        resource_common_name=resource_common_name,
        resource_description=resource_description,
        resource_type=resource_type)
    return self.update(new_resource, uri=self.MakeResourceFeedUri(resource_id),
                       **kwargs)

  # Legacy CamelCase alias kept for backwards compatibility.
  UpdateResource = update_resource

  def delete_resource(self, resource_id, **kwargs):
    """Deletes the calendar resource with the given resource ID.

    Args:
      resource_id: string The resource ID of the calendar resource to delete.
      kwargs: The other parameters to pass to gdata.client.GDClient.delete()

    Returns:
      An HTTP response object.  See gdata.client.request().
    """
    return self.delete(self.MakeResourceFeedUri(resource_id), **kwargs)

  # Legacy CamelCase alias kept for backwards compatibility.
  DeleteResource = delete_resource
| apache-2.0 |
timothydmorton/bokeh | bokeh/server/utils/plugins.py | 29 | 2809 | """ Utilities for writing plugins.
This is different from bokeh.pluginutils because these are ways of
patching routes and objects directly into the bokeh server. You
would run this type of code using the --script option
"""
from __future__ import absolute_import

import functools
import uuid

from flask import abort, render_template

from bokeh.exceptions import DataIntegrityException
from bokeh.resources import Resources

from ..app import bokeh_app
from ..views.backbone import init_bokeh
from ..views.main import _makedoc
from ..settings import settings as server_settings
def object_page(prefix):
    """ Decorator for a function which turns an object into a web page

    Usage::

        from bokeh.server.app import bokeh_app

        @bokeh_app.route("/myapp")
        @object_page("mypage")
        def make_object():
            #make some bokeh object here
            return obj

    This decorator will

    - create a randomized title for a bokeh document using the prefix
    - initialize bokeh plotting libraries to use that document
    - call the function you pass in, add that object to the plot context
    - render that object in a web page

    Args:
        prefix (str): prefix for the randomized document name.

    Returns:
        A decorator that wraps an object-producing function into a Flask view.
    """
    def decorator(func):
        # functools.wraps preserves __name__ (as the old manual assignment
        # did) and additionally __doc__/__module__, which Flask may use.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            ## setup the randomly titled document
            docname = prefix + str(uuid.uuid4())
            bokehuser = bokeh_app.current_user()
            try:
                doc = _makedoc(bokeh_app.servermodel_storage, bokehuser, docname)
                doc.published = True
                doc.save(bokeh_app.servermodel_storage)
            except DataIntegrityException as e:
                # 409 Conflict: a document with this name already exists.
                return abort(409, e.message)
            docid = doc.docid
            clientdoc = bokeh_app.backbone_storage.get_document(docid)
            ## initialize our plotting APIs to use that document
            init_bokeh(clientdoc)
            obj = func(*args, **kwargs)
            clientdoc.add(obj)
            bokeh_app.backbone_storage.store_document(clientdoc)
            # Some objects carry extra generated classes the template needs;
            # fall back to an empty list when the attribute is absent.
            extra_generated_classes = getattr(obj, 'extra_generated_classes', [])
            resources = Resources()
            return render_template("oneobj.html",
                                   elementid=str(uuid.uuid4()),
                                   docid=docid,
                                   objid=obj._id,
                                   hide_navbar=True,
                                   extra_generated_classes=extra_generated_classes,
                                   splitjs=server_settings.splitjs,
                                   public='true',
                                   loglevel=resources.log_level)
        return wrapper
    return decorator
| bsd-3-clause |
BT-fgarbely/l10n-switzerland | l10n_ch_states/__openerp__.py | 4 | 1342 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Mathias Neef
# Copyright 2015 copadoMEDIA UG
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP 8.0 addon manifest for the Swiss country-states data module.
{
    'name': 'Switzerland Country States',
    'category': 'Localisation',
    'summary': '',
    'version': '8.0.1.0.0',
    'author': 'copado MEDIA UG, Odoo Community Association (OCA)',
    'website': 'http://www.copado.de',
    'license': 'AGPL-3',
    # Only 'base' is needed: the module just loads res.country.state records.
    'depends': [
        'base',
    ],
    'data': ['data/res_country_states.xml'],
    'demo': [],
    'installable': True,
    'application': False,
}
| agpl-3.0 |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/test/test_import.py | 48 | 14273 | import unittest
import os
import random
import shutil
import sys
import py_compile
import warnings
import marshal
from test.test_support import unlink, TESTFN, unload, run_unittest, check_warnings
def remove_files(name):
    """Delete every source/compiled artifact produced for module *name*.

    Silently skips artifacts that do not exist on disk.
    """
    suffixes = (
        os.extsep + "py",
        os.extsep + "pyc",
        os.extsep + "pyo",
        os.extsep + "pyw",
        "$py.class",  # Jython's compiled form
    )
    for suffix in suffixes:
        path = name + suffix
        if os.path.exists(path):
            os.remove(path)
class ImportTest(unittest.TestCase):
    """Tests of the core __import__/reload machinery (Python 2 era)."""

    def testCaseSensitivity(self):
        # Brief digression to test that import is case-sensitive:  if we got this
        # far, we know for sure that "random" exists.
        try:
            import RAnDoM
        except ImportError:
            pass
        else:
            self.fail("import of RAnDoM should have failed (case mismatch)")

    def testDoubleConst(self):
        # Another brief digression to test the accuracy of manifest float constants.
        from test import double_const  # don't blink -- that *was* the test

    def testImport(self):
        def test_with_extension(ext):
            # ext normally ".py"; perhaps ".pyw"
            source = TESTFN + ext
            pyo = TESTFN + os.extsep + "pyo"
            if sys.platform.startswith('java'):
                pyc = TESTFN + "$py.class"
            else:
                pyc = TESTFN + os.extsep + "pyc"
            # Write a module with two random attributes so we can verify its
            # contents after the round trip.
            f = open(source, "w")
            print >> f, "# This tests Python's ability to import a", ext, "file."
            a = random.randrange(1000)
            b = random.randrange(1000)
            print >> f, "a =", a
            print >> f, "b =", b
            f.close()
            try:
                try:
                    mod = __import__(TESTFN)
                except ImportError, err:
                    self.fail("import from %s failed: %s" % (ext, err))
                self.assertEquals(mod.a, a,
                    "module loaded (%s) but contents invalid" % mod)
                self.assertEquals(mod.b, b,
                    "module loaded (%s) but contents invalid" % mod)
            finally:
                os.unlink(source)
            # Removing the source and reloading should hit the .pyc/.pyo.
            try:
                try:
                    reload(mod)
                except ImportError, err:
                    self.fail("import from .pyc/.pyo failed: %s" % err)
            finally:
                try:
                    os.unlink(pyc)
                except OSError:
                    pass
                try:
                    os.unlink(pyo)
                except OSError:
                    pass
                del sys.modules[TESTFN]

        sys.path.insert(0, os.curdir)
        try:
            test_with_extension(os.extsep + "py")
            if sys.platform.startswith("win"):
                # Windows is case-insensitive: all casings must import.
                for ext in ".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw":
                    test_with_extension(ext)
        finally:
            del sys.path[0]

    def testImpModule(self):
        # Verify that the imp module can correctly load and find .py files
        import imp
        x = imp.find_module("os")
        os = imp.load_module("os", *x)

    def test_module_with_large_stack(self, module='longlist'):
        # create module w/list of 65000 elements to test bug #561858
        filename = module + os.extsep + 'py'
        # create a file with a list of 65000 elements
        f = open(filename, 'w+')
        f.write('d = [\n')
        for i in range(65000):
            f.write('"",\n')
        f.write(']')
        f.close()
        # compile & remove .py file, we only need .pyc (or .pyo)
        f = open(filename, 'r')
        py_compile.compile(filename)
        f.close()
        os.unlink(filename)
        # need to be able to load from current dir
        sys.path.append('')
        # this used to crash
        exec 'import ' + module
        # cleanup
        del sys.path[-1]
        for ext in 'pyc', 'pyo':
            fname = module + os.extsep + ext
            if os.path.exists(fname):
                os.unlink(fname)

    def test_failing_import_sticks(self):
        source = TESTFN + os.extsep + "py"
        f = open(source, "w")
        print >> f, "a = 1/0"
        f.close()
        # New in 2.4, we shouldn't be able to import that no matter how often
        # we try.
        sys.path.insert(0, os.curdir)
        try:
            for i in 1, 2, 3:
                try:
                    mod = __import__(TESTFN)
                except ZeroDivisionError:
                    if TESTFN in sys.modules:
                        self.fail("damaged module in sys.modules on %i. try" % i)
                else:
                    self.fail("was able to import a damaged module on %i. try" % i)
        finally:
            sys.path.pop(0)
            remove_files(TESTFN)

    def test_failing_reload(self):
        # A failing reload should leave the module object in sys.modules.
        source = TESTFN + os.extsep + "py"
        f = open(source, "w")
        print >> f, "a = 1"
        print >> f, "b = 2"
        f.close()
        sys.path.insert(0, os.curdir)
        try:
            mod = __import__(TESTFN)
            self.assert_(TESTFN in sys.modules, "expected module in sys.modules")
            self.assertEquals(mod.a, 1, "module has wrong attribute values")
            self.assertEquals(mod.b, 2, "module has wrong attribute values")
            # On WinXP, just replacing the .py file wasn't enough to
            # convince reload() to reparse it.  Maybe the timestamp didn't
            # move enough.  We force it to get reparsed by removing the
            # compiled file too.
            remove_files(TESTFN)
            # Now damage the module.
            f = open(source, "w")
            print >> f, "a = 10"
            print >> f, "b = 20//0"
            f.close()
            self.assertRaises(ZeroDivisionError, reload, mod)
            # But we still expect the module to be in sys.modules.
            mod = sys.modules.get(TESTFN)
            self.failIf(mod is None, "expected module to still be in sys.modules")
            # We should have replaced a w/ 10, but the old b value should
            # stick.
            self.assertEquals(mod.a, 10, "module has wrong attribute values")
            self.assertEquals(mod.b, 2, "module has wrong attribute values")
        finally:
            sys.path.pop(0)
            remove_files(TESTFN)
            if TESTFN in sys.modules:
                del sys.modules[TESTFN]

    def test_infinite_reload(self):
        # Bug #742342 reports that Python segfaults (infinite recursion in C)
        # when faced with self-recursive reload()ing.
        sys.path.insert(0, os.path.dirname(__file__))
        try:
            import infinite_reload
        finally:
            sys.path.pop(0)

    def test_import_name_binding(self):
        # import x.y.z binds x in the current namespace
        import test as x
        import test.test_support
        self.assert_(x is test, x.__name__)
        self.assert_(hasattr(test.test_support, "__file__"))

        # import x.y.z as w binds z as w
        import test.test_support as y
        self.assert_(y is test.test_support, y.__name__)

    def test_import_initless_directory_warning(self):
        with warnings.catch_warnings():
            # Just a random non-package directory we always expect to be
            # somewhere in sys.path...
            warnings.simplefilter('error', ImportWarning)
            self.assertRaises(ImportWarning, __import__, "site-packages")

    def test_importbyfilename(self):
        path = os.path.abspath(TESTFN)
        try:
            __import__(path)
        except ImportError, err:
            self.assertEqual("Import by filename is not supported.",
                             err.args[0])
        else:
            self.fail("import by path didn't raise an exception")
class TestPycRewriting(unittest.TestCase):
    # Test that the `co_filename` attribute on code objects always points
    # to the right file, even when various things happen (e.g. both the .py
    # and the .pyc file are renamed).

    module_name = "unlikely_module_name"
    # Source written to disk for each test; captures the filename as seen by
    # the module, by an executing frame, and by a function's code object.
    module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
    pass
func_filename = func.func_code.co_filename
"""
    dir_name = os.path.abspath(TESTFN)
    file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
    # .pyc in normal runs, .pyo when running with -O.
    compiled_name = file_name + ("c" if __debug__ else "o")

    def setUp(self):
        # Snapshot sys.path and any pre-existing module of the same name so
        # tearDown can restore them.
        self.sys_path = sys.path[:]
        self.orig_module = sys.modules.pop(self.module_name, None)
        os.mkdir(self.dir_name)
        with open(self.file_name, "w") as f:
            f.write(self.module_source)
        sys.path.insert(0, self.dir_name)

    def tearDown(self):
        sys.path[:] = self.sys_path
        if self.orig_module is not None:
            sys.modules[self.module_name] = self.orig_module
        else:
            del sys.modules[self.module_name]
        for file_name in self.file_name, self.compiled_name:
            if os.path.exists(file_name):
                os.remove(file_name)
        if os.path.exists(self.dir_name):
            shutil.rmtree(self.dir_name)

    def import_module(self):
        # Import the scratch module and return the module object.
        ns = globals()
        __import__(self.module_name, ns, ns)
        return sys.modules[self.module_name]

    def test_basics(self):
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)
        # Second import loads from the compiled file, but code objects must
        # still report the .py filename.
        del sys.modules[self.module_name]
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.compiled_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)

    def test_incorrect_code_name(self):
        # dfile embeds a bogus filename into the compiled code.
        py_compile.compile(self.file_name, dfile="another_module.py")
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.compiled_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)

    def test_module_without_source(self):
        # With the .py gone, the embedded (dfile) name is all that's left.
        target = "another_module.py"
        py_compile.compile(self.file_name, dfile=target)
        os.remove(self.file_name)
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.compiled_name)
        self.assertEqual(mod.code_filename, target)
        self.assertEqual(mod.func_filename, target)

    def test_foreign_code(self):
        # Splice a code object from another file into the compiled module and
        # check its co_filename is left untouched by import.
        py_compile.compile(self.file_name)
        with open(self.compiled_name, "rb") as f:
            header = f.read(8)  # magic number + timestamp
            code = marshal.load(f)
        constants = list(code.co_consts)
        foreign_code = test_main.func_code
        pos = constants.index(1)
        constants[pos] = foreign_code
        code = type(code)(code.co_argcount, code.co_nlocals, code.co_stacksize,
                          code.co_flags, code.co_code, tuple(constants),
                          code.co_names, code.co_varnames, code.co_filename,
                          code.co_name, code.co_firstlineno, code.co_lnotab,
                          code.co_freevars, code.co_cellvars)
        with open(self.compiled_name, "wb") as f:
            f.write(header)
            marshal.dump(code, f)
        mod = self.import_module()
        self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
    """Tests for import behavior with unusual sys.path entries."""

    path = TESTFN

    def setUp(self):
        os.mkdir(self.path)
        self.syspath = sys.path[:]

    def tearDown(self):
        shutil.rmtree(self.path)
        sys.path = self.syspath

    # http://bugs.python.org/issue1293
    def test_trailing_slash(self):
        # A sys.path entry with a trailing slash must still be importable from.
        f = open(os.path.join(self.path, 'test_trailing_slash.py'), 'w')
        f.write("testdata = 'test_trailing_slash'")
        f.close()
        sys.path.append(self.path+'/')
        mod = __import__("test_trailing_slash")
        self.assertEqual(mod.testdata, 'test_trailing_slash')
        unload("test_trailing_slash")
class RelativeImport(unittest.TestCase):
    """Tests for explicit relative imports and __package__ handling."""

    def tearDown(self):
        try:
            del sys.modules["test.relimport"]
        except:
            pass

    def test_relimport_star(self):
        # This will import * from .test_import.
        from . import relimport
        self.assertTrue(hasattr(relimport, "RelativeImport"))

    def test_issue3221(self):
        # Exercise absolute and relative imports under various (possibly
        # inconsistent) __package__/__name__ combinations in namespace `ns`.
        def check_absolute():
            exec "from os import path" in ns
        def check_relative():
            exec "from . import relimport" in ns
        # Check both OK with __package__ and __name__ correct
        ns = dict(__package__='test', __name__='test.notarealmodule')
        check_absolute()
        check_relative()
        # Check both OK with only __name__ wrong
        ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
        check_absolute()
        check_relative()
        # Check relative fails with only __package__ wrong
        ns = dict(__package__='foo', __name__='test.notarealmodule')
        with check_warnings() as w:
            check_absolute()
            self.assert_('foo' in str(w.message))
            self.assertEqual(w.category, RuntimeWarning)
        self.assertRaises(SystemError, check_relative)
        # Check relative fails with __package__ and __name__ wrong
        ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
        with check_warnings() as w:
            check_absolute()
            self.assert_('foo' in str(w.message))
            self.assertEqual(w.category, RuntimeWarning)
        self.assertRaises(SystemError, check_relative)
        # Check both fail with package set to a non-string
        ns = dict(__package__=object())
        self.assertRaises(ValueError, check_absolute)
        self.assertRaises(ValueError, check_relative)
def test_main(verbose=None):
    # regrtest entry point: run every TestCase class defined in this module.
    run_unittest(ImportTest, TestPycRewriting, PathsTests, RelativeImport)

if __name__ == '__main__':
    # test needs to be a package, so we can do relative import
    from test.test_import import test_main
    test_main()
| apache-2.0 |
alexlib/openptv | liboptv/tests/gen_track_data.py | 3 | 1925 | """
Generate a 5-frame trajectory that is pretty degenerates so is good for
testing. It starts from (0,0,0) and moves in a straight line on the x axis,
at a slow velocity.
"""
import numpy as np
from optv.calibration import Calibration
from optv.parameters import ControlParams
from optv.imgcoord import image_coordinates
from optv.transforms import convert_arr_metric_to_pixel
# Simulation setup: 3 cameras observe 5 frames of a single particle that
# starts at the origin and moves along the x axis at constant slow velocity.
num_cams = 3
num_frames = 5
velocity = 0.01

# Rows are per-frame (x, y, z) positions; only x advances, y and z stay 0.
part_traject = np.zeros((num_frames,3))
part_traject[:,0] = np.r_[:num_frames]*velocity

# Find targets on each camera.
cpar = ControlParams(3)
cpar.read_control_par("testing_fodder/track/parameters/control_newpart.par")

# Project the trajectory through each camera's calibration into pixel
# coordinates; targs[cam] holds the per-frame 2D target positions.
targs = []
for cam in xrange(num_cams):
    cal = Calibration()
    cal.from_file(
        "testing_fodder/cal/sym_cam%d.tif.ori" % (cam + 1),
        "testing_fodder/cal/cam1.tif.addpar")
    targs.append(convert_arr_metric_to_pixel(image_coordinates(
        part_traject, cal, cpar.get_multimedia_params()), cpar))

for frame in xrange(num_frames):
    # write 3D positions:
    with open("testing_fodder/track/res_orig/particles.%d" % (frame + 1),
              "w") as outfile:
        # Note correspondence to the single target in each frame.
        # NOTE(review): the z column below reuses index 1 (y) instead of 2 —
        # harmless here since y == z == 0 for this trajectory, but verify intent.
        outfile.writelines([
            str(1) + "\n",
            "{:5d}{:10.3f}{:10.3f}{:10.3f}{:5d}{:5d}{:5d}{:5d}\n".format(
                1, part_traject[frame,0], part_traject[frame,1],
                part_traject[frame,1], 0, 0, 0, 0)])

    # write associated targets from all cameras:
    for cam in xrange(num_cams):
        with open("testing_fodder/track/newpart/cam%d.%04d_targets" \
                % (cam + 1, frame + 1), "w") as outfile:
            outfile.writelines([
                str(1) + "\n",
                "{:5d}{:10.3f}{:10.3f}{:5d}{:5d}{:5d}{:10d}{:5d}\n".format(
                    0, targs[cam][frame, 0], targs[cam][frame, 1],
                    100, 10, 10, 10000, 0)])

# That's all, folks!
| lgpl-3.0 |
DalikarFT/CFVOP | venv/Lib/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py | 1777 | 3764 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Shift_JIS charset prober.

    Combines a byte-level coding state machine with two confidence sources:
    a character distribution analysis and a context analysis.  The reported
    charset name is delegated to the context analyzer.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        # The context analyzer decides the exact name (e.g. SHIFT_JIS vs CP932).
        return self._mContextAnalyzer.get_charset_name()

    def feed(self, aBuf):
        # Run every byte through the state machine; whenever a complete
        # character is recognized (eStart), feed it to both analyzers.
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character may straddle feed() calls: combine the byte(s)
                    # carried over in _mLastChar with the first byte of aBuf.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    # NOTE(review): the context analyzer's slice window is
                    # offset two bytes ahead of the distribution analyzer's;
                    # preserved as-is from upstream chardet.
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the trailing byte for the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        # Report the more confident of the two analyses.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| gpl-3.0 |
Intel-tensorflow/tensorflow | tensorflow/python/summary/writer/event_file_writer_v2.py | 6 | 5788 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Writes events to disk in a logdir."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import gfile
class EventFileWriterV2(object):
  """Writes `Event` protocol buffers to an event file via the graph.
  The `EventFileWriterV2` class is backed by the summary file writer in the v2
  summary API (currently in tf.contrib.summary), so it uses a shared summary
  writer resource and graph ops to write events.
  As with the original EventFileWriter, this class will asynchronously write
  Event protocol buffers to the backing file. The Event file is encoded using
  the tfrecord format, which is similar to RecordIO.
  """
  def __init__(self, session, logdir, max_queue=10, flush_secs=120,
               filename_suffix=''):
    """Creates an `EventFileWriterV2` and an event file to write to.
    On construction, this calls `tf.contrib.summary.create_file_writer` within
    the graph from `session.graph` to look up a shared summary writer resource
    for `logdir` if one exists, and create one if not. Creating the summary
    writer resource in turn creates a new event file in `logdir` to be filled
    with `Event` protocol buffers passed to `add_event`. Graph ops to control
    this writer resource are added to `session.graph` during this init call;
    stateful methods on this class will call `session.run()` on these ops.
    Note that because the underlying resource is shared, it is possible that
    other parts of the code using the same session may interact independently
    with the resource, e.g. by flushing or even closing it. It is the caller's
    responsibility to avoid any undesirable sharing in this regard.
    The remaining arguments to the constructor (`flush_secs`, `max_queue`, and
    `filename_suffix`) control the construction of the shared writer resource
    if one is created. If an existing resource is reused, these arguments have
    no effect.  See `tf.contrib.summary.create_file_writer` for details.
    Args:
      session: A `tf.compat.v1.Session`. Session that will hold shared writer
        resource. The writer ops will be added to session.graph during this
        init call.
      logdir: A string. Directory where event file will be written.
      max_queue: Integer. Size of the queue for pending events and summaries.
      flush_secs: Number. How often, in seconds, to flush the
        pending events and summaries to disk.
      filename_suffix: A string. Every event file's name is suffixed with
        `filename_suffix`.
    """
    self._session = session
    self._logdir = logdir
    self._closed = False
    gfile.MakeDirs(self._logdir)
    # Build all writer-control ops inside the session's graph so that the
    # stateful methods below can drive them via session.run().
    with self._session.graph.as_default():
      with ops.name_scope('filewriter'):
        file_writer = summary_ops_v2.create_file_writer(
            logdir=self._logdir,
            max_queue=max_queue,
            flush_millis=flush_secs * 1000,
            filename_suffix=filename_suffix)
      with summary_ops_v2.always_record_summaries(), file_writer.as_default():
        # Placeholder through which arbitrary serialized Event protos are
        # fed at add_event() time; the default value is never imported
        # because add_event always supplies a feed.
        self._event_placeholder = array_ops.placeholder_with_default(
            constant_op.constant('unused', dtypes.string),
            shape=[])
        self._add_event_op = summary_ops_v2.import_event(
            self._event_placeholder)
      self._init_op = file_writer.init()  # pylint: disable=assignment-from-no-return
      self._flush_op = file_writer.flush()  # pylint: disable=assignment-from-no-return
      self._close_op = file_writer.close()  # pylint: disable=assignment-from-no-return
    # Initialize the shared writer resource immediately.
    self._session.run(self._init_op)
  def get_logdir(self):
    """Returns the directory where event file will be written."""
    return self._logdir
  def reopen(self):
    """Reopens the EventFileWriter.
    Can be called after `close()` to add more events in the same directory.
    The events will go into a new events file.
    Does nothing if the EventFileWriter was not closed.
    """
    if self._closed:
      self._closed = False
      # Re-running the init op re-initializes the shared writer resource,
      # which opens a new events file.
      self._session.run(self._init_op)
  def add_event(self, event):
    """Adds an event to the event file.
    Args:
      event: An `Event` protocol buffer.
    """
    # Silently dropped when closed; matches the v1 EventFileWriter contract.
    if not self._closed:
      event_pb = event.SerializeToString()
      self._session.run(
          self._add_event_op, feed_dict={self._event_placeholder: event_pb})
  def flush(self):
    """Flushes the event file to disk.
    Call this method to make sure that all pending events have been written to
    disk.
    """
    self._session.run(self._flush_op)
  def close(self):
    """Flushes the event file to disk and close the file.
    Call this method when you do not need the summary writer anymore.
    """
    if not self._closed:
      self.flush()
      self._session.run(self._close_op)
      self._closed = True
| apache-2.0 |
ddayguerrero/blogme | flask/lib/python3.4/site-packages/sqlalchemy/testing/assertions.py | 11 | 16112 | # testing/assertions.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from . import util as testutil
from sqlalchemy import pool, orm, util
from sqlalchemy.engine import default, url
from sqlalchemy.util import decorator
from sqlalchemy import types as sqltypes, schema, exc as sa_exc
import warnings
import re
from .exclusions import db_spec, _is_excluded
from . import assertsql
from . import config
from .util import fail
import contextlib
from . import mock
def expect_warnings(*messages, **kw):
    """Context manager which expects one or more warnings.
    With no arguments, squelches all SAWarnings emitted via
    sqlalchemy.util.warn and sqlalchemy.util.warn_limited.   Otherwise
    pass string expressions that will match selected warnings via regex;
    all non-matching warnings are sent through.
    The expect version **asserts** that the warnings were in fact seen.
    Note that the test suite sets SAWarning warnings to raise exceptions.
    """
    # thin wrapper fixing the warning class; see _expect_warnings for the
    # actual matching/squelching mechanics (kw: regex=True, assert_=True)
    return _expect_warnings(sa_exc.SAWarning, messages, **kw)
@contextlib.contextmanager
def expect_warnings_on(db, *messages, **kw):
    """Context manager expecting one or more warnings, but only when the
    current backend matches ``db``; on any other backend the block runs
    with no warning interception at all.

    The expect version **asserts** that the warnings were in fact seen.
    """
    matcher = db_spec(db)
    skipped = isinstance(db, util.string_types) and not matcher(config._current)
    if skipped:
        # wrong backend: plain passthrough, no filtering, no assertion
        yield
    else:
        with expect_warnings(*messages, **kw):
            yield
def emits_warning(*messages):
    """Decorator form of :func:`expect_warnings`.

    Unlike the context-manager form, this does **not** assert that the
    listed warnings were actually emitted; it only squelches them.
    """
    @decorator
    def go(fn, *args, **kw):
        with expect_warnings(assert_=False, *messages):
            return fn(*args, **kw)
    return go
def expect_deprecated(*messages, **kw):
    # like expect_warnings(), but targeting SADeprecationWarning instead
    # of SAWarning; same regex-matching / assertion semantics
    return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw)
def emits_warning_on(db, *messages):
    """Mark a test as emitting a warning on a specific dialect.
    With no arguments, squelches all SAWarning failures.  Or pass one or more
    strings; these will be matched to the root of the warning description by
    warnings.filterwarnings().
    Note that emits_warning_on does **not** assert that the warnings
    were in fact seen.
    """
    @decorator
    def go(fn, *args, **kw):
        with expect_warnings_on(db, assert_=False, *messages):
            return fn(*args, **kw)
    return go
def uses_deprecated(*messages):
    """Mark a test as immune from fatal deprecation warnings.
    With no arguments, squelches all SADeprecationWarning failures.
    Or pass one or more strings; these will be matched to the root
    of the warning description by warnings.filterwarnings().
    As a special case, you may pass a function name prefixed with //
    and it will be re-written as needed to match the standard warning
    verbiage emitted by the sqlalchemy.util.deprecated decorator.
    Note that uses_deprecated does **not** assert that the warnings
    were in fact seen.
    """
    @decorator
    def go(fn, *args, **kw):
        with expect_deprecated(*messages, assert_=False):
            return fn(*args, **kw)
    return go
@contextlib.contextmanager
def _expect_warnings(exc_cls, messages, regex=True, assert_=True):
    """Core implementation behind expect_warnings()/expect_deprecated().

    Patches ``warnings.warn`` for the duration of the block.  Warnings of
    class ``exc_cls`` whose text matches one of ``messages`` are swallowed;
    everything else is forwarded to the real warnings.warn unchanged.
    With ``assert_`` True, any listed message that never matched causes a
    failure at block exit.
    """
    if regex:
        filters = [re.compile(msg, re.I | re.S) for msg in messages]
    else:
        filters = messages
    # 'seen' tracks which filters have NOT yet matched a warning; anything
    # left over at exit triggers the assert_ failure below.
    seen = set(filters)
    real_warn = warnings.warn
    def our_warn(msg, exception, *arg, **kw):
        # NOTE(review): this replacement requires the two-argument
        # warn(msg, category) calling form; a plain warnings.warn("msg")
        # issued by unrelated code inside the block would raise TypeError.
        # Confirm that all code under test uses the category form.
        if not issubclass(exception, exc_cls):
            # not the class we are intercepting: pass straight through
            return real_warn(msg, exception, *arg, **kw)
        if not filters:
            # no messages given: squelch every warning of exc_cls
            return
        for filter_ in filters:
            if (regex and filter_.match(msg)) or \
                    (not regex and filter_ == msg):
                seen.discard(filter_)
                break
        else:
            # right class but unmatched text: forward it
            real_warn(msg, exception, *arg, **kw)
    with mock.patch("warnings.warn", our_warn):
        yield
    if assert_:
        assert not seen, "Warnings were not seen: %s" % \
            ", ".join("%r" % (s.pattern if regex else s) for s in seen)
def global_cleanup_assertions():
    """Check things that have to be finalized at the end of a test suite.
    Hardcoded at the moment, a modular system can be built here
    to support things like PG prepared transactions, tables all
    dropped, etc.
    """
    # currently the only global check: no pooled connections left behind
    _assert_no_stray_pool_connections()
# module-level counter of cleanup passes that found a stray pool
# connection; accumulates across calls to the function below
_STRAY_CONNECTION_FAILURES = 0
def _assert_no_stray_pool_connections():
    """Fail if pool connections remain referenced after test cleanup."""
    global _STRAY_CONNECTION_FAILURES
    # lazy gc on cPython means "do nothing."  pool connections
    # shouldn't be in cycles, should go away.
    testutil.lazy_gc()
    # however, once in awhile, on an EC2 machine usually,
    # there's a ref in there.  usually just one.
    if pool._refs:
        # OK, let's be somewhat forgiving.
        _STRAY_CONNECTION_FAILURES += 1
        print("Encountered a stray connection in test cleanup: %s"
              % str(pool._refs))
        # then do a real GC sweep.  We shouldn't even be here
        # so a single sweep should really be doing it, otherwise
        # there's probably a real unreachable cycle somewhere.
        testutil.gc_collect()
    # if we've already had two of these occurrences, or
    # after a hard gc sweep we still have pool._refs?!
    # now we have to raise.
    # NOTE(review): the comment says "two" but the threshold below is 10;
    # the comment appears stale -- confirm intended threshold.
    if pool._refs:
        err = str(pool._refs)
        # but clean out the pool refs collection directly,
        # reset the counter,
        # so the error doesn't at least keep happening.
        pool._refs.clear()
        _STRAY_CONNECTION_FAILURES = 0
        assert False, "Stray connection refused to leave "\
            "after gc.collect(): %s" % err
    elif _STRAY_CONNECTION_FAILURES > 10:
        assert False, "Encountered more than 10 stray connections"
        # unreachable under normal runs, but under python -O the assert
        # above is stripped and this reset does execute
        _STRAY_CONNECTION_FAILURES = 0
def eq_(a, b, msg=None):
    """Assert ``a == b``, reporting both reprs on failure."""
    assert a == b, msg or "{!r} != {!r}".format(a, b)
def ne_(a, b, msg=None):
    """Assert ``a != b``, reporting both reprs on failure."""
    assert a != b, msg or "{!r} == {!r}".format(a, b)
def le_(a, b, msg=None):
    """Assert ``a <= b``, with repr messaging on failure.

    Bug fix: the failure message previously read ``"%r != %r"``, which
    misstates the relation being tested; it now reports ``"%r > %r"``.
    """
    assert a <= b, msg or "%r > %r" % (a, b)
def is_(a, b, msg=None):
    """Assert ``a is b``, reporting both reprs on failure."""
    assert a is b, msg or "{!r} is not {!r}".format(a, b)
def is_not_(a, b, msg=None):
    """Assert ``a is not b``, reporting both reprs on failure."""
    assert a is not b, msg or "{!r} is {!r}".format(a, b)
def in_(a, b, msg=None):
    """Assert ``a in b``, reporting both reprs on failure."""
    assert a in b, msg or "{!r} not in {!r}".format(a, b)
def not_in_(a, b, msg=None):
    """Assert ``a not in b``, reporting both reprs on failure."""
    assert a not in b, msg or "{!r} is in {!r}".format(a, b)
def startswith_(a, fragment, msg=None):
    """Assert ``a.startswith(fragment)``, reporting both reprs on failure."""
    assert a.startswith(fragment), \
        msg or "{!r} does not start with {!r}".format(a, fragment)
def assert_raises(except_cls, callable_, *args, **kw):
    """Invoke ``callable_`` and assert that it raises ``except_cls``.

    Exceptions of other types propagate unchanged.
    """
    raised = False
    try:
        callable_(*args, **kw)
    except except_cls:
        raised = True
    # assert outside the try/except so the check works even when
    # except_cls is AssertionError itself
    assert raised, "Callable did not raise an exception"
def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
    """Invoke ``callable_`` and assert it raises ``except_cls`` with a
    message matching the regular expression ``msg``."""
    try:
        callable_(*args, **kwargs)
        assert False, "Callable did not raise an exception"
    except except_cls as e:
        err_text = util.text_type(e)
        assert re.search(msg, err_text, re.UNICODE), "%r !~ %s" % (msg, e)
        print(err_text.encode('utf-8'))
class AssertsCompiledSQL(object):
    """Test mixin providing assert_compile(), which verifies the SQL string
    a Core clause or ORM Query compiles to under a chosen dialect."""
    def assert_compile(self, clause, result, params=None,
                       checkparams=None, dialect=None,
                       checkpositional=None,
                       check_prefetch=None,
                       use_default_dialect=False,
                       allow_dialect_select=False,
                       literal_binds=False):
        # Resolve which dialect to compile against, in priority order:
        # explicit flags > the test class's __dialect__ > configured db.
        if use_default_dialect:
            dialect = default.DefaultDialect()
        elif allow_dialect_select:
            dialect = None
        else:
            if dialect is None:
                dialect = getattr(self, '__dialect__', None)
            if dialect is None:
                dialect = config.db.dialect
            elif dialect == 'default':
                dialect = default.DefaultDialect()
            elif isinstance(dialect, util.string_types):
                # a dialect name string, e.g. "postgresql"; load via URL
                dialect = url.URL(dialect).get_dialect()()
        kw = {}
        compile_kwargs = {}
        if params is not None:
            kw['column_keys'] = list(params)
        if literal_binds:
            # render bound parameters inline rather than as placeholders
            compile_kwargs['literal_binds'] = True
        if isinstance(clause, orm.Query):
            # reduce an ORM Query to its underlying Core statement first
            context = clause._compile_context()
            context.statement.use_labels = True
            clause = context.statement
        if compile_kwargs:
            kw['compile_kwargs'] = compile_kwargs
        c = clause.compile(dialect=dialect, **kw)
        param_str = repr(getattr(c, 'params', {}))
        # echo the compiled SQL for debugging; py2/py3 differ in how the
        # unicode statement text must be encoded for print()
        if util.py3k:
            param_str = param_str.encode('utf-8').decode('ascii', 'ignore')
            print(
                ("\nSQL String:\n" +
                 util.text_type(c) +
                 param_str).encode('utf-8'))
        else:
            print(
                "\nSQL String:\n" +
                util.text_type(c).encode('utf-8') +
                param_str)
        # strip newlines/tabs so the comparison is whitespace-insensitive
        cc = re.sub(r'[\n\t]', '', util.text_type(c))
        eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect))
        if checkparams is not None:
            # verify the bound-parameter dictionary
            eq_(c.construct_params(params), checkparams)
        if checkpositional is not None:
            # verify parameters in positional (paramstyle) order
            p = c.construct_params(params)
            eq_(tuple([p[x] for x in c.positiontup]), checkpositional)
        if check_prefetch is not None:
            eq_(c.prefetch, check_prefetch)
class ComparesTables(object):
    """Test mixin comparing a Table definition against its reflected copy."""
    def assert_tables_equal(self, table, reflected_table, strict_types=False):
        # column-by-column comparison; relies on matching column order
        assert len(table.c) == len(reflected_table.c)
        for c, reflected_c in zip(table.c, reflected_table.c):
            eq_(c.name, reflected_c.name)
            assert reflected_c is reflected_table.c[c.name]
            eq_(c.primary_key, reflected_c.primary_key)
            eq_(c.nullable, reflected_c.nullable)
            if strict_types:
                # exact type class must match
                msg = "Type '%s' doesn't correspond to type '%s'"
                assert isinstance(reflected_c.type, type(c.type)), \
                    msg % (reflected_c.type, c.type)
            else:
                # only the generic type "affinity" must match
                self.assert_types_base(reflected_c, c)
            if isinstance(c.type, sqltypes.String):
                eq_(c.type.length, reflected_c.type.length)
            # foreign keys compared by target column name, order-insensitive
            eq_(
                set([f.column.name for f in c.foreign_keys]),
                set([f.column.name for f in reflected_c.foreign_keys])
            )
            if c.server_default:
                # reflected defaults come back as generic FetchedValue
                assert isinstance(reflected_c.server_default,
                                  schema.FetchedValue)
        assert len(table.primary_key) == len(reflected_table.primary_key)
        for c in table.primary_key:
            assert reflected_table.primary_key.columns[c.name] is not None
    def assert_types_base(self, c1, c2):
        # affinity comparison: same generic type category, not same class
        assert c1.type._compare_type_affinity(c2.type),\
            "On column %r, type '%s' doesn't correspond to type '%s'" % \
            (c1.name, c1.type, c2.type)
class AssertsExecutionResults(object):
    """Test mixin with helpers to assert on ORM/Core execution results and
    on the SQL statements emitted while executing a callable."""
    def assert_result(self, result, class_, *objects):
        # ordered comparison of result rows against expected attr dicts
        result = list(result)
        print(repr(result))
        self.assert_list(result, class_, objects)
    def assert_list(self, result, class_, list):
        self.assert_(len(result) == len(list),
                     "result list is not the same size as test list, " +
                     "for class " + class_.__name__)
        for i in range(0, len(list)):
            self.assert_row(class_, result[i], list[i])
    def assert_row(self, class_, rowobj, desc):
        # desc maps attribute name -> expected value; a tuple value of
        # (cls, spec) recurses into a related object or collection
        self.assert_(rowobj.__class__ is class_,
                     "item class is not " + repr(class_))
        for key, value in desc.items():
            if isinstance(value, tuple):
                if isinstance(value[1], list):
                    self.assert_list(getattr(rowobj, key), value[0], value[1])
                else:
                    self.assert_row(value[0], getattr(rowobj, key), value[1])
            else:
                self.assert_(getattr(rowobj, key) == value,
                             "attribute %s value %s does not match %s" % (
                                 key, getattr(rowobj, key), value))
    def assert_unordered_result(self, result, cls, *expected):
        """As assert_result, but the order of objects is not considered.
        The algorithm is very expensive but not a big deal for the small
        numbers of rows that the test suite manipulates.
        """
        class immutabledict(dict):
            # hashable-by-identity dict so specs can live in a set
            def __hash__(self):
                return id(self)
        found = util.IdentitySet(result)
        expected = set([immutabledict(e) for e in expected])
        for wrong in util.itertools_filterfalse(lambda o:
                                                isinstance(o, cls), found):
            fail('Unexpected type "%s", expected "%s"' % (
                type(wrong).__name__, cls.__name__))
        if len(found) != len(expected):
            fail('Unexpected object count "%s", expected "%s"' % (
                len(found), len(expected)))
        NOVALUE = object()
        def _compare_item(obj, spec):
            # True if obj satisfies every key/value in spec; tuple values
            # recurse via assert_unordered_result on a child collection
            for key, value in spec.items():
                if isinstance(value, tuple):
                    try:
                        self.assert_unordered_result(
                            getattr(obj, key), value[0], *value[1])
                    except AssertionError:
                        return False
                else:
                    if getattr(obj, key, NOVALUE) != value:
                        return False
            return True
        # greedy matching: each expected spec consumes the first object
        # that satisfies it
        for expected_item in expected:
            for found_item in found:
                if _compare_item(found_item, expected_item):
                    found.remove(found_item)
                    break
            else:
                fail(
                    "Expected %s instance with attributes %s not found." % (
                        cls.__name__, repr(expected_item)))
        return True
    def sql_execution_asserter(self, db=None):
        # context manager capturing statements executed on the engine
        if db is None:
            from . import db as db
        return assertsql.assert_engine(db)
    def assert_sql_execution(self, db, callable_, *rules):
        # run callable_ and verify emitted SQL against the given rules
        with self.sql_execution_asserter(db) as asserter:
            callable_()
        asserter.assert_(*rules)
    def assert_sql(self, db, callable_, rules):
        # convenience: plain tuples become CompiledSQL rules; a dict means
        # "all of these statements, in any order"
        newrules = []
        for rule in rules:
            if isinstance(rule, dict):
                newrule = assertsql.AllOf(*[
                    assertsql.CompiledSQL(k, v) for k, v in rule.items()
                ])
            else:
                newrule = assertsql.CompiledSQL(*rule)
            newrules.append(newrule)
        self.assert_sql_execution(db, callable_, *newrules)
    def assert_sql_count(self, db, callable_, count):
        # verify the exact number of statements emitted by callable_
        self.assert_sql_execution(
            db, callable_, assertsql.CountStatements(count))
    @contextlib.contextmanager
    def assert_execution(self, *rules):
        # rule-based assertion applied to statements executed inside the
        # with-block, via the module-global asserter
        assertsql.asserter.add_rules(rules)
        try:
            yield
            assertsql.asserter.statement_complete()
        finally:
            assertsql.asserter.clear_rules()
    def assert_statement_count(self, count):
        return self.assert_execution(assertsql.CountStatements(count))
| mit |
dnlm92/chokoretto | temp/venv/lib/python2.7/site-packages/oauthlib/oauth2/rfc6749/request_validator.py | 36 | 19514 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import logging
log = logging.getLogger(__name__)
class RequestValidator(object):
def client_authentication_required(self, request, *args, **kwargs):
"""Determine if client authentication is required for current request.
According to the rfc6749, client authentication is required in the following cases:
- Resource Owner Password Credentials Grant, when Client type is Confidential or when
Client was issued client credentials or whenever Client provided client
authentication, see `Section 4.3.2`_.
- Authorization Code Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication,
see `Section 4.1.3`_.
- Refresh Token Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication, see
`Section 6`_
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Refresh Token Grant
.. _`Section 4.3.2`: http://tools.ietf.org/html/rfc6749#section-4.3.2
.. _`Section 4.1.3`: http://tools.ietf.org/html/rfc6749#section-4.1.3
.. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
"""
return True
def authenticate_client(self, request, *args, **kwargs):
"""Authenticate client through means outside the OAuth 2 spec.
Means of authentication is negotiated beforehand and may for example
be `HTTP Basic Authentication Scheme`_ which utilizes the Authorization
header.
Headers may be accesses through request.headers and parameters found in
both body and query can be obtained by direct attribute access, i.e.
request.client_id for client_id in the URL query.
OBS! Certain grant types rely on this authentication, possibly with
other fallbacks, and for them to recognize this authorization please
set the client attribute on the request (request.client). Note that
preferably this client object should have a client_id attribute of
unicode type (request.client.client_id).
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant (may be disabled)
- Client Credentials Grant
- Refresh Token Grant
.. _`HTTP Basic Authentication Scheme`: http://tools.ietf.org/html/rfc1945#section-11.1
"""
raise NotImplementedError('Subclasses must implement this method.')
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a non-confidential client.
A non-confidential client is one that is not required to authenticate
through other means, such as using HTTP Basic.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def confirm_redirect_uri(self, client_id, code, redirect_uri, client,
*args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri requested.
If the client specifies a redirect_uri when obtaining code then
that redirect URI must be bound to the code and verified equal
in this method.
All clients should register the absolute URIs of all URIs they intend
to redirect to. The registration is outside of the scope of oauthlib.
:param client_id: Unicode client identifier
:param code: Unicode authorization_code.
:param redirect_uri: Unicode absolute URI
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant (during token request)
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""Get the default redirect URI for the client.
:param client_id: Unicode client identifier
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""Get the default scopes for the client.
:param client_id: Unicode client identifier
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: List of default scopes
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""Get the list of scopes associated with the refresh token.
:param refresh_token: Unicode refresh token
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: List of scopes.
Method is used by:
- Refresh token grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def is_within_original_scope(self, request_scopes, refresh_token, request, *args, **kwargs):
"""Check if requested scopes are within a scope of the refresh token.
When access tokens are refreshed the scope of the new token
needs to be within the scope of the original token. This is
ensured by checking that all requested scopes strings are on
the list returned by the get_original_scopes. If this check
fails, is_within_original_scope is called. The method can be
used in situations where returning all valid scopes from the
get_original_scopes is not practical.
:param request_scopes: A list of scopes that were requested by client
:param refresh_token: Unicode refresh_token
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Refresh token grant
"""
return False
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Invalidate an authorization code after use.
:param client_id: Unicode client identifier
:param code: The authorization code grant (request.code).
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Revocation Endpoint
"""
raise NotImplementedError('Subclasses must implement this method.')
def rotate_refresh_token(self, request):
"""Determine whether to rotate the refresh token. Default, yes.
When access tokens are refreshed the old refresh token can be kept
or replaced with a new one (rotated). Return True to rotate and
and False for keeping original.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Refresh Token Grant
"""
return True
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Persist the authorization_code.
The code should at minimum be associated with:
- a client and it's client_id
- the redirect URI used (request.redirect_uri)
- whether the redirect URI used is the client default or not
- a resource owner / user (request.user)
- authorized scopes (request.scopes)
The authorization code grant dict (code) holds at least the key 'code'::
{'code': 'sdf345jsdf0934f'}
:param client_id: Unicode client identifier
:param code: A dict of the authorization code grant.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def save_bearer_token(self, token, request, *args, **kwargs):
"""Persist the Bearer token.
The Bearer token should at minimum be associated with:
- a client and it's client_id, if available
- a resource owner / user (request.user)
- authorized scopes (request.scopes)
- an expiration time
- a refresh token, if issued
The Bearer token dict may hold a number of items::
{
'token_type': 'Bearer',
'access_token': 'askfjh234as9sd8',
'expires_in': 3600,
'scope': 'string of space separated authorized scopes',
'refresh_token': '23sdf876234', # if issued
'state': 'given_by_client', # if supplied by client
}
Note that while "scope" is a string-separated list of authorized scopes,
the original list is still available in request.scopes
:param client_id: Unicode client identifier
:param token: A Bearer token dict
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by all core grant types issuing Bearer tokens:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant (might not associate a client)
- Client Credentials grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_bearer_token(self, token, scopes, request):
"""Ensure the Bearer token is valid and authorized access to scopes.
:param token: A string of random characters.
:param scopes: A list of scopes associated with the protected resource.
:param request: The HTTP Request (oauthlib.common.Request)
A key to OAuth 2 security and restricting impact of leaked tokens is
the short expiration time of tokens, *always ensure the token has not
expired!*.
Two different approaches to scope validation:
1) all(scopes). The token must be authorized access to all scopes
associated with the resource. For example, the
token has access to ``read-only`` and ``images``,
thus the client can view images but not upload new.
Allows for fine grained access control through
combining various scopes.
2) any(scopes). The token must be authorized access to one of the
scopes associated with the resource. For example,
token has access to ``read-only-images``.
Allows for fine grained, although arguably less
convenient, access control.
A powerful way to use scopes would mimic UNIX ACLs and see a scope
as a group with certain privileges. For a restful API these might
map to HTTP verbs instead of read, write and execute.
Note, the request.user attribute can be set to the resource owner
associated with this token. Similarly the request.client and
request.scopes attribute can be set to associated client object
and authorized scopes. If you then use a decorator such as the
one provided for django these attributes will be made available
in all protected views as keyword arguments.
:param token: Unicode Bearer token
:param scopes: List of scopes (defined by you)
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is indirectly used by all core Bearer token issuing grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a valid and active client.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""Ensure the authorization_code is valid and assigned to client.
OBS! The request.user attribute should be set to the resource owner
associated with this authorization code. Similarly request.scopes and
request.state must also be set.
:param client_id: Unicode client identifier
:param code: Unicode authorization code
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
    def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
        """Ensure client is authorized to use the grant_type requested.

        :param client_id: Unicode client identifier
        :param grant_type: Unicode grant type, i.e. authorization_code, password.
        :param client: Client object set by you, see authenticate_client.
        :param request: The HTTP Request (oauthlib.common.Request)
        :rtype: True or False

        Method is used by:
            - Authorization Code Grant
            - Resource Owner Password Credentials Grant
            - Client Credentials Grant
            - Refresh Token Grant
        """
        # Abstract hook: concrete validators must override this with the
        # per-client grant-type policy check.
        raise NotImplementedError('Subclasses must implement this method.')
    def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
        """Ensure client is authorized to redirect to the redirect_uri requested.

        All clients should register the absolute URIs of all URIs they intend
        to redirect to. The registration is outside of the scope of oauthlib.

        :param client_id: Unicode client identifier
        :param redirect_uri: Unicode absolute URI
        :param request: The HTTP Request (oauthlib.common.Request)
        :rtype: True or False

        Method is used by:
            - Authorization Code Grant
            - Implicit Grant
        """
        # Abstract hook: concrete validators must override this, matching the
        # URI against the client's registered redirect URIs.
        raise NotImplementedError('Subclasses must implement this method.')
    def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
        """Ensure the Bearer token is valid and authorized access to scopes.

        OBS! The request.user attribute should be set to the resource owner
        associated with this refresh token.

        :param refresh_token: Unicode refresh token
        :param client: Client object set by you, see authenticate_client.
        :param request: The HTTP Request (oauthlib.common.Request)
        :rtype: True or False

        Method is used by:
            - Authorization Code Grant (indirectly by issuing refresh tokens)
            - Resource Owner Password Credentials Grant (also indirectly)
            - Refresh Token Grant
        """
        # Abstract hook: concrete validators must override this with a real
        # refresh-token lookup.
        raise NotImplementedError('Subclasses must implement this method.')
    def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
        """Ensure client is authorized to use the response_type requested.

        :param client_id: Unicode client identifier
        :param response_type: Unicode response type, i.e. code, token.
        :param client: Client object set by you, see authenticate_client.
        :param request: The HTTP Request (oauthlib.common.Request)
        :rtype: True or False

        Method is used by:
            - Authorization Code Grant
            - Implicit Grant
        """
        # Abstract hook: concrete validators must override this with the
        # per-client response-type policy check.
        raise NotImplementedError('Subclasses must implement this method.')
    def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
        """Ensure the client is authorized access to requested scopes.

        :param client_id: Unicode client identifier
        :param scopes: List of scopes (defined by you)
        :param client: Client object set by you, see authenticate_client.
        :param request: The HTTP Request (oauthlib.common.Request)
        :rtype: True or False

        Method is used by all core grant types:
            - Authorization Code Grant
            - Implicit Grant
            - Resource Owner Password Credentials Grant
            - Client Credentials Grant
        """
        # Abstract hook: concrete validators must override this with the
        # scope-authorization check for the given client.
        raise NotImplementedError('Subclasses must implement this method.')
    def validate_user(self, username, password, client, request, *args, **kwargs):
        """Ensure the username and password is valid.

        OBS! The validation should also set the user attribute of the request
        to a valid resource owner, i.e. request.user = username or similar. If
        not set you will be unable to associate a token with a user in the
        persistence method used (commonly, save_bearer_token).

        :param username: Unicode username
        :param password: Unicode password
        :param client: Client object set by you, see authenticate_client.
        :param request: The HTTP Request (oauthlib.common.Request)
        :rtype: True or False

        Method is used by:
            - Resource Owner Password Credentials Grant
        """
        # Abstract hook: concrete validators must override this with real
        # credential verification (never store plaintext passwords).
        raise NotImplementedError('Subclasses must implement this method.')
| mit |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/gateway_route_py3.py | 7 | 2245 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class GatewayRoute(Model):
    """Gateway routing details.

    Every attribute is server-populated and read-only: values sent in a
    request are ignored by the service.

    :ivar local_address: The gateway's local address
    :vartype local_address: str
    :ivar network: The route's network prefix
    :vartype network: str
    :ivar next_hop: The route's next hop
    :vartype next_hop: str
    :ivar source_peer: The peer this route was learned from
    :vartype source_peer: str
    :ivar origin: The source this route was learned from
    :vartype origin: str
    :ivar as_path: The route's AS path sequence
    :vartype as_path: str
    :ivar weight: The route's weight
    :vartype weight: int
    """

    # All fields share the same read-only constraint; build the table once.
    _validation = {
        name: {'readonly': True}
        for name in ('local_address', 'network', 'next_hop', 'source_peer',
                     'origin', 'as_path', 'weight')
    }

    _attribute_map = {
        'local_address': {'key': 'localAddress', 'type': 'str'},
        'network': {'key': 'network', 'type': 'str'},
        'next_hop': {'key': 'nextHop', 'type': 'str'},
        'source_peer': {'key': 'sourcePeer', 'type': 'str'},
        'origin': {'key': 'origin', 'type': 'str'},
        'as_path': {'key': 'asPath', 'type': 'str'},
        'weight': {'key': 'weight', 'type': 'int'},
    }

    def __init__(self, **kwargs) -> None:
        super(GatewayRoute, self).__init__(**kwargs)
        # Server-populated fields all start out unset.
        for attr in ('local_address', 'network', 'next_hop', 'source_peer',
                     'origin', 'as_path', 'weight'):
            setattr(self, attr, None)
| mit |
KonstantinRitt/qmqtt | tests/gtest/gtest/googletest/googletest/test/gtest_shuffle_test.py | 3023 | 12549 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

# Filter used when exercising --gtest_filter together with shuffling.
TEST_FILTER = 'A*.A:A*.B:C*'

# Caches of the test lists observed under the various flag/environment
# combinations.  Populated lazily, at most once, by CalculateTestLists().
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Returns the flag that makes Google Test also run DISABLED_ tests."""
  return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
  """Returns a --gtest_filter flag carrying the given filter string."""
  return '--gtest_filter={0}'.format(test_filter)
def RepeatFlag(n):
  """Returns a --gtest_repeat flag requesting n repetitions."""
  return '--gtest_repeat={0}'.format(n)
def ShuffleFlag():
  """Returns the flag that turns on test shuffling."""
  return '--gtest_shuffle'
def RandomSeedFlag(n):
  """Returns a --gtest_random_seed flag carrying seed n."""
  return '--gtest_random_seed={0}'.format(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the test program with extra_env merged into the environment and
  returns its output."""
  env = dict(os.environ)
  env.update(extra_env)
  return gtest_test_utils.Subprocess([COMMAND] + args, env=env).output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """
  iterations = []
  current = None
  for line in RunAndReturnOutput(extra_env, args).split('\n'):
    stripped = line.strip()
    if line.startswith('----'):
      # A dashed separator marks the start of a new iteration's list.
      current = []
      iterations.append(current)
    elif stripped:
      current.append(stripped)  # 'TestCaseName.TestName'
  return iterations
def GetTestCases(tests):
  """Returns a list of test cases in the given full test names.

  Args:
    tests: a list of full test names, e.g. 'TestCaseName.TestName'

  Returns:
    A list of test case names from 'tests', in order of first appearance.
    All duplicates are removed, not just consecutive ones.  (The previous
    docstring claimed only consecutive duplicates were removed, which did
    not match the implementation's whole-list membership check.)
  """
  test_cases = []
  for test in tests:
    test_case = test.split('.')[0]
    # Membership test against the whole accumulated list: a case seen
    # anywhere earlier is never re-appended.
    if test_case not in test_cases:
      test_cases.append(test_case)
  return test_cases
def CalculateTestLists():
  """Calculates the list of tests run under different flags.

  Each module-level cache list is filled at most once; subsequent calls
  are no-ops for lists that are already populated.
  """
  # Shard 1 of 3 is used for every sharded run below.
  shard_env = {TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'}
  if not ALL_TESTS:
    ALL_TESTS.extend(
        GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(
        GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
  if not SHARDED_TESTS:
    SHARDED_TESTS.extend(GetTestsForAllIterations(shard_env, [])[0])
  if not SHUFFLED_ALL_TESTS:
    SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(GetTestsForAllIterations(
        shard_env, [ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling."""

  def setUp(self):
    # Lazily populates the module-level test-list caches the assertions
    # below compare against.
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    # Shuffling must neither add nor drop tests in any configuration.
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    # With seed 1 the shuffled order must differ from the natural order.
    self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
    self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                 SHUFFLED_FILTERED_TESTS)
    self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                 SHUFFLED_SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    # The order of the test cases (not just individual tests) must change.
    self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                 GetTestCases(SHUFFLED_ALL_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    # Every test must appear exactly once in each shuffled list.
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    # Shuffled lists must be subsets of their unshuffled counterparts.
    for test in SHUFFLED_ALL_TESTS:
      self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    # ...and supersets: together with the previous test, equal as sets.
    for test in ALL_TESTS:
      self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    # Death tests must be scheduled before all non-death tests even when
    # shuffling is on.
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    # Builds a list of test-case names, appending only when the case name
    # changes, and asserts each case name appears in that list only once.
    # NOTE(review): with test_cases initially empty, the condition
    # 'test_cases and ...' is falsy on every iteration until something is
    # appended -- which only happens inside this branch.  It looks like the
    # guard may never fire, making the check vacuous; confirm the intended
    # condition (perhaps 'not test_cases or ...').
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively. Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers. This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above. Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above. Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    # With --gtest_repeat, each iteration must be shuffled differently.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
  # Script entry point: defers to the shared Google Test python runner.
  gtest_test_utils.Main()
| bsd-3-clause |
ShinyROM/android_external_chromium_org | chrome/common/extensions/docs/server2/compiled_file_system_test.py | 23 | 9146 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import os
from appengine_wrappers import GetAppVersion
from compiled_file_system import CompiledFileSystem
from copy import deepcopy
from file_system import FileNotFoundError
from mock_file_system import MockFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
from test_object_store import TestObjectStore
import unittest
# In-memory directory tree used to back the TestFileSystem in these tests.
# Keys are file/directory names; string values are file contents, dict
# values are subdirectories.
_TEST_DATA = {
  '404.html': '404.html contents',
  'apps': {
    'a11y.html': 'a11y.html contents',
    'about_apps.html': 'about_apps.html contents',
    'fakedir': {
      'file.html': 'file.html contents'
    },
    'deepdir': {
      'deepfile.html': 'deepfile.html contents',
      'deeper': {
        'deepest.html': 'deepest.html contents',
      },
    }
  },
  'extensions': {
    'activeTab.html': 'activeTab.html contents',
    'alarms.html': 'alarms.html contents'
  }
}

# Compile function that returns file contents unchanged (ignores the path).
identity = lambda _, x: x
def _GetTestCompiledFsCreator():
  '''Returns a function which creates CompiledFileSystem views of
  TestFileSystems backed by _TEST_DATA.
  '''
  factory = CompiledFileSystem.Factory(
      ObjectStoreCreator(start_empty=False,
                         store_type=TestObjectStore,
                         disable_wrappers=True))
  file_system = TestFileSystem(deepcopy(_TEST_DATA))
  # Bind the file system now; callers supply the compile function and class.
  return functools.partial(factory.Create, file_system)
class CompiledFileSystemTest(unittest.TestCase):
  """Unit tests for CompiledFileSystem: object-store namespacing, file and
  listing compilation, caching, error handling, and Future resolution
  behaviour against an in-memory TestFileSystem."""

  def testPopulateNamespace(self):
    # Verifies that the file/list object stores get namespaces derived from
    # the owning class, file system, and optional category.
    def CheckNamespace(expected_file, expected_list, fs):
      self.assertEqual(expected_file, fs._file_object_store.namespace)
      self.assertEqual(expected_list, fs._list_object_store.namespace)
    compiled_fs_creator = _GetTestCompiledFsCreator()
    f = lambda x: x
    CheckNamespace(
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/file&'
        'app_version=%s' % GetAppVersion(),
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/list&'
        'app_version=%s' % GetAppVersion(),
        compiled_fs_creator(f, CompiledFileSystemTest))
    CheckNamespace(
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/foo/file&'
        'app_version=%s' % GetAppVersion(),
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/foo/list&'
        'app_version=%s' % GetAppVersion(),
        compiled_fs_creator(f, CompiledFileSystemTest, category='foo'))

  def testPopulateFromFile(self):
    # The compile function receives (path, contents); Sleepy encodes their
    # lengths so the assertions pin down which arguments were passed.
    def Sleepy(key, val):
      return '%s%s' % ('Z' * len(key), 'z' * len(val))
    compiled_fs = _GetTestCompiledFsCreator()(Sleepy, CompiledFileSystemTest)
    self.assertEqual('ZZZZZZZZzzzzzzzzzzzzzzzzz',
                     compiled_fs.GetFromFile('404.html').Get())
    self.assertEqual('ZZZZZZZZZZZZZZzzzzzzzzzzzzzzzzzz',
                     compiled_fs.GetFromFile('apps/a11y.html').Get())
    self.assertEqual('ZZZZZZZZZZZZZZZZZZZZZZZzzzzzzzzzzzzzzzzzz',
                     compiled_fs.GetFromFile('/apps/fakedir/file.html').Get())

  def testPopulateFromFileListing(self):
    # Recursive listings are compiled per-directory; both with and without
    # a leading '/' should behave identically.
    def strip_ext(path, files):
      return [os.path.splitext(f)[0] for f in files]
    compiled_fs = _GetTestCompiledFsCreator()(strip_ext, CompiledFileSystemTest)
    expected_top_listing = [
      '404',
      'apps/a11y',
      'apps/about_apps',
      'apps/deepdir/deeper/deepest',
      'apps/deepdir/deepfile',
      'apps/fakedir/file',
      'extensions/activeTab',
      'extensions/alarms'
    ]
    self.assertEqual(expected_top_listing,
                     sorted(compiled_fs.GetFromFileListing('/').Get()))
    self.assertEqual(expected_top_listing,
                     sorted(compiled_fs.GetFromFileListing('').Get()))
    expected_apps_listing = [
      'a11y',
      'about_apps',
      'deepdir/deeper/deepest',
      'deepdir/deepfile',
      'fakedir/file',
    ]
    self.assertEqual(expected_apps_listing,
                     sorted(compiled_fs.GetFromFileListing('/apps/').Get()))
    self.assertEqual(expected_apps_listing,
                     sorted(compiled_fs.GetFromFileListing('apps/').Get()))
    self.assertEqual(['file',],
                     compiled_fs.GetFromFileListing('/apps/fakedir/').Get())
    self.assertEqual(['file',],
                     compiled_fs.GetFromFileListing('apps/fakedir/').Get())
    self.assertEqual(['deeper/deepest', 'deepfile'],
                     sorted(compiled_fs.GetFromFileListing(
                         '/apps/deepdir/').Get()))
    self.assertEqual(['deeper/deepest', 'deepfile'],
                     sorted(compiled_fs.GetFromFileListing(
                         'apps/deepdir/').Get()))
    self.assertEqual(['deepest'],
                     compiled_fs.GetFromFileListing(
                         '/apps/deepdir/deeper/').Get())
    self.assertEqual(['deepest'],
                     compiled_fs.GetFromFileListing(
                         'apps/deepdir/deeper/').Get())

  def testCaching(self):
    # Mutating the backing data must not be observed until the file
    # system's stat changes (IncrementStat invalidates the cache).
    compiled_fs = _GetTestCompiledFsCreator()(identity, CompiledFileSystemTest)
    self.assertEqual('404.html contents',
                     compiled_fs.GetFromFile('404.html').Get())
    self.assertEqual(set(('file.html',)),
                     set(compiled_fs.GetFromFileListing('apps/fakedir').Get()))
    compiled_fs._file_system._obj['404.html'] = 'boom'
    compiled_fs._file_system._obj['apps']['fakedir']['boom.html'] = 'blam'
    self.assertEqual('404.html contents',
                     compiled_fs.GetFromFile('404.html').Get())
    self.assertEqual(set(('file.html',)),
                     set(compiled_fs.GetFromFileListing('apps/fakedir').Get()))
    compiled_fs._file_system.IncrementStat()
    self.assertEqual('boom', compiled_fs.GetFromFile('404.html').Get())
    self.assertEqual(set(('file.html', 'boom.html')),
                     set(compiled_fs.GetFromFileListing('apps/fakedir').Get()))

  def testFailures(self):
    # Missing files/directories surface as FileNotFoundError on Get().
    compiled_fs = _GetTestCompiledFsCreator()(identity, CompiledFileSystemTest)
    self.assertRaises(FileNotFoundError,
                      compiled_fs.GetFromFile('405.html').Get)
    # TODO(kalman): would be nice to test this fails since apps/ is a dir.
    compiled_fs.GetFromFile('apps/')
    #self.assertRaises(SomeError, compiled_fs.GetFromFile, 'apps/')
    self.assertRaises(FileNotFoundError,
                      compiled_fs.GetFromFileListing('nodir/').Get)
    # TODO(kalman): likewise, not a FileNotFoundError.
    self.assertRaises(FileNotFoundError,
                      compiled_fs.GetFromFileListing('404.html').Get)

  def testCorrectFutureBehaviour(self):
    # Tests that the underlying FileSystem's Read Future has had Get() called
    # on it before the Future is resolved, but the underlying Future isn't
    # resolved until Get is.
    mock_fs = MockFileSystem(TestFileSystem(_TEST_DATA))
    compiled_fs = CompiledFileSystem.Factory(
        ObjectStoreCreator.ForTest()).Create(
            mock_fs, lambda path, contents: contents, type(self))
    self.assertTrue(*mock_fs.CheckAndReset())
    future = compiled_fs.GetFromFile('404.html')
    self.assertTrue(*mock_fs.CheckAndReset(stat_count=1, read_count=1))
    future.Get()
    self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=1))
    future = compiled_fs.GetFromFileListing('apps/')
    # Current behaviour is to have read=2 and read_resolve=1 because the first
    # level is read eagerly, then all of the second is read (in parallel). If
    # it weren't eager (and it may be worth experimenting with that) then it'd
    # be read=1 and read_resolve=0.
    self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
                                           read_count=2,
                                           read_resolve_count=1))
    future.Get()
    # It's doing 1 more level 'deeper' (already read 'fakedir' and 'deepdir'
    # though not resolved), so that's 1 more read/resolve + the resolve from
    # the first read.
    self.assertTrue(*mock_fs.CheckAndReset(read_count=1, read_resolve_count=2))
    # Even though the directory is 1 layer deep the caller has no way of
    # determining that ahead of time (though perhaps the API could give some
    # kind of clue, if we really cared).
    future = compiled_fs.GetFromFileListing('extensions/')
    self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
                                           read_count=1,
                                           read_resolve_count=1))
    future.Get()
    self.assertTrue(*mock_fs.CheckAndReset())
    # Similar configuration to the 'apps/' case but deeper.
    future = compiled_fs.GetFromFileListing('/')
    self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
                                           read_count=2,
                                           read_resolve_count=1))
    future.Get()
    self.assertTrue(*mock_fs.CheckAndReset(read_count=2, read_resolve_count=3))
if __name__ == '__main__':
  # Script entry point.
  unittest.main()
| bsd-3-clause |
yalp/fiware-orion | test/acceptance/behave/__init__.py | 4 | 1104 | # -*- coding: utf-8 -*-
"""
Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U
This file is part of Orion Context Broker.
Orion Context Broker is free software: you can redistribute it and/or
modify it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
Orion Context Broker is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Orion Context Broker. If not, see http://www.gnu.org/licenses/.
For those usages not covered by this license please contact with
iot_support at tid dot es
"""
__author__ = 'Iván Arias León (ivan dot ariasleon at telefonica dot com)'
import os
# Make sure the logs path exists and create it otherwise.
#
# Checking os.path.exists() before calling os.makedirs() is racy: a second
# process (e.g. a parallel behave run) can create the directory between the
# check and the call, which would make makedirs() raise. Instead, attempt
# the creation unconditionally and only propagate the error if the directory
# is still missing afterwards. (Python 2 compatible; on Python 3 this would
# simply be os.makedirs("logs", exist_ok=True).)
try:
    os.makedirs("logs")
except OSError:
    if not os.path.isdir("logs"):
        raise
| agpl-3.0 |
mudbungie/NetExplorer | env/lib/python3.4/site-packages/requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
# Detection model for Russian text in the KOI8-R encoding: pairs the KOI8-R
# byte-to-frequency-order map with the shared Russian bigram table above.
# mTypicalPositiveRatio comes from the model statistics listed above
# ("first 512 sequences: 97.6601%").
Koi8rModel = {
  'charToOrderMap': KOI8R_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "KOI8-R"
}
# Detection model for Russian text in windows-1251; same shared Russian
# bigram table, different byte-to-order map.
Win1251CyrillicModel = {
  'charToOrderMap': win1251_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "windows-1251"
}
# Detection model for Russian text in ISO-8859-5 ("Latin-5 Cyrillic").
Latin5CyrillicModel = {
  'charToOrderMap': latin5_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "ISO-8859-5"
}
# Detection model for Russian text in the MacCyrillic encoding.
# The original definition ended with a stray C-style semicolon (`};`),
# inconsistent with every sibling model declaration in this module; it is
# dropped here. No behavior change — a trailing semicolon is a no-op in
# Python, but linters flag it and it invites copy/paste errors.
MacCyrillicModel = {
  'charToOrderMap': macCyrillic_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "MacCyrillic"
}
# Detection model for Russian text in the DOS codepage IBM866.
Ibm866Model = {
  'charToOrderMap': IBM866_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "IBM866"
}
# Detection model for Russian text in the DOS codepage IBM855.
Ibm855Model = {
  'charToOrderMap': IBM855_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "IBM855"
}
# flake8: noqa
| mit |
RandyLowery/erpnext | erpnext/schools/doctype/course_schedule/course_schedule.py | 5 | 2222 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class CourseSchedule(Document):
	"""A scheduled session of a Course.

	Ties an instructor, a room and a student group/batch to a time slot,
	and guards against double-booking via the overlap checks below.
	"""

	def validate(self):
		# Cache the instructor's display name first; set_title() relies on it.
		self.instructor_name = frappe.db.get_value(
			"Instructor", self.instructor, "instructor_name")
		self.set_title()
		self.validate_mandatory()
		self.validate_course()
		self.set_student_batch()
		self.validate_date()
		self.validate_overlap()

	def set_title(self):
		"""Set document Title as "<course> by <instructor>"."""
		shown_name = self.instructor_name if self.instructor_name else self.instructor
		self.title = self.course + " by " + shown_name

	def validate_mandatory(self):
		"""Require at least one of Student Batch / Student Group."""
		if self.student_batch or self.student_group:
			return
		frappe.throw(_("""Student Batch or Student Group is mandatory"""))

	def validate_course(self):
		# A Student Group is bound to exactly one course, so inherit it.
		if self.student_group:
			self.course = frappe.db.get_value(
				"Student Group", self.student_group, "course")

	def set_student_batch(self):
		# Likewise inherit the batch configured on the Student Group.
		if self.student_group:
			self.student_batch = frappe.db.get_value(
				"Student Group", self.student_group, "student_batch")

	def validate_date(self):
		"""Reject slots whose start time lies after their end time."""
		if self.from_time > self.to_time:
			frappe.throw(_("From Time cannot be greater than To Time."))

	def validate_overlap(self):
		"""Check Student Group/Batch, Instructor and Room for clashes with
		other Course Schedules and with Assessment Plans."""
		from erpnext.schools.utils import validate_overlap_for

		# Overlapping course schedules.
		if self.student_batch:
			validate_overlap_for(self, "Course Schedule", "student_batch")
		if self.student_group:
			validate_overlap_for(self, "Course Schedule", "student_group")
		validate_overlap_for(self, "Course Schedule", "instructor")
		validate_overlap_for(self, "Course Schedule", "room")

		# Overlapping assessment schedules.
		if self.student_batch:
			validate_overlap_for(self, "Assessment Plan", "student_batch")
		if self.student_group:
			validate_overlap_for(self, "Assessment Plan", "student_group")
		validate_overlap_for(self, "Assessment Plan", "room")
		validate_overlap_for(self, "Assessment Plan", "supervisor", self.instructor)
| gpl-3.0 |
IptvBrasilGroup/Cleitonleonelcreton.repository | plugin.video.armagedomfilmes/bs4/tests/test_lxml.py | 273 | 2965 | """Tests to ensure that the lxml tree builder generates good trees."""
import re
import warnings
try:
import lxml.etree
LXML_PRESENT = True
LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError, e:
LXML_PRESENT = False
LXML_VERSION = (0,)
if LXML_PRESENT:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from bs4.testing import skipIf
from bs4.tests import test_htmlparser
from bs4.testing import (
HTMLTreeBuilderSmokeTest,
XMLTreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its tree builder.")
class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
    """Run the shared HTML smoke tests against the lxml tree builder.

    See ``HTMLTreeBuilderSmokeTest`` for the bulk of the test cases; only
    lxml-specific behavior is tested directly here.
    """

    @property
    def default_builder(self):
        # Fresh builder per access keeps individual tests isolated.
        return LXMLTreeBuilder()

    def test_out_of_range_entity(self):
        # Numeric character references outside the Unicode range should be
        # dropped rather than crashing the parser.
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")

    # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
    # test if an old version of lxml is installed.
    @skipIf(
        not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
        "Skipping doctype test for old version of lxml to avoid segfault.")
    def test_empty_doctype(self):
        soup = self.soup("<!DOCTYPE>")
        doctype = soup.contents[0]
        self.assertEqual("", doctype.strip())

    def test_beautifulstonesoup_is_xml_parser(self):
        # Make sure that the deprecated BSS class uses an xml builder
        # if one is installed, and that instantiating it warns.
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulStoneSoup("<b />")
        # Self-closing output proves an XML (not HTML) builder was used.
        self.assertEqual(u"<b/>", unicode(soup.b))
        self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message))

    def test_real_xhtml_document(self):
        """lxml strips the XML definition from an XHTML doc, which is fine."""
        markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
        soup = self.soup(markup)
        # Compare ignoring newlines and the stripped XML declaration.
        self.assertEqual(
            soup.encode("utf-8").replace(b"\n", b''),
            markup.replace(b'\n', b'').replace(
                b'<?xml version="1.0" encoding="utf-8"?>', b''))
@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its XML tree builder.")
class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest):
    """Run the shared ``XMLTreeBuilderSmokeTest`` suite against lxml's
    XML tree builder."""

    @property
    def default_builder(self):
        # Hand the smoke-test mixin a fresh lxml XML builder per access.
        builder = LXMLTreeBuilderForXML()
        return builder
| gpl-2.0 |
jeffcharles/Open-Source-at-Laurier-Website | wluopensource/osl_comments/urls.py | 1 | 2435 | from django.conf.urls.defaults import *
from django.contrib.comments.urls import urlpatterns
from osl_comments.models import OslComment
# Comment CRUD / listing views. Plain tuples are unnamed patterns; url()
# entries carry a name so templates can reverse() them.
urlpatterns += patterns('osl_comments.views',
    (r'^comment/(?P<comment_id>\d+)/$', 'get_comment'),
    (r'^delete_comment/(?P<comment_id>\d+)/$', 'delete_comment'),
    (r'^deleted_comment/$', 'delete_by_user_done'),
    (r'^edit/$', 'edit_comment'),
    (r'^edit_form/(?P<comment_pk>\d+)/$', 'get_ajax_edit_form'),
    (r'^edited/$', 'comment_edited'),
    # Generic listing: ordering is captured from the URL itself ...
    (r'^get_comments/(?P<obj_ctype_pk>\d+)/(?P<obj_pk>\d+)/(?P<order_method>newest|score|oldest)/(?P<comments_enabled>True|False)/$',
        'get_comments'),
    # ... and these three named aliases pin the ordering via kwargs so each
    # ordering can be reversed by name.
    url(r'^get_comments/(?P<obj_ctype_pk>\d+)/(?P<obj_pk>\d+)/newest/(?P<comments_enabled>True|False)/$',
        'get_comments', kwargs={'order_method': 'newest'},
        name='get_comments_by_newest'),
    url(r'^get_comments/(?P<obj_ctype_pk>\d+)/(?P<obj_pk>\d+)/score/(?P<comments_enabled>True|False)/$',
        'get_comments', kwargs={'order_method': 'score'},
        name='get_comments_by_score'),
    url(r'^get_comments/(?P<obj_ctype_pk>\d+)/(?P<obj_pk>\d+)/oldest/(?P<comments_enabled>True|False)/$',
        'get_comments', kwargs={'order_method': 'oldest'},
        name='get_comments_by_oldest'),
    # Moderation endpoints.
    (r'^ip_address_ban/(?P<comment_id>\d+)/$', 'update_ip_address_ban'),
    (r'^ip_address_ban_update_done/$', 'update_ip_address_ban_done'),
    (r'^moderate/(?P<comment_id>\d+)/$', 'moderate'),
    url(r'^ocr/(\d+)/(.+)/$', 'redirect_view', name='osl-comments-url-redirect'),
    (r'^post2/$', 'post_comment'),
    (r'^reply_form/(?P<obj_ctype_pk>\d+)/(?P<obj_pk>\d+)/(?P<comment_pk>\d+)/$',
        'get_ajax_reply_form')
)

# Voting on comments, delegated to django-voting's generic view bound to the
# OslComment model. The URL name is shared with the vote-box view below.
VOTE_ON_OBJECT_URL_NAME = 'vote-comment'

urlpatterns += patterns('voting.views',
    url(r'^vote/(?P<object_id>\d+)/(?P<direction>up|down|clear)/$',
        'vote_on_object',
        {'model': OslComment, 'template_object_name': 'comment',
            'allow_xmlhttprequest': True,
            'template_name': 'comments/confirm_vote.html'},
        name=VOTE_ON_OBJECT_URL_NAME),
)

# AJAX endpoint that renders the vote-box fragment for a single comment.
GET_VOTE_BOX_TEMPLATE_URL_NAME = 'get_comment_vote_box_template'

urlpatterns += patterns('osl_voting.views',
    url(r'^vote_links/(?P<object_id>\d+)/$',
        'get_vote_box_template',
        {'model': OslComment, 'vote_url_name': VOTE_ON_OBJECT_URL_NAME,
            'vote_box_url_name': GET_VOTE_BOX_TEMPLATE_URL_NAME},
        name=GET_VOTE_BOX_TEMPLATE_URL_NAME)
)
| bsd-3-clause |
waytai/networkx | networkx/algorithms/cycles.py | 30 | 16789 | """
========================
Cycle finding algorithms
========================
"""
# Copyright (C) 2010-2012 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from collections import defaultdict
import networkx as nx
from networkx.utils import *
from networkx.algorithms.traversal.edgedfs import helper_funcs, edge_dfs
__all__ = [
'cycle_basis','simple_cycles','recursive_simple_cycles', 'find_cycle'
]
__author__ = "\n".join(['Jon Olav Vik <jonovik@gmail.com>',
'Dan Schult <dschult@colgate.edu>',
'Aric Hagberg <hagberg@lanl.gov>'])
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def cycle_basis(G,root=None):
    """ Returns a list of cycles which form a basis for cycles of G.

    A basis for cycles of a network is a minimal collection of
    cycles such that any cycle in the network can be written
    as a sum of cycles in the basis, where summation of cycles
    is defined as "exclusive or" of the edges.  Cycle bases are
    useful, e.g. when deriving equations for electric circuits
    using Kirchhoff's Laws.

    Parameters
    ----------
    G : NetworkX Graph
    root : node, optional
       Specify starting node for basis.

    Returns
    -------
    A list of cycle lists.  Each cycle list is a list of nodes
    which forms a cycle (loop) in G.

    Examples
    --------
    >>> G=nx.Graph()
    >>> G.add_cycle([0,1,2,3])
    >>> G.add_cycle([0,3,4,5])
    >>> print(nx.cycle_basis(G,0))
    [[3, 4, 5, 0], [1, 2, 3, 0]]

    Notes
    -----
    This is adapted from algorithm CACM 491 [1]_: a spanning tree is grown
    from ``root``; every non-tree edge encountered closes exactly one
    fundamental cycle, recovered by walking parent pointers.

    References
    ----------
    .. [1] Paton, K. An algorithm for finding a fundamental set of
       cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.

    See Also
    --------
    simple_cycles
    """
    remaining = set(G.nodes())
    basis = []
    while remaining:  # one spanning tree per connected component
        if root is None:
            root = remaining.pop()
        stack = [root]
        parent = {root: root}
        # For each visited node, the neighbors through which a cycle has
        # already been closed (so each fundamental cycle is found once).
        closed_via = {root: set()}
        while stack:  # depth-first growth of the spanning tree
            node = stack.pop()  # last-in makes the cycles easier to find
            node_closed = closed_via[node]
            for nbr in G[node]:
                if nbr not in closed_via:  # unvisited: extend the tree
                    parent[nbr] = node
                    stack.append(nbr)
                    closed_via[nbr] = set([node])
                elif nbr == node:  # self loop
                    basis.append([node])
                elif nbr not in node_closed:  # non-tree edge: found a cycle
                    nbr_closed = closed_via[nbr]
                    cycle = [nbr, node]
                    p = parent[node]
                    # Walk up the tree until rejoining nbr's closed set.
                    while p not in nbr_closed:
                        cycle.append(p)
                        p = parent[p]
                    cycle.append(p)
                    basis.append(cycle)
                    nbr_closed.add(node)
        remaining -= set(parent)
        root = None
    return basis
@not_implemented_for('undirected')
def simple_cycles(G):
    """Find simple cycles (elementary circuits) of a directed graph.

    A simple cycle, or elementary circuit, is a closed path where no
    node appears twice, except that the first and last node are the same.
    Two elementary circuits are distinct if they are not cyclic permutations
    of each other.

    This is a nonrecursive, iterator/generator version of Johnson's
    algorithm [1]_.  There may be better algorithms for some cases [2]_ [3]_.

    Parameters
    ----------
    G : NetworkX DiGraph
       A directed graph

    Returns
    -------
    cycle_generator: generator
       A generator that produces elementary cycles of the graph.  Each cycle is
       a list of nodes with the first and last nodes being the same.

    Examples
    --------
    >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
    >>> len(list(nx.simple_cycles(G)))
    5

    To filter the cycles so that they don't include certain nodes or edges,
    copy your graph and eliminate those nodes or edges before calling

    >>> copyG = G.copy()
    >>> copyG.remove_nodes_from([1])
    >>> copyG.remove_edges_from([(0, 1)])
    >>> len(list(nx.simple_cycles(copyG)))
    3

    Notes
    -----
    The implementation follows pp. 79-80 in [1]_.

    The time complexity is `O((n+e)(c+1))` for `n` nodes, `e` edges and `c`
    elementary circuits.

    References
    ----------
    .. [1] Finding all the elementary circuits of a directed graph.
       D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
       http://dx.doi.org/10.1137/0204007

    .. [2] Enumerating the cycles of a digraph: a new preprocessing strategy.
       G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982.

    .. [3] A search strategy for the elementary cycles of a directed graph.
       J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS,
       v. 16, no. 2, 192-204, 1976.

    See Also
    --------
    cycle_basis
    """
    def _unblock(thisnode,blocked,B):
        # Iterative version of Johnson's UNBLOCK(): unblock `thisnode` and
        # everything transitively recorded against it in B.
        stack=set([thisnode])
        while stack:
            node=stack.pop()
            if node in blocked:
                blocked.remove(node)
                stack.update(B[node])
                B[node].clear()

    # Johnson's algorithm requires some ordering of the nodes.
    # We assign the arbitrary ordering given by the strongly connected comps.
    # There is no need to track the ordering, as each node is removed from
    # the graph once it has been processed.
    subG = type(G)(G.edges_iter()) # save the actual graph so we can mutate it here
    # We only take the edges because we do not want to
    # copy edge and node attributes here.
    sccs = list(nx.strongly_connected_components(subG))
    while sccs:
        scc=sccs.pop()
        # order of scc determines ordering of nodes
        startnode = scc.pop()
        # Processing node runs the "circuit" routine from the recursive
        # version, but with an explicit stack instead of recursion.
        path=[startnode]
        blocked = set() # vertex: blocked from search?
        closed = set() # nodes involved in a cycle
        blocked.add(startnode)
        B=defaultdict(set) # graph portions that yield no elementary circuit
        stack=[ (startnode,list(subG[startnode])) ] # subG gives component nbrs
        while stack:
            # Peek (not pop): a frame stays live until its nbrs are spent.
            thisnode,nbrs = stack[-1]
            if nbrs:
                nextnode = nbrs.pop()
                if nextnode == startnode:
                    # Back at the start: the current path is a cycle.
                    yield path[:]
                    closed.update(path)
                elif nextnode not in blocked:
                    # Descend: push a new frame and keep searching.
                    path.append(nextnode)
                    stack.append( (nextnode,list(subG[nextnode])) )
                    closed.discard(nextnode)
                    blocked.add(nextnode)
                    continue
            # done with nextnode... look for more neighbors
            if not nbrs: # no more nbrs: this frame is exhausted, backtrack
                if thisnode in closed:
                    _unblock(thisnode,blocked,B)
                else:
                    # Defer unblocking until a neighbor is unblocked.
                    for nbr in subG[thisnode]:
                        if thisnode not in B[nbr]:
                            B[nbr].add(thisnode)
                stack.pop()
                path.pop()
        # done processing this node
        subG.remove_node(startnode)
        H=subG.subgraph(scc) # make smaller to avoid work in SCC routine
        sccs.extend(list(nx.strongly_connected_components(H)))
@not_implemented_for('undirected')
def recursive_simple_cycles(G):
    """Find simple cycles (elementary circuits) of a directed graph.

    A simple cycle, or elementary circuit, is a closed path where no
    node appears twice, except that the first and last node are the same.
    Two elementary circuits are distinct if they are not cyclic permutations
    of each other.

    This version uses a recursive algorithm to build a list of cycles.
    You should probably use the iterator version called simple_cycles().
    Warning: This recursive version uses lots of RAM!

    Parameters
    ----------
    G : NetworkX DiGraph
       A directed graph

    Returns
    -------
    A list of circuits, where each circuit is a list of nodes, with the first
    and last node being the same.

    Examples
    --------
    >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
    >>> nx.recursive_simple_cycles(G)
    [[0], [0, 1, 2], [0, 2], [1, 2], [2]]

    See Also
    --------
    cycle_basis (for undirected graphs)

    Notes
    -----
    The implementation follows pp. 79-80 in [1]_.

    The time complexity is `O((n+e)(c+1))` for `n` nodes, `e` edges and `c`
    elementary circuits.

    References
    ----------
    .. [1] Finding all the elementary circuits of a directed graph.
       D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
       http://dx.doi.org/10.1137/0204007

    See Also
    --------
    simple_cycles, cycle_basis
    """
    # Jon Olav Vik, 2010-08-09
    # Both closures below share path/blocked/B/result from the enclosing
    # scope, mirroring the global state of Johnson's original formulation.
    def _unblock(thisnode):
        """Recursively unblock and remove nodes from B[thisnode]."""
        if blocked[thisnode]:
            blocked[thisnode] = False
            while B[thisnode]:
                _unblock(B[thisnode].pop())

    def circuit(thisnode, startnode, component):
        # Johnson's CIRCUIT(): extend `path` by `thisnode` and recurse;
        # returns True iff some extension of the path closed a cycle.
        closed = False # set to True if elementary path is closed
        path.append(thisnode)
        blocked[thisnode] = True
        for nextnode in component[thisnode]: # direct successors of thisnode
            if nextnode == startnode:
                result.append(path[:])
                closed = True
            elif not blocked[nextnode]:
                if circuit(nextnode, startnode, component):
                    closed = True
        if closed:
            _unblock(thisnode)
        else:
            # No cycle through thisnode yet: defer its unblocking until one
            # of its successors gets unblocked.
            for nextnode in component[thisnode]:
                if thisnode not in B[nextnode]: # TODO: use set for speedup?
                    B[nextnode].append(thisnode)
        path.pop() # remove thisnode from path
        return closed

    path = [] # stack of nodes in current path
    blocked = defaultdict(bool) # vertex: blocked from search?
    B = defaultdict(list) # graph portions that yield no elementary circuit
    result = [] # list to accumulate the circuits found
    # Johnson's algorithm requires some ordering of the nodes.
    # They might not be sortable so we assign an arbitrary ordering.
    ordering=dict(zip(G,range(len(G))))
    for s in ordering:
        # Build the subgraph induced by s and following nodes in the ordering
        subgraph = G.subgraph(node for node in G
                              if ordering[node] >= ordering[s])
        # Find the strongly connected component in the subgraph
        # that contains the least node according to the ordering
        strongcomp = nx.strongly_connected_components(subgraph)
        mincomp=min(strongcomp,
                    key=lambda nodes: min(ordering[n] for n in nodes))
        component = G.subgraph(mincomp)
        if component:
            # smallest node in the component according to the ordering
            startnode = min(component,key=ordering.__getitem__)
            # Reset the shared search state for this start node.
            for node in component:
                blocked[node] = False
                B[node][:] = []
            dummy=circuit(startnode, startnode, component)
    return result
def find_cycle(G, source=None, orientation='original'):
    """
    Returns the edges of a cycle found via a directed, depth-first traversal.

    Parameters
    ----------
    G : graph
        A directed/undirected graph/multigraph.

    source : node, list of nodes
        The node from which the traversal begins. If ``None``, then a source
        is chosen arbitrarily and repeatedly until all edges from each node in
        the graph are searched.

    orientation : 'original' | 'reverse' | 'ignore'
        For directed graphs and directed multigraphs, edge traversals need not
        respect the original orientation of the edges. When set to 'reverse',
        then every edge will be traversed in the reverse direction. When set to
        'ignore', then each directed edge is treated as a single undirected
        edge that can be traversed in either direction. For undirected graphs
        and undirected multigraphs, this parameter is meaningless and is not
        consulted by the algorithm.

    Returns
    -------
    edges : directed edges
        A list of directed edges indicating the path taken for the loop. If
        no cycle is found, then ``edges`` will be an empty list. For graphs, an
        edge is of the form (u, v) where ``u`` and ``v`` are the tail and head
        of the edge as determined by the traversal. For multigraphs, an edge is
        of the form (u, v, key), where ``key`` is the key of the edge. When the
        graph is directed, then ``u`` and ``v`` are always in the order of the
        actual directed edge. If orientation is 'ignore', then an edge takes
        the form (u, v, key, direction) where direction indicates if the edge
        was followed in the forward (tail to head) or reverse (head to tail)
        direction. When the direction is forward, the value of ``direction``
        is 'forward'. When the direction is reverse, the value of ``direction``
        is 'reverse'.

    Examples
    --------
    In this example, we construct a DAG and find, in the first call, that there
    are no directed cycles, and so an exception is raised. In the second call,
    we ignore edge orientations and find that there is an undirected cycle.
    Note that the second call finds a directed cycle while effectively
    traversing an undirected graph, and so, we found an "undirected cycle".
    This means that this DAG structure does not form a directed tree (which
    is also known as a polytree).

    >>> import networkx as nx
    >>> G = nx.DiGraph([(0,1), (0,2), (1,2)])
    >>> try:
    ...    find_cycle(G, orientation='original')
    ... except:
    ...    pass
    ...
    >>> list(find_cycle(G, orientation='ignore'))
    [(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')]

    """
    # helper_funcs and edge_dfs are presumably module-level helpers defined
    # elsewhere in this file; tailhead(edge) -> (tail, head) per orientation.
    out_edge, key, tailhead = helper_funcs(G, orientation)

    explored = set()
    cycle = []
    final_node = None
    for start_node in G.nbunch_iter(source):
        if start_node in explored:
            # No loop is possible.
            continue

        edges = []
        # All nodes seen in this iteration of edge_dfs
        seen = {start_node}
        # Nodes in active path.
        active_nodes = {start_node}
        previous_node = None
        for edge in edge_dfs(G, start_node, orientation):
            # Determine if this edge is a continuation of the active path.
            tail, head = tailhead(edge)
            if previous_node is not None and tail != previous_node:
                # This edge results from backtracking.
                # Pop until we get a node whose head equals the current tail.
                # So for example, we might have:
                #  (0,1), (1,2), (2,3), (1,4)
                # which must become:
                #  (0,1), (1,4)
                while True:
                    try:
                        popped_edge = edges.pop()
                    except IndexError:
                        # Backtracked all the way; restart the active path
                        # from the current tail.
                        edges = []
                        active_nodes = {tail}
                        break
                    else:
                        popped_head = tailhead(popped_edge)[1]
                        active_nodes.remove(popped_head)

                    if edges:
                        last_head = tailhead(edges[-1])[1]
                        if tail == last_head:
                            break
            edges.append(edge)

            if head in active_nodes:
                # We have a loop!
                cycle.extend(edges)
                final_node = head
                break
            elif head in explored:
                # Then we've already explored it. No loop is possible.
                break
            else:
                seen.add(head)
                active_nodes.add(head)
                previous_node = head

        if cycle:
            break
        else:
            explored.update(seen)

    else:
        # The for-loop ran to completion without finding a cycle.
        assert(len(cycle) == 0)
        raise nx.exception.NetworkXNoCycle('No cycle found.')

    # We now have a list of edges which ends on a cycle.
    # So we need to remove from the beginning edges that are not relevant.
    for i, edge in enumerate(cycle):
        tail, head = tailhead(edge)
        if tail == final_node:
            break

    return cycle[i:]
| bsd-3-clause |
msabramo/ansible | lib/ansible/modules/network/ovs/openvswitch_port.py | 28 | 8562 | #!/usr/bin/python
#coding: utf-8 -*-
# pylint: disable=C0111
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
#
# Portions copyright @ 2015 VMware, Inc.
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openvswitch_port
version_added: 1.4
author: "David Stygstra (@stygstra)"
short_description: Manage Open vSwitch ports
requirements: [ ovs-vsctl ]
description:
- Manage Open vSwitch ports
options:
bridge:
required: true
description:
- Name of bridge to manage
port:
required: true
description:
- Name of port to manage on the bridge
tag:
version_added: 2.2
required: false
description:
- VLAN tag for this port
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the port should exist
timeout:
required: false
default: 5
description:
- How long to wait for ovs-vswitchd to respond
external_ids:
version_added: 2.0
required: false
default: {}
description:
- Dictionary of external_ids applied to a port.
set:
version_added: 2.0
required: false
default: None
description:
- Set a single property on a port.
'''
EXAMPLES = '''
# Creates port eth2 on bridge br-ex
- openvswitch_port:
bridge: br-ex
port: eth2
state: present
# Creates port eth6
- openvswitch_port:
bridge: bridge-loop
port: eth6
state: present
set: Interface eth6
# Creates port vlan10 with tag 10 on bridge br-ex
- openvswitch_port:
bridge: br-ex
port: vlan10
tag: 10
state: present
set: Interface vlan10
# Assign interface id server1-vifeth6 and mac address 00:00:5E:00:53:23
# to port vifeth6 and setup port to be managed by a controller.
- openvswitch_port:
bridge: br-int
port: vifeth6
state: present
args:
external_ids:
iface-id: '{{ inventory_hostname }}-vifeth6'
attached-mac: '00:00:5E:00:53:23'
vm-id: '{{ inventory_hostname }}'
iface-status: active
'''
# pylint: disable=W0703
def truncate_before(value, srch):
""" Return content of str before the srch parameters. """
before_index = value.find(srch)
if (before_index >= 0):
return value[:before_index]
else:
return value
def _set_to_get(set_cmd, module):
""" Convert set command to get command and set value.
return tuple (get command, set value)
"""
##
# If set has option: then we want to truncate just before that.
set_cmd = truncate_before(set_cmd, " option:")
get_cmd = set_cmd.split(" ")
(key, value) = get_cmd[-1].split("=")
module.log("get commands %s " % key)
return (["--", "get"] + get_cmd[:-1] + [key], value)
# pylint: disable=R0902
class OVSPort(object):
    """Manage a single Open vSwitch port on a given bridge via ovs-vsctl."""

    def __init__(self, module):
        self.module = module
        self.bridge = module.params['bridge']
        self.port = module.params['port']
        self.tag = module.params['tag']
        self.state = module.params['state']
        self.timeout = module.params['timeout']
        self.set_opt = module.params.get('set', None)

    def _vsctl(self, command, check_rc=True):
        '''Run an ovs-vsctl command with the configured timeout.

        Returns the (rc, stdout, stderr) tuple from module.run_command.
        '''
        cmd = ['ovs-vsctl', '-t', str(self.timeout)] + command
        return self.module.run_command(cmd, check_rc=check_rc)

    def exists(self):
        '''Check if the port already exists on the bridge.'''
        (rtc, out, err) = self._vsctl(['list-ports', self.bridge])
        if rtc != 0:
            self.module.fail_json(msg=err)
        # A port named like the bridge itself also counts as existing
        # (list-ports does not report the bridge's own internal port).
        return any(port.rstrip() == self.port for port in out.split('\n')) or self.port == self.bridge

    def set(self, set_opt):
        """Apply a single "-- set" property to the port.

        Returns True when a change was made, False when the current value
        already matches (idempotent no-op) or no option was given.
        """
        self.module.log("set called %s" % set_opt)
        if not set_opt:
            return False
        # Read the current value first so we only write (and report a
        # change) when the value actually differs.
        (get_cmd, set_value) = _set_to_get(set_opt, self.module)
        (rtc, out, err) = self._vsctl(get_cmd, False)
        if rtc != 0:
            ##
            # ovs-vsctl -t 5 -- get Interface port external_ids:key
            # returns failure if key does not exist.
            out = None
        else:
            out = out.strip("\n")
            out = out.strip('"')
            if out == set_value:
                return False
        (rtc, out, err) = self._vsctl(["--", "set"] + set_opt.split(" "))
        if rtc != 0:
            self.module.fail_json(msg=err)
        return True

    def add(self):
        '''Add the port, applying the VLAN tag and "set" options if given.'''
        cmd = ['add-port', self.bridge, self.port]
        if self.tag:
            # NOTE(review): assumes the tag arrives as a string (the module
            # argument_spec declares no type for it) -- confirm with callers.
            cmd += ["tag=" + self.tag]
        # Bug fix: the original condition was "self.set and self.set_opt".
        # self.set is a bound method and therefore always truthy, so the
        # only meaningful test is self.set_opt.
        if self.set_opt:
            cmd += ["--", "set"]
            cmd += self.set_opt.split(" ")
        (rtc, _, err) = self._vsctl(cmd)
        if rtc != 0:
            self.module.fail_json(msg=err)
        return True

    def delete(self):
        '''Remove the port from the bridge.'''
        (rtc, _, err) = self._vsctl(['del-port', self.bridge, self.port])
        if rtc != 0:
            self.module.fail_json(msg=err)

    def check(self):
        '''Run check mode: report whether a change would be made, without
        touching the switch.'''
        try:
            if self.state == 'absent' and self.exists():
                changed = True
            elif self.state == 'present' and not self.exists():
                changed = True
            else:
                changed = False
        except Exception:
            earg = get_exception()
            self.module.fail_json(msg=str(earg))
        self.module.exit_json(changed=changed)

    def run(self):
        '''Make the necessary changes and exit with the changed status.'''
        changed = False
        try:
            if self.state == 'absent':
                if self.exists():
                    self.delete()
                    changed = True
            elif self.state == 'present':
                ##
                # Add any missing ports.
                if (not self.exists()):
                    self.add()
                    changed = True

                ##
                # If the -- set changed check here and make changes
                # but this only makes sense when state=present.
                if (not changed):
                    changed = self.set(self.set_opt) or changed

                # Apply each requested external_id individually; any one
                # differing marks the task as changed.
                items = self.module.params['external_ids'].items()
                for (key, value) in items:
                    value = value.replace('"', '')
                    fmt_opt = "Interface %s external_ids:%s=%s"
                    external_id = fmt_opt % (self.port, key, value)
                    changed = self.set(external_id) or changed
                ##
        except Exception:
            earg = get_exception()
            self.module.fail_json(msg=str(earg))
        self.module.exit_json(changed=changed)
# pylint: disable=E0602
def main():
    """ Entry point: declare the module arguments and dispatch. """
    argument_spec = {
        'bridge': {'required': True},
        'port': {'required': True},
        'tag': {'required': False},
        'state': {'default': 'present', 'choices': ['present', 'absent']},
        'timeout': {'default': 5, 'type': 'int'},
        'set': {'required': False, 'default': None},
        'external_ids': {'default': {}, 'required': False, 'type': 'dict'},
    }
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    port = OVSPort(module)
    # In check mode only report whether a change would happen;
    # otherwise apply the requested state.
    if module.check_mode:
        port.check()
    else:
        port.run()
# pylint: disable=W0614
# pylint: disable=W0401
# pylint: disable=W0622
# import module snippets
# NOTE: legacy Ansible modules import the shared utility code at the bottom
# of the file; the wildcard import is expected to provide AnsibleModule,
# which main() uses.
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception

if __name__ == '__main__':
    main()
| gpl-3.0 |
tombstone/models | research/delf/delf/python/feature_aggregation_extractor_test.py | 2 | 18304 | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DELF feature aggregation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from delf import aggregation_config_pb2
from delf import feature_aggregation_extractor
FLAGS = flags.FLAGS
class FeatureAggregationTest(tf.test.TestCase):
  """Tests for the DELF feature aggregation extractor.

  Every test shares the same 5-word, 2-D codebook (see _CreateCodebook) and a
  common AggregationConfig skeleton (see _CreateConfig); each test only
  overrides the fields it cares about and checks hand-computed expectations.
  """

  def _CreateCodebook(self, checkpoint_path):
    """Creates codebook used in tests.

    Args:
      checkpoint_path: Directory where codebook is saved to.
    """
    codebook = tf.Variable(
        [[0.5, 0.5], [0.0, 0.0], [1.0, 0.0], [-0.5, -0.5], [0.0, 1.0]],
        name='clusters',
        dtype=tf.float32)
    ckpt = tf.train.Checkpoint(codebook=codebook)
    ckpt.write(checkpoint_path)

  def _CreateConfig(self, aggregation_type, **overrides):
    """Builds the AggregationConfig shared by all tests.

    Previously every test repeated the same 4-field boilerplate; this helper
    centralizes it so the tests only state what differs.

    Args:
      aggregation_type: Value for config.aggregation_type.
      **overrides: Extra scalar config fields to set (e.g.
        use_l2_normalization, num_assignments). Fields not listed keep their
        proto defaults, exactly as in the original per-test construction.

    Returns:
      aggregation_config_pb2.AggregationConfig instance.
    """
    config = aggregation_config_pb2.AggregationConfig()
    config.codebook_size = 5
    config.feature_dimensionality = 2
    config.aggregation_type = aggregation_type
    config.codebook_path = self._codebook_path
    for field, value in overrides.items():
      setattr(config, field, value)
    return config

  def setUp(self):
    self._codebook_path = os.path.join(FLAGS.test_tmpdir, 'test_codebook')
    self._CreateCodebook(self._codebook_path)

  def testComputeNormalizedVladWorks(self):
    # 3 2-D features.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.VLAD,
        use_l2_normalization=True,
        num_assignments=1)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    vlad, extra_output = extractor.Extract(features)
    exp_vlad = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.316228, 0.316228, 0.632456, 0.632456
    ]
    exp_extra_output = -1
    self.assertAllClose(vlad, exp_vlad)
    self.assertAllEqual(extra_output, exp_extra_output)

  def testComputeNormalizedVladWithBatchingWorks(self):
    # 3 2-D features, processed in batches of 2.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.VLAD,
        use_l2_normalization=True,
        num_assignments=1,
        feature_batch_size=2)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    vlad, extra_output = extractor.Extract(features)
    exp_vlad = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.316228, 0.316228, 0.632456, 0.632456
    ]
    exp_extra_output = -1
    self.assertAllClose(vlad, exp_vlad)
    self.assertAllEqual(extra_output, exp_extra_output)

  def testComputeUnnormalizedVladWorks(self):
    # 3 2-D features.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.VLAD,
        use_l2_normalization=False,
        num_assignments=1)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    vlad, extra_output = extractor.Extract(features)
    exp_vlad = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5, 0.5, 1.0, 1.0]
    exp_extra_output = -1
    self.assertAllEqual(vlad, exp_vlad)
    self.assertAllEqual(extra_output, exp_extra_output)

  def testComputeUnnormalizedVladMultipleAssignmentWorks(self):
    # 3 2-D features.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.VLAD,
        use_l2_normalization=False,
        num_assignments=3)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    vlad, extra_output = extractor.Extract(features)
    exp_vlad = [1.0, 1.0, 0.0, 0.0, 0.0, 2.0, -0.5, 0.5, 0.0, 0.0]
    exp_extra_output = -1
    self.assertAllEqual(vlad, exp_vlad)
    self.assertAllEqual(extra_output, exp_extra_output)

  def testComputeVladEmptyFeaturesWorks(self):
    # Empty feature array.
    features = np.array([[]])
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.VLAD)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    vlad, extra_output = extractor.Extract(features)
    exp_vlad = np.zeros([10], dtype=float)
    exp_extra_output = -1
    self.assertAllEqual(vlad, exp_vlad)
    self.assertAllEqual(extra_output, exp_extra_output)

  def testComputeUnnormalizedRvladWorks(self):
    # 4 2-D features: 3 in first region, 1 in second region.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
                        dtype=float)
    num_features_per_region = np.array([3, 1])
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.VLAD,
        use_l2_normalization=False,
        num_assignments=1,
        use_regional_aggregation=True)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    rvlad, extra_output = extractor.Extract(features, num_features_per_region)
    exp_rvlad = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.158114, 0.158114, 0.316228, 0.816228
    ]
    exp_extra_output = -1
    self.assertAllClose(rvlad, exp_rvlad)
    self.assertAllEqual(extra_output, exp_extra_output)

  def testComputeNormalizedRvladWorks(self):
    # 4 2-D features: 3 in first region, 1 in second region.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
                        dtype=float)
    num_features_per_region = np.array([3, 1])
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.VLAD,
        use_l2_normalization=True,
        num_assignments=1,
        use_regional_aggregation=True)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    rvlad, extra_output = extractor.Extract(features, num_features_per_region)
    exp_rvlad = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.175011, 0.175011, 0.350021, 0.903453
    ]
    exp_extra_output = -1
    self.assertAllClose(rvlad, exp_rvlad)
    self.assertAllEqual(extra_output, exp_extra_output)

  def testComputeRvladEmptyRegionsWorks(self):
    # Empty feature array and region list.
    features = np.array([[]])
    num_features_per_region = np.array([])
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.VLAD,
        use_regional_aggregation=True)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    rvlad, extra_output = extractor.Extract(features, num_features_per_region)
    exp_rvlad = np.zeros([10], dtype=float)
    exp_extra_output = -1
    self.assertAllEqual(rvlad, exp_rvlad)
    self.assertAllEqual(extra_output, exp_extra_output)

  def testComputeUnnormalizedRvladSomeEmptyRegionsWorks(self):
    # 4 2-D features: 0 in first region, 3 in second region, 0 in third region,
    # 1 in fourth region.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
                        dtype=float)
    num_features_per_region = np.array([0, 3, 0, 1])
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.VLAD,
        use_l2_normalization=False,
        num_assignments=1,
        use_regional_aggregation=True)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    rvlad, extra_output = extractor.Extract(features, num_features_per_region)
    exp_rvlad = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.079057, 0.079057, 0.158114, 0.408114
    ]
    exp_extra_output = -1
    self.assertAllClose(rvlad, exp_rvlad)
    self.assertAllEqual(extra_output, exp_extra_output)

  def testComputeNormalizedRvladSomeEmptyRegionsWorks(self):
    # 4 2-D features: 0 in first region, 3 in second region, 0 in third region,
    # 1 in fourth region.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
                        dtype=float)
    num_features_per_region = np.array([0, 3, 0, 1])
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.VLAD,
        use_l2_normalization=True,
        num_assignments=1,
        use_regional_aggregation=True)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    rvlad, extra_output = extractor.Extract(features, num_features_per_region)
    exp_rvlad = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.175011, 0.175011, 0.350021, 0.903453
    ]
    exp_extra_output = -1
    self.assertAllClose(rvlad, exp_rvlad)
    self.assertAllEqual(extra_output, exp_extra_output)

  def testComputeRvladMisconfiguredFeatures(self):
    # 4 2-D features: 3 in first region, 1 in second region.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
                        dtype=float)
    # Misconfigured number of features; there are only 4 features, but
    # sum(num_features_per_region) = 5.
    num_features_per_region = np.array([3, 2])
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.VLAD,
        use_regional_aggregation=True)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    with self.assertRaisesRegex(
        ValueError,
        r'Incorrect arguments: sum\(num_features_per_region\) and '
        r'features.shape\[0\] are different'):
      extractor.Extract(features, num_features_per_region)

  def testComputeAsmkWorks(self):
    # 3 2-D features.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.ASMK,
        num_assignments=1)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    asmk, visual_words = extractor.Extract(features)
    exp_asmk = [-0.707107, 0.707107, 0.707107, 0.707107]
    exp_visual_words = [3, 4]
    self.assertAllClose(asmk, exp_asmk)
    self.assertAllEqual(visual_words, exp_visual_words)

  def testComputeAsmkStarWorks(self):
    # 3 2-D features.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.ASMK_STAR,
        num_assignments=1)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    asmk_star, visual_words = extractor.Extract(features)
    exp_asmk_star = [64, 192]
    exp_visual_words = [3, 4]
    self.assertAllEqual(asmk_star, exp_asmk_star)
    self.assertAllEqual(visual_words, exp_visual_words)

  def testComputeAsmkMultipleAssignmentWorks(self):
    # 3 2-D features.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0]], dtype=float)
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.ASMK,
        num_assignments=3)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    asmk, visual_words = extractor.Extract(features)
    exp_asmk = [0.707107, 0.707107, 0.0, 1.0, -0.707107, 0.707107]
    exp_visual_words = [0, 2, 3]
    self.assertAllClose(asmk, exp_asmk)
    self.assertAllEqual(visual_words, exp_visual_words)

  def testComputeRasmkWorks(self):
    # 4 2-D features: 3 in first region, 1 in second region.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
                        dtype=float)
    num_features_per_region = np.array([3, 1])
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.ASMK,
        num_assignments=1,
        use_regional_aggregation=True)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    rasmk, visual_words = extractor.Extract(features, num_features_per_region)
    exp_rasmk = [-0.707107, 0.707107, 0.361261, 0.932465]
    exp_visual_words = [3, 4]
    self.assertAllClose(rasmk, exp_rasmk)
    self.assertAllEqual(visual_words, exp_visual_words)

  def testComputeRasmkStarWorks(self):
    # 4 2-D features: 3 in first region, 1 in second region.
    features = np.array([[1.0, 0.0], [-1.0, 0.0], [1.0, 2.0], [0.0, 2.0]],
                        dtype=float)
    num_features_per_region = np.array([3, 1])
    config = self._CreateConfig(
        aggregation_config_pb2.AggregationConfig.ASMK_STAR,
        num_assignments=1,
        use_regional_aggregation=True)
    extractor = feature_aggregation_extractor.ExtractAggregatedRepresentation(
        config)
    rasmk_star, visual_words = extractor.Extract(features,
                                                 num_features_per_region)
    exp_rasmk_star = [64, 192]
    exp_visual_words = [3, 4]
    self.assertAllEqual(rasmk_star, exp_rasmk_star)
    self.assertAllEqual(visual_words, exp_visual_words)

  def testComputeUnknownAggregation(self):
    # aggregation_type 0 is not a recognized aggregation method.
    config = self._CreateConfig(0, use_regional_aggregation=True)
    with self.assertRaisesRegex(ValueError, 'Invalid aggregation type'):
      feature_aggregation_extractor.ExtractAggregatedRepresentation(
          config)
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
tanakh/waf-unittest | gtest-1.7.0/scripts/gen_gtest_pred_impl.py | 2538 | 21986 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
# NOTE: sys.argv[0] is the script path as invoked; os.path.dirname yields ''
# when run from the script's own directory, which os.path.join handles fine.
SCRIPT_DIR = os.path.dirname(sys.argv[0])

# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')

# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
  """Returns the preamble for the generated gtest_pred_impl.h header.

  Args:
    n:  the maximum arity of the predicate macros to be generated.
  """
  # A map that defines the values used in the preamble template.
  # NOTE(review): this file's original whitespace was lost; the template
  # below is reproduced verbatim, so the generated C++ may be missing its
  # original leading indentation -- verify against upstream googletest.
  DEFS = {
    'today' : time.strftime('%m/%d/%Y'),
    'year' : time.strftime('%Y'),
    # Command line recorded in the generated file's banner comment.
    'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
    'n' : n
    }

  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
  """Returns the English name for an arity of n, e.g. 'binary' for 2.

  Negative arities have no name and yield None; arities above three are
  rendered numerically, e.g. '7-ary'.
  """
  SMALL_ARITIES = ('nullary', 'unary', 'binary', 'ternary')
  if n < 0:
    return None
  if n < len(SMALL_ARITIES):
    return SMALL_ARITIES[n]
  return '%s-ary' % n
def Title(word):
  """Upper-cases only the first character of word.

  Unlike str.title(), non-leading characters are left untouched, so
  Title('4-ary') is '4-ary' while '4-ary'.title() would be '4-Ary'.
  """
  first, rest = word[0], word[1:]
  return '%s%s' % (first.upper(), rest)
def OneTo(n):
  """Returns the list [1, 2, 3, ..., n].

  NOTE(review): relies on Python 2 range() returning a list; under
  Python 3 this would be a lazy range object instead.
  """
  return range(1, n + 1)
def Iter(n, format, sep=''):
  """Joins n instantiations of a format string with an optional separator.

  Every '%s' spec in format is replaced by the 1-based index i; the n
  resulting strings (i = 1..n) are joined with sep.

  Example:
    Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
  """
  # How many '%s' specs appear in format?
  spec_count = len(format.split('%s')) - 1
  pieces = [format % ((i,) * spec_count) for i in range(1, n + 1)]
  return sep.join(pieces)
def ImplementationForArity(n):
  """Returns the C++ implementation of the n-ary predicate assertions.

  Emits the AssertPredNHelper template function plus the GTEST_PREDN_,
  GTEST_PRED_FORMATN_, EXPECT_* and ASSERT_* macros for arity n.

  NOTE(review): the original whitespace of the embedded C++ templates was
  lost when this file was mangled; verify output against upstream.
  """
  # A map the defines the values used in the implementation template.
  DEFS = {
    'n' : str(n),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n))
    }

  impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS

  # One extra template parameter per predicate argument.
  impl += Iter(n, """,
typename T%s""")

  impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS

  impl += Iter(n, """,
const char* e%s""")

  impl += """,
Pred pred"""

  impl += Iter(n, """,
const T%s& v%s""")

  impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS

  # Failure message: echo each argument expression and its value.
  impl += ' return AssertionFailure() << pred_text << "("'

  impl += Iter(n, """
<< e%s""", sep=' << ", "')

  impl += ' << ") evaluates to false, where"'

  impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")

  impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS

  impl += Iter(n, """, \\
#v%s""")

  impl += """, \\
pred"""

  impl += Iter(n, """, \\
v%s""")

  impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS

  return impl
def HeaderPostamble():
  """Returns the postamble for the header file (the include-guard #endif)."""
  return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
  """Overwrites the file at path with the given content.

  Args:
    path:     path of the file to (re)write.
    content:  the complete new contents of the file.
  """
  print('Updating file %s . . .' % path)
  # A with-statement on open() replaces the deprecated file() builtin and
  # guarantees the handle is closed even if the write raises; the trailing
  # ``print >>f, content,`` (no newline added) is equivalent to f.write().
  with open(path, 'w+') as f:
    f.write(content)
  print('File %s has been updated.' % path)
def GenerateHeader(n):
  """Given the maximum arity n, updates the header file that implements
  the predicate assertions.

  Writes preamble + one implementation section per arity 1..n + postamble
  to the HEADER path.
  """
  GenerateFile(HEADER,
               HeaderPreamble(n)
               + ''.join([ImplementationForArity(i) for i in OneTo(n)])
               + HeaderPostamble())
def UnitTestPreamble():
  """Returns the preamble for the generated unit test file.

  NOTE(review): whitespace inside the template was lost in this copy;
  verify generated output against upstream googletest.
  """
  # A map that defines the values used in the preamble template.
  DEFS = {
    'today' : time.strftime('%m/%d/%Y'),
    'year' : time.strftime('%Y'),
    # Here the arity comes from the command line, not a parameter.
    'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
    }

  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
  """Returns the C++ source of the tests for n-ary predicate assertions.

  Generates sample predicate functions/functors, the test fixture, and
  one TEST_F per combination of assertion flavor via the nested GenTest.

  NOTE(review): whitespace inside the embedded templates was lost in this
  copy; verify generated output against upstream googletest.
  """
  # A map that defines the values used in the template for the tests.
  DEFS = {
    'n' : n,
    'es' : Iter(n, 'e%s', sep=', '),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'tvs' : Iter(n, 'T%s v%s', sep=', '),
    'int_vs' : Iter(n, 'int v%s', sep=', '),
    'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
    'types' : Iter(n, 'typename T%s', sep=', '),
    'v_sum' : Iter(n, 'v%s', sep=' + '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n)),
    }

  tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)

  tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
  tests += Iter(n, 'const T%s& v%s', sep=""",
""")
  tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS

  tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
""")
  tests += Iter(n, """,
const T%s& v%s""")
  tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
  tests += Iter(n, 'e%s', sep=' << " + " << ')
  tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS

  tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
""")
  tests += Iter(n, """,
const T%s& v%s""")
  tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS

  # The fixture: counts how often each argument expression is evaluated
  # and whether the test body ran to completion.
  tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS
  tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""
  tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""
  tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])
  tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS
  tests += Iter(n, """
static int n%s_;""")
  tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
  tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
  tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS

  def GenTest(use_format, use_assert, expect_failure,
              use_functor, use_user_type):
    """Returns the test for a predicate assertion macro.

    Args:
      use_format:     true iff the assertion is a *_PRED_FORMAT*.
      use_assert:     true iff the assertion is a ASSERT_*.
      expect_failure: true iff the assertion is expected to fail.
      use_functor:    true iff the first argument of the assertion is
                      a functor (as opposed to a function)
      use_user_type:  true iff the predicate functor/function takes
                      argument(s) of a user-defined type.

    Example:
      GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
      of a successful EXPECT_PRED_FORMATn() that takes a functor
      whose arguments have built-in types.
    """
    if use_assert:
      assrt = 'ASSERT'  # 'assert' is reserved, so we cannot use
                        # that identifier here.
    else:
      assrt = 'EXPECT'
    assertion = assrt + '_PRED'
    if use_format:
      pred_format = 'PredFormat'
      assertion += '_FORMAT'
    else:
      pred_format = 'Pred'
    assertion += '%(n)s' % DEFS
    if use_functor:
      pred_format_type = 'functor'
      pred_format += 'Functor%(n)s()'
    else:
      pred_format_type = 'function'
      pred_format += 'Function%(n)s'
      # Only the plain (non-FORMAT) function form has the Int/Bool
      # non-template variants.
      if not use_format:
        if use_user_type:
          pred_format += 'Bool'
        else:
          pred_format += 'Int'
    test_name = pred_format_type.title()
    if use_user_type:
      arg_type = 'user-defined type (Bool)'
      test_name += 'OnUserType'
      if expect_failure:
        arg = 'Bool(n%s_++)'
      else:
        arg = 'Bool(++n%s_)'
    else:
      arg_type = 'built-in type (int)'
      test_name += 'OnBuiltInType'
      if expect_failure:
        arg = 'n%s_++'
      else:
        arg = '++n%s_'
    if expect_failure:
      successful_or_failed = 'failed'
      expected_or_not = 'expected.'
      test_name += 'Failure'
    else:
      successful_or_failed = 'successful'
      expected_or_not = 'UNEXPECTED!'
      test_name += 'Success'
    # A map that defines the values used in the test template.
    defs = DEFS.copy()
    defs.update({
      'assert' : assrt,
      'assertion' : assertion,
      'test_name' : test_name,
      'pf_type' : pred_format_type,
      'pf' : pred_format,
      'arg_type' : arg_type,
      'arg' : arg,
      'successful' : successful_or_failed,
      'expected' : expected_or_not,
      })
    test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
    indent = (len(assertion) + 3)*' '
    extra_indent = ''
    if expect_failure:
      extra_indent = ' '
      # An expected failure is wrapped in EXPECT_{NON,}FATAL_FAILURE.
      if use_assert:
        test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
      else:
        test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
    test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
    test = test % defs
    test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
    test += ');\n' + extra_indent + ' finished_ = true;\n'
    if expect_failure:
      test += ' }, "");\n'
    test += '}\n'
    return test

  # Generates tests for all 2**5 = 32 combinations.
  tests += ''.join([GenTest(use_format, use_assert, expect_failure,
                            use_functor, use_user_type)
                    for use_format in [0, 1]
                    for use_assert in [0, 1]
                    for expect_failure in [0, 1]
                    for use_functor in [0, 1]
                    for use_user_type in [0, 1]
                    ])

  return tests
def UnitTestPostamble():
  """Returns the trailer appended after all generated tests (currently empty)."""
  return ''
def GenerateUnitTest(n):
  """Writes the tests for up-to n-ary predicate assertions to UNIT_TEST.

  (Despite the name symmetry with TestsForArity, this performs a file
  write rather than returning the test source.)
  """
  GenerateFile(UNIT_TEST,
               UnitTestPreamble()
               + ''.join([TestsForArity(i) for i in OneTo(n)])
               + UnitTestPostamble())
def _Main():
  """The entry point of the script.  Generates the header file and its
  unit test.

  Expects exactly one command-line argument: the maximum arity.  Prints
  the usage docstring and exits with status 1 otherwise.
  """
  if len(sys.argv) != 2:
    # Python 2 print statements: show usage and author, then bail out.
    print __doc__
    print 'Author: ' + __author__
    sys.exit(1)
  n = int(sys.argv[1])
  GenerateHeader(n)
  GenerateUnitTest(n)
# Run only when invoked as a script (not when imported).
if __name__ == '__main__':
  _Main()
| bsd-3-clause |
kleientertainment/ds_mod_tools | pkg/win32/Python27/Lib/compiler/syntax.py | 25 | 1490 | """Check for errs in the AST.
The Python parser does not catch all syntax errors. Others, like
assignments with invalid targets, are caught in the code generation
phase.
The compiler package catches some errors in the transformer module.
But it seems clearer to write checkers that use the AST to detect
errors.
"""
from compiler import ast, walk
def check(tree, multi=None):
    """Walk the AST `tree` looking for syntax errors the parser missed.

    Returns the number of errors found.  If `multi` is not None the
    checker prints each error instead of raising on the first one.
    """
    checker = SyntaxErrorChecker(multi)
    walk(tree, checker)
    return checker.errors
class SyntaxErrorChecker:
    """A visitor to find syntax errors in the AST.

    Used via compiler.walk(); visitXxx methods are dispatched per node
    type by the walker.
    """

    def __init__(self, multi=None):
        """Create new visitor object.

        If optional argument multi is not None, then print messages
        for each error rather than raising a SyntaxError for the
        first.
        """
        self.multi = multi
        # Running count of errors reported so far.
        self.errors = 0

    def error(self, node, msg):
        """Record one error at `node`, then report or abort per self.multi."""
        self.errors = self.errors + 1
        if self.multi is not None:
            # Python 2 print statement: report and keep scanning.
            print "%s:%s: %s" % (node.filename, node.lineno, msg)
        else:
            # Python 2 raise syntax: abort on the first error found.
            raise SyntaxError, "%s (%s:%s)" % (msg, node.filename, node.lineno)

    def visitAssign(self, node):
        # the transformer module handles many of these
        pass
##        for target in node.nodes:
##            if isinstance(target, ast.AssList):
##                if target.lineno is None:
##                    target.lineno = node.lineno
##                self.error(target, "can't assign to list comprehension")
| mit |
brandon-rhodes/numpy | numpy/lib/shape_base.py | 3 | 25218 | from __future__ import division, absolute_import, print_function
import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, zeros, outer, concatenate, isscalar, array, asanyarray
)
from numpy.core.fromnumeric import product, reshape
from numpy.core import vstack, atleast_3d
__all__ = [
'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap'
]
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    Apply a function to 1-D slices along the given axis.

    Executes ``func1d(a, *args, **kwargs)`` where `a` is each 1-D slice
    of `arr` taken along `axis`, and assembles the results.

    Parameters
    ----------
    func1d : callable
        Function that accepts 1-D arrays.
    axis : int
        Axis along which `arr` is sliced.
    arr : array_like
        Input array.
    args, kwargs
        Extra positional and keyword arguments passed to `func1d`.

    Returns
    -------
    ndarray
        Same shape as `arr` except along `axis`, whose length becomes
        the size of `func1d`'s return value; if `func1d` returns a
        scalar the result has one fewer dimension than `arr`.
    """
    arr = asarray(arr)
    ndim = arr.ndim
    if axis < 0:
        axis += ndim
    if axis >= ndim:
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
                         % (axis, ndim))

    # Multi-index ("odometer") over every axis except `axis`.
    odometer = [0] * (ndim - 1)
    other_axes = [d for d in range(ndim) if d != axis]
    # Object-dtype index vector: ints for the fixed axes, a slice for `axis`.
    selector = zeros(ndim, 'O')
    selector[axis] = slice(None, None)
    out_shape = asarray(arr.shape).take(other_axes)
    total = product(out_shape)

    def _advance(index, limits):
        # Increment `index` in row-major order, carrying into earlier digits.
        index[-1] += 1
        d = -1
        while (index[d] >= limits[d]) and (d > (1 - ndim)):
            index[d - 1] += 1
            index[d] = 0
            d -= 1

    selector.put(other_axes, odometer)
    first = func1d(arr[tuple(selector.tolist())], *args, **kwargs)

    if isscalar(first):
        # Scalar results: output drops the `axis` dimension.
        result = zeros(out_shape, asarray(first).dtype)
        result[tuple(odometer)] = first
        done = 1
        while done < total:
            _advance(odometer, out_shape)
            selector.put(other_axes, odometer)
            value = func1d(arr[tuple(selector.tolist())], *args, **kwargs)
            result[tuple(odometer)] = value
            done += 1
        return result
    else:
        # Array-like results: `axis` is kept with the result's length.
        hold_shape = out_shape
        full_shape = list(arr.shape)
        full_shape[axis] = len(first)
        result = zeros(full_shape, asarray(first).dtype)
        result[tuple(selector.tolist())] = first
        done = 1
        while done < total:
            _advance(odometer, hold_shape)
            selector.put(other_axes, odometer)
            value = func1d(arr[tuple(selector.tolist())], *args, **kwargs)
            result[tuple(selector.tolist())] = value
            done += 1
        return result
def apply_over_axes(func, a, axes):
    """
    Apply a function repeatedly over multiple axes.

    `func` is called as ``res = func(a, axis)``, where `axis` is the first
    element of `axes`.  If `res` has the same number of dimensions as the
    input it is used directly; if it has one dimension fewer, a
    length-one axis is re-inserted at `axis`.  The call is then repeated
    with `res` for each remaining axis in `axes`.

    Parameters
    ----------
    func : function
        Must take two arguments, ``func(a, axis)``.
    a : array_like
        Input array.
    axes : array_like
        Axes over which `func` is applied; the elements must be integers.

    Returns
    -------
    apply_over_axis : ndarray
        Output array with the same number of dimensions as `a`; the
        shape depends on what `func` does along each axis.

    Raises
    ------
    ValueError
        If `func` returns an array whose dimensionality cannot be
        restored by inserting a single axis.

    Examples
    --------
    >>> a = np.arange(24).reshape(2,3,4)
    >>> np.apply_over_axes(np.sum, a, [0,2])
    array([[[ 60],
            [ 92],
            [124]]])
    """
    val = asarray(a)
    # BUG FIX: read ndim from the converted array so that plain sequences
    # (lists/tuples) are accepted, as `a : array_like` promises; the old
    # code used `a.ndim`, which raised AttributeError for non-ndarrays.
    N = val.ndim
    if array(axes).ndim == 0:
        # A bare integer axis behaves as a one-element sequence.
        axes = (axes,)
    for axis in axes:
        if axis < 0:
            axis = N + axis
        args = (val, axis)
        res = func(*args)
        if res.ndim == val.ndim:
            val = res
        else:
            # `func` dropped the axis; put a length-one axis back.
            res = expand_dims(res, axis)
            if res.ndim == val.ndim:
                val = res
            else:
                raise ValueError("function is not returning "
                                 "an array of the correct shape")
    return val
def expand_dims(a, axis):
    """
    Expand the shape of an array.

    Inserts a new length-one axis at position `axis` of the result's
    shape.  A negative `axis` counts from the end of the expanded shape,
    so ``axis=-1`` appends a trailing axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int
        Position (amongst the result's axes) of the new axis.

    Returns
    -------
    res : ndarray
        View of `a` with one more dimension than the input.
    """
    arr = asarray(a)
    old_shape = arr.shape
    if axis < 0:
        # Normalize against the *expanded* rank (len + 1).
        axis += len(old_shape) + 1
    new_shape = old_shape[:axis] + (1,) + old_shape[axis:]
    return arr.reshape(new_shape)
# Stacking 1-D arrays as rows of a 2-D array is exactly vstack, so
# row_stack is a plain alias.
row_stack = vstack
def column_stack(tup):
    """
    Stack 1-D arrays as columns into a 2-D array.

    1-D inputs are first turned into 2-D columns; 2-D inputs are used
    as-is (like `hstack`).  All inputs must share the same first
    dimension.

    Parameters
    ----------
    tup : sequence of 1-D or 2-D arrays.
        Arrays to stack.

    Returns
    -------
    stacked : 2-D array
        The array formed by stacking the given arrays column-wise.
    """
    def _as_column(piece):
        # Convert to an array (no copy if avoidable, subclasses kept).
        a = array(piece, copy=False, subok=True)
        if a.ndim < 2:
            # Promote 1-D input to a 2-D row, then transpose to a column.
            a = array(a, copy=False, subok=True, ndmin=2).T
        return a

    return _nx.concatenate([_as_column(piece) for piece in tup], 1)
def dstack(tup):
    """
    Stack arrays in sequence depth wise (along the third axis).

    Every input is first promoted to at least 3-D via `atleast_3d`
    (1-D ``(N,)`` becomes ``(1,N,1)``, 2-D ``(M,N)`` becomes
    ``(M,N,1)``), then all pieces are concatenated along axis 2.
    Rebuilds arrays divided by `dsplit`.

    Parameters
    ----------
    tup : sequence of arrays
        Arrays to stack; shapes must agree along all but the third axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.
    """
    expanded = [atleast_3d(piece) for piece in tup]
    return _nx.concatenate(expanded, 2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if len(_nx.shape(sub_arys[i])) == 0:
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
return sub_arys
def array_split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays along `axis`.

    Identical to `split` except that an integer `indices_or_sections`
    does not have to divide the axis evenly: the first ``N % sections``
    pieces get one extra element.

    Examples
    --------
    >>> x = np.arange(8.0)
    >>> np.array_split(x, 3)
    [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.]), array([ 6.,  7.])]
    """
    try:
        Ntotal = ary.shape[axis]
    except AttributeError:
        # Plain sequences have no .shape; fall back to len().
        Ntotal = len(ary)
    try:
        # Sequence case: explicit cut points along the axis.
        Nsections = len(indices_or_sections) + 1
        div_points = [0] + list(indices_or_sections) + [Ntotal]
    except TypeError:
        # Scalar case: divide as evenly as possible into Nsections parts.
        Nsections = int(indices_or_sections)
        if Nsections <= 0:
            raise ValueError('number sections must be larger than 0.')
        Neach_section, extras = divmod(Ntotal, Nsections)
        # The first `extras` pieces are one element longer.
        section_sizes = ([0] +
                         extras * [Neach_section + 1] +
                         (Nsections - extras) * [Neach_section])
        div_points = _nx.array(section_sizes).cumsum()

    # Move the split axis to the front so plain slicing works, then
    # move it back on each piece.
    swapped = _nx.swapaxes(ary, axis, 0)
    sub_arys = [_nx.swapaxes(swapped[div_points[i]:div_points[i + 1]], axis, 0)
                for i in range(Nsections)]

    # This "kludge" was introduced here to replace arrays shaped (0, 10)
    # or similar with an array shaped (0,).
    # There seems no need for this, so give a FutureWarning to remove later.
    if sub_arys[-1].size == 0 and sub_arys[-1].ndim != 1:
        warnings.warn("in the future np.array_split will retain the shape of "
                      "arrays with a zero size, instead of replacing them by "
                      "`array([])`, which always has a shape of (0,).",
                      FutureWarning)
        sub_arys = _replace_zero_by_x_arrays(sub_arys)

    return sub_arys
def split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays along `axis`.

    `indices_or_sections` is either an integer N, in which case the
    array is divided into N equal pieces (a ValueError is raised when
    the axis length is not divisible by N), or a sorted 1-D sequence of
    cut points along `axis`.

    Returns
    -------
    sub-arrays : list of ndarrays

    See Also
    --------
    array_split : the permissive variant this function wraps, which
                  allows an uneven integer division.
    """
    try:
        len(indices_or_sections)
    except TypeError:
        # Scalar section count: enforce an exact division up front.
        sections = indices_or_sections
        N = ary.shape[axis]
        if N % sections:
            raise ValueError(
                'array split does not result in an equal division')
    return array_split(ary, indices_or_sections, axis)
def hsplit(ary, indices_or_sections):
    """
    Split an array into multiple sub-arrays horizontally (column-wise).

    Equivalent to `split` with ``axis=1`` for arrays of two or more
    dimensions; 1-D arrays are split along their only axis.  0-d input
    is rejected.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.
    """
    if len(_nx.shape(ary)) == 0:
        raise ValueError('hsplit only works on arrays of 1 or more dimensions')
    # Columns live on axis 1 except for 1-D input.
    split_axis = 1 if len(ary.shape) > 1 else 0
    return split(ary, indices_or_sections, split_axis)
def vsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays vertically (row-wise).
Please refer to the ``split`` documentation. ``vsplit`` is equivalent
to ``split`` with `axis=0` (default), the array is always split along the
first axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]]),
array([[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])]
>>> np.vsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]),
array([[ 12., 13., 14., 15.]]),
array([], dtype=float64)]
With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[ 0., 1.],
[ 2., 3.]]]),
array([[[ 4., 5.],
[ 6., 7.]]])]
"""
if len(_nx.shape(ary)) < 2:
raise ValueError('vsplit only works on arrays of 2 or more dimensions')
return split(ary, indices_or_sections, 0)
def dsplit(ary, indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[ 12., 13.]]]),
array([[[ 2., 3.],
[ 6., 7.]],
[[ 10., 11.],
[ 14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[ 12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[ 11.],
[ 15.]]]),
array([], dtype=float64)]
"""
if len(_nx.shape(ary)) < 3:
raise ValueError('dsplit only works on arrays of 3 or more dimensions')
return split(ary, indices_or_sections, 2)
def get_array_prepare(*args):
    """Find the __array_prepare__ wrapper of the argument with the
    highest __array_priority__.

    In case of ties, leftmost wins. If no wrapper is found, return None.
    """
    candidates = [
        (getattr(a, '__array_priority__', 0), -position, a.__array_prepare__)
        for position, a in enumerate(args)
        if hasattr(a, '__array_prepare__')
    ]
    if not candidates:
        return None
    # max() on (priority, -position) picks the highest priority and, on a
    # tie, the leftmost argument; priorities never tie with equal -position.
    return max(candidates)[-1]
def get_array_wrap(*args):
    """Find the __array_wrap__ wrapper of the argument with the highest
    __array_priority__.

    In case of ties, leftmost wins. If no wrapper is found, return None.
    """
    candidates = [
        (getattr(a, '__array_priority__', 0), -position, a.__array_wrap__)
        for position, a in enumerate(args)
        if hasattr(a, '__array_wrap__')
    ]
    if not candidates:
        return None
    # max() on (priority, -position) picks the highest priority and, on a
    # tie, the leftmost argument; priorities never tie with equal -position.
    return max(candidates)[-1]
def kron(a, b):
    """
    Kronecker product of two arrays.

    Computes the Kronecker product, a composite array made of blocks of the
    second array scaled by the first.

    Parameters
    ----------
    a, b : array_like

    Returns
    -------
    out : ndarray

    See Also
    --------
    outer : The outer product

    Notes
    -----
    The function assumes that the number of dimensions of `a` and `b`
    are the same, if necessary prepending the smallest with ones.
    If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``,
    the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*SN)``.
    The elements are products of elements from `a` and `b`, organized
    explicitly by::

        kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]

    where::

        kt = it * st + jt,  t = 0,...,N

    In the common 2-D case (N=1), the block structure can be visualized::

        [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
         [  ...                       ...      ],
         [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]

    Examples
    --------
    >>> np.kron([1,10,100], [5,6,7])
    array([  5,   6,   7,  50,  60,  70, 500, 600, 700])
    >>> np.kron([5,6,7], [1,10,100])
    array([  5,  50, 500,   6,  60, 600,   7,  70, 700])
    """
    # Promote `a` to at least b.ndim so both operands have a shape tuple
    # of comparable length below.
    b = asanyarray(b)
    a = array(a, copy=False, subok=True, ndmin=b.ndim)
    ndb, nda = b.ndim, a.ndim
    if (nda == 0 or ndb == 0):
        # A scalar operand degenerates to an elementwise product.
        return _nx.multiply(a, b)
    as_ = a.shape
    bs = b.shape
    # Reshaping a non-contiguous array to its own shape forces a
    # contiguous copy, which the reshape of the outer product relies on.
    if not a.flags.contiguous:
        a = reshape(a, as_)
    if not b.flags.contiguous:
        b = reshape(b, bs)
    # Pad the shorter shape with leading 1s so both describe `nd` axes.
    nd = ndb
    if (ndb != nda):
        if (ndb > nda):
            as_ = (1,)*(ndb-nda) + as_
        else:
            bs = (1,)*(nda-ndb) + bs
        nd = nda
    # outer() gives shape as_+bs; each concatenate() call below consumes
    # the leading axis, interleaving one axis of `a` with the matching
    # axis of `b` until the Kronecker block layout is reached.
    result = outer(a, b).reshape(as_+bs)
    axis = nd-1
    for _ in range(nd):
        result = concatenate(result, axis=axis)
    # Honour ndarray subclasses of either operand via the standard
    # __array_prepare__/__array_wrap__ hooks.
    wrapper = get_array_prepare(a, b)
    if wrapper is not None:
        result = wrapper(result)
    wrapper = get_array_wrap(a, b)
    if wrapper is not None:
        result = wrapper(result)
    return result
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : array_like
The input array.
reps : array_like
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
See Also
--------
repeat : Repeat elements of an array.
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0, 1, 2, 0, 1, 2])
>>> np.tile(a, (2, 2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> np.tile(b, (2, 1))
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
c = _nx.array(A, copy=False, subok=True, ndmin=d)
shape = list(c.shape)
n = max(c.size, 1)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
for i, nrep in enumerate(tup):
if nrep != 1:
c = c.reshape(-1, n).repeat(nrep, 0)
dim_in = shape[i]
dim_out = dim_in*nrep
shape[i] = dim_out
n //= max(dim_in, 1)
return c.reshape(shape)
| bsd-3-clause |
MarkTheF4rth/youtube-dl | youtube_dl/extractor/ooyala.py | 101 | 7982 | from __future__ import unicode_literals
import re
import json
import base64
from .common import InfoExtractor
from ..utils import (
unescapeHTML,
ExtractorError,
determine_ext,
int_or_none,
)
class OoyalaBaseIE(InfoExtractor):
    """Shared extraction logic for Ooyala-hosted videos.

    Subclasses build a player URL and call ``_extract()``, which scrapes
    the mobile player JS and falls back to the SAS authorization API.
    """

    def _extract_result(self, info, more_info):
        """Build one info dict from a mobile-player stream entry (``info``)
        and its matching metadata entry (``more_info``)."""
        embedCode = info['embedCode']
        # Prefer the iPad URL when present; it is assumed to be the more
        # widely playable variant -- TODO confirm against live responses.
        video_url = info.get('ipad_url') or info['url']
        if determine_ext(video_url) == 'm3u8':
            # HLS manifest: expand into one format per variant stream.
            formats = self._extract_m3u8_formats(video_url, embedCode, ext='mp4')
        else:
            formats = [{
                'url': video_url,
                'ext': 'mp4',
            }]
        return {
            'id': embedCode,
            'title': unescapeHTML(info['title']),
            'formats': formats,
            'description': unescapeHTML(more_info['description']),
            'thumbnail': more_info['promo'],
        }

    def _extract(self, player_url, video_id):
        """Download the player page, try the mobile player for each
        advertised device, and fall back to the SAS authorization API.

        Returns a single video info dict, or a playlist dict when the
        metadata contains a 'lineup' of several videos. Raises
        ExtractorError when nothing can be extracted.
        """
        player = self._download_webpage(player_url, video_id)
        mobile_url = self._search_regex(r'mobile_player_url="(.+?)&device="',
                                        player, 'mobile player url')
        # Looks like some videos are only available for particular devices
        # (e.g. http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0
        # is only available for ipad)
        # Working around with fetching URLs for all the devices found starting with 'unknown'
        # until we succeed or eventually fail for each device.
        devices = re.findall(r'device\s*=\s*"([^"]+)";', player)
        # Move 'unknown' to the front so the generic player is tried first.
        devices.remove('unknown')
        devices.insert(0, 'unknown')
        for device in devices:
            mobile_player = self._download_webpage(
                '%s&device=%s' % (mobile_url, device), video_id,
                'Downloading mobile player JS for %s device' % device)
            # The player embeds the stream list as an eval'd JSON literal.
            videos_info = self._search_regex(
                r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);',
                mobile_player, 'info', fatal=False, default=None)
            if videos_info:
                break
        if not videos_info:
            # No device variant worked: fall back to the SAS API, which
            # returns base64-encoded stream URLs plus technical metadata.
            formats = []
            auth_data = self._download_json(
                'http://player.ooyala.com/sas/player_api/v1/authorization/embed_code/%s/%s?domain=www.example.org&supportedFormats=mp4,webm' % (video_id, video_id),
                video_id)
            cur_auth_data = auth_data['authorization_data'][video_id]
            for stream in cur_auth_data['streams']:
                formats.append({
                    'url': base64.b64decode(stream['url']['data'].encode('ascii')).decode('utf-8'),
                    'ext': stream.get('delivery_type'),
                    'format': stream.get('video_codec'),
                    'format_id': stream.get('profile'),
                    'width': int_or_none(stream.get('width')),
                    'height': int_or_none(stream.get('height')),
                    'abr': int_or_none(stream.get('audio_bitrate')),
                    'vbr': int_or_none(stream.get('video_bitrate')),
                })
            if formats:
                # The SAS API exposes no title, so use a placeholder.
                return {
                    'id': video_id,
                    'formats': formats,
                    'title': 'Ooyala video',
                }
            if not cur_auth_data['authorized']:
                # Geo-blocks etc.: surface the service's own message.
                raise ExtractorError(cur_auth_data['message'], expected=True)
        if not videos_info:
            raise ExtractorError('Unable to extract info')
        # Un-escape the eval'd JSON before parsing it.
        videos_info = videos_info.replace('\\"', '"')
        videos_more_info = self._search_regex(
            r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, 'more info').replace('\\"', '"')
        videos_info = json.loads(videos_info)
        videos_more_info = json.loads(videos_more_info)
        if videos_more_info.get('lineup'):
            # A 'lineup' means several videos: return them as a playlist.
            videos = [self._extract_result(info, more_info) for (info, more_info) in zip(videos_info, videos_more_info['lineup'])]
            return {
                '_type': 'playlist',
                'id': video_id,
                'title': unescapeHTML(videos_more_info['title']),
                'entries': videos,
            }
        else:
            return self._extract_result(videos_info[0], videos_more_info)
class OoyalaIE(OoyalaBaseIE):
    """Extractor for Ooyala player URLs and ``ooyala:<embedCode>`` IDs."""

    # Matches both 'ooyala:<code>' pseudo-URLs and player URLs carrying an
    # 'embedCode' or 'ec' query parameter; the code is the video id.
    _VALID_URL = r'(?:ooyala:|https?://.+?\.ooyala\.com/.*?(?:embedCode|ec)=)(?P<id>.+?)(&|$)'

    _TESTS = [
        {
            # From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video
            'url': 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
            'info_dict': {
                'id': 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
                'ext': 'mp4',
                'title': 'Explaining Data Recovery from Hard Drives and SSDs',
                'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.',
            },
        }, {
            # Only available for ipad
            'url': 'http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
            'info_dict': {
                'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
                'ext': 'mp4',
                'title': 'Simulation Overview - Levels of Simulation',
                'description': '',
            },
        },
        {
            # Information available only through SAS api
            # From http://community.plm.automation.siemens.com/t5/News-NX-Manufacturing/Tool-Path-Divide/ba-p/4187
            'url': 'http://player.ooyala.com/player.js?embedCode=FiOG81ZTrvckcchQxmalf4aQj590qTEx',
            'md5': 'a84001441b35ea492bc03736e59e7935',
            'info_dict': {
                'id': 'FiOG81ZTrvckcchQxmalf4aQj590qTEx',
                'ext': 'mp4',
                'title': 'Ooyala video',
            }
        }
    ]

    @staticmethod
    def _url_for_embed_code(embed_code):
        # Canonical player URL for a bare embed code.
        return 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code

    @classmethod
    def _build_url_result(cls, embed_code):
        # Helper for other extractors that find Ooyala embeds: wrap the
        # code in a url_result that delegates back to this extractor.
        return cls.url_result(cls._url_for_embed_code(embed_code),
                              ie=cls.ie_key())

    def _real_extract(self, url):
        embed_code = self._match_id(url)
        player_url = 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code
        return self._extract(player_url, embed_code)
class OoyalaExternalIE(OoyalaBaseIE):
    """Extractor for Ooyala videos referenced by a partner's external id
    (``externalId=<partner>:<id>``) rather than a bare embed code."""

    # Verbose regex: captures the partner id, the partner-scoped video id,
    # and the provider code ('pcode') from either a pseudo-URL or a real
    # player URL.
    _VALID_URL = r'''(?x)
                    (?:
                        ooyalaexternal:|
                        https?://.+?\.ooyala\.com/.*?\bexternalId=
                    )
                    (?P<partner_id>[^:]+)
                    :
                    (?P<id>.+)
                    (?:
                        :|
                        .*?&pcode=
                    )
                    (?P<pcode>.+?)
                    (&|$)
                    '''

    _TEST = {
        'url': 'https://player.ooyala.com/player.js?externalId=espn:10365079&pcode=1kNG061cgaoolOncv54OAO1ceO-I&adSetCode=91cDU6NuXTGKz3OdjOxFdAgJVtQcKJnI&callback=handleEvents&hasModuleParams=1&height=968&playerBrandingId=7af3bd04449c444c964f347f11873075&targetReplaceId=videoPlayer&width=1656&wmode=opaque&allowScriptAccess=always',
        'info_dict': {
            'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG',
            'ext': 'mp4',
            'title': 'dm_140128_30for30Shorts___JudgingJewellv2',
            'description': '',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        partner_id = mobj.group('partner_id')
        video_id = mobj.group('id')
        pcode = mobj.group('pcode')
        # Rebuild a canonical player URL and reuse the shared extraction.
        player_url = 'http://player.ooyala.com/player.js?externalId=%s:%s&pcode=%s' % (partner_id, video_id, pcode)
        return self._extract(player_url, video_id)
| unlicense |
liu602348184/django | django/core/cache/backends/dummy.py | 629 | 1213 | "Dummy cache backend"
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
class DummyCache(BaseCache):
    """Cache backend that stores nothing and never returns a hit.

    Keys are still built and validated exactly as a real backend would,
    so code exercised against the dummy backend fails in the same way it
    would in production.
    """

    def __init__(self, host, *args, **kwargs):
        # ``host`` is accepted (and ignored) for signature parity with
        # the other backends, which receive a location argument.
        BaseCache.__init__(self, *args, **kwargs)

    def _check_key(self, key, version):
        # Shared key construction + validation used by every operation
        # that would touch a real store.
        key = self.make_key(key, version=version)
        self.validate_key(key)

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        self._check_key(key, version)
        return True

    def get(self, key, default=None, version=None):
        self._check_key(key, version)
        return default

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        self._check_key(key, version)

    def delete(self, key, version=None):
        self._check_key(key, version)

    def get_many(self, keys, version=None):
        return {}

    def has_key(self, key, version=None):
        self._check_key(key, version)
        return False

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        pass

    def delete_many(self, keys, version=None):
        pass

    def clear(self):
        pass
| bsd-3-clause |
idf-reading/cheat | cheat/sheets.py | 1 | 2886 | """
A container of sheets
No caching.
"""
from cheat import cheatsheets
from cheat.utils import *
import os
def default_path():
    """Return the default cheatsheet directory, creating it if necessary.

    The directory comes from the DEFAULT_CHEAT_DIR environment variable,
    falling back to ~/.cheat. Exits via die() if the directory cannot be
    created or is not both readable and writable.
    """
    # determine the default cheatsheet dir
    default_sheets_dir = os.environ.get('DEFAULT_CHEAT_DIR') or os.path.join(os.path.expanduser('~'), '.cheat')

    # create the DEFAULT_CHEAT_DIR if it does not exist
    if not os.path.isdir(default_sheets_dir):
        try:
            # Clear the umask so the directory is created with open
            # permissions, then restore it in `finally` so the change
            # does not leak into the rest of the process (the original
            # code never restored it).
            old_umask = os.umask(0)
            try:
                os.mkdir(default_sheets_dir)
            finally:
                os.umask(old_umask)
        except OSError:
            die('Could not create DEFAULT_CHEAT_DIR')

    # assert that the DEFAULT_CHEAT_DIR is readable and writable
    if not os.access(default_sheets_dir, os.R_OK):
        die('The DEFAULT_CHEAT_DIR (' + default_sheets_dir + ') is not readable.')
    if not os.access(default_sheets_dir, os.W_OK):
        die('The DEFAULT_CHEAT_DIR (' + default_sheets_dir + ') is not writeable.')

    # return the default dir
    return default_sheets_dir
def get():
    """Map each available cheatsheet name to its file path.

    The filesystem is scanned on every call (no caching). Directories
    earlier in paths() take precedence: iterating them in reverse lets
    higher-priority entries overwrite lower-priority ones.
    """
    sheets = {}
    for directory in reversed(paths()):
        for entry in os.listdir(directory):
            # Skip hidden files and python artifacts such as __pycache__.
            if entry.startswith('.') or entry.startswith('__'):
                continue
            sheets[entry] = os.path.join(directory, entry)
    return sheets
def paths():
    """Return the list of directories that may contain cheatsheets.

    The user's default directory and the bundled cheatsheets directory
    are always included; any existing directories named in the CHEATPATH
    environment variable are appended after them.
    """
    sheet_paths = [
        default_path(),
        cheatsheets.sheets_dir()[0],
    ]

    # Append every CHEATPATH entry that actually exists on disk.
    extra = os.environ.get('CHEATPATH')
    if extra:
        for candidate in extra.split(os.pathsep):
            if os.path.isdir(candidate):
                sheet_paths.append(candidate)

    if not sheet_paths:
        die('The DEFAULT_CHEAT_DIR dir does not exist or the CHEATPATH is not set.')

    return sheet_paths
def list():
    """Return a listing of the available cheatsheets, one per line.

    Each line holds the sheet name left-justified to a common width,
    followed by the sheet's path. Note: intentionally shadows the
    builtin ``list`` to preserve the module's public API.
    """
    # Scan the filesystem once instead of once per use (the original
    # called get() twice, doubling the directory scans).
    sheets = get()
    if not sheets:
        # Previously this crashed with ValueError on max() of an empty
        # sequence; an empty listing is the sensible result.
        return ''
    pad_length = max(len(name) for name in sheets) + 4
    sheet_list = ''
    for name, path in sorted(sheets.items()):
        sheet_list += name.ljust(pad_length) + path + "\n"
    return sheet_list
def search(term):
    """Search all cheatsheets for `term`.

    Returns a string with one section per matching sheet: the sheet name
    followed by its matching lines, each indented.
    """
    result = ''
    for name, path in sorted(get().items()):
        match = ''
        # Use a context manager so each sheet's file handle is closed
        # promptly (the original leaked handles from bare open() calls).
        with open(path) as sheet_file:
            for line in sheet_file:
                if term in line:
                    match += '  ' + line
        if match != '':
            result += name + ":\n" + match + "\n"
    return result
| gpl-3.0 |
helloiloveit/VkxPhoneProject | submodules/externals/antlr3/runtime/Python/tests/t060leftrecursion.py | 16 | 14776 | import unittest
import re
import textwrap
import antlr3
import testbase
# Left-recursion resolution is not yet enabled in the tool.
# class TestLeftRecursion(testbase.ANTLRTest):
# def parserClass(self, base):
# class TParser(base):
# def __init__(self, *args, **kwargs):
# base.__init__(self, *args, **kwargs)
# self._output = ""
# def capture(self, t):
# self._output += str(t)
# def recover(self, input, re):
# # no error recovery yet, just crash!
# raise
# return TParser
# def execParser(self, grammar, grammarEntry, input):
# lexerCls, parserCls = self.compileInlineGrammar(grammar)
# cStream = antlr3.StringStream(input)
# lexer = lexerCls(cStream)
# tStream = antlr3.CommonTokenStream(lexer)
# parser = parserCls(tStream)
# getattr(parser, grammarEntry)()
# return parser._output
# def runTests(self, grammar, tests, grammarEntry):
# lexerCls, parserCls = self.compileInlineGrammar(grammar)
# build_ast = re.search(r'output\s*=\s*AST', grammar)
# for input, expecting in tests:
# cStream = antlr3.StringStream(input)
# lexer = lexerCls(cStream)
# tStream = antlr3.CommonTokenStream(lexer)
# parser = parserCls(tStream)
# r = getattr(parser, grammarEntry)()
# found = parser._output
# if build_ast:
# found += r.tree.toStringTree()
# self.assertEquals(
# expecting, found,
# "%r != %r (for input %r)" % (expecting, found, input))
# def testSimple(self):
# grammar = textwrap.dedent(
# r"""
# grammar T;
# options {
# language=Python;
# }
# s : a { self.capture($a.text) } ;
# a : a ID
# | ID
# ;
# ID : 'a'..'z'+ ;
# WS : (' '|'\n') {self.skip()} ;
# """)
# found = self.execParser(grammar, 's', 'a b c')
# expecting = "abc"
# self.assertEquals(expecting, found)
# def testSemPred(self):
# grammar = textwrap.dedent(
# r"""
# grammar T;
# options {
# language=Python;
# }
# s : a { self.capture($a.text) } ;
# a : a {True}? ID
# | ID
# ;
# ID : 'a'..'z'+ ;
# WS : (' '|'\n') {self.skip()} ;
# """)
# found = self.execParser(grammar, "s", "a b c")
# expecting = "abc"
# self.assertEquals(expecting, found)
# def testTernaryExpr(self):
# grammar = textwrap.dedent(
# r"""
# grammar T;
# options {
# language=Python;
# output=AST;
# }
# e : e '*'^ e
# | e '+'^ e
# | e '?'<assoc=right>^ e ':'! e
# | e '='<assoc=right>^ e
# | ID
# ;
# ID : 'a'..'z'+ ;
# WS : (' '|'\n') {self.skip()} ;
# """)
# tests = [
# ("a", "a"),
# ("a+b", "(+ a b)"),
# ("a*b", "(* a b)"),
# ("a?b:c", "(? a b c)"),
# ("a=b=c", "(= a (= b c))"),
# ("a?b+c:d", "(? a (+ b c) d)"),
# ("a?b=c:d", "(? a (= b c) d)"),
# ("a? b?c:d : e", "(? a (? b c d) e)"),
# ("a?b: c?d:e", "(? a b (? c d e))"),
# ]
# self.runTests(grammar, tests, "e")
# def testDeclarationsUsingASTOperators(self):
# grammar = textwrap.dedent(
# r"""
# grammar T;
# options {
# language=Python;
# output=AST;
# }
# declarator
# : declarator '['^ e ']'!
# | declarator '['^ ']'!
# | declarator '('^ ')'!
# | '*'^ declarator // binds less tight than suffixes
# | '('! declarator ')'!
# | ID
# ;
# e : INT ;
# ID : 'a'..'z'+ ;
# INT : '0'..'9'+ ;
# WS : (' '|'\n') {self.skip()} ;
# """)
# tests = [
# ("a", "a"),
# ("*a", "(* a)"),
# ("**a", "(* (* a))"),
# ("a[3]", "([ a 3)"),
# ("b[]", "([ b)"),
# ("(a)", "a"),
# ("a[]()", "(( ([ a))"),
# ("a[][]", "([ ([ a))"),
# ("*a[]", "(* ([ a))"),
# ("(*a)[]", "([ (* a))"),
# ]
# self.runTests(grammar, tests, "declarator")
# def testDeclarationsUsingRewriteOperators(self):
# grammar = textwrap.dedent(
# r"""
# grammar T;
# options {
# language=Python;
# output=AST;
# }
# declarator
# : declarator '[' e ']' -> ^('[' declarator e)
# | declarator '[' ']' -> ^('[' declarator)
# | declarator '(' ')' -> ^('(' declarator)
# | '*' declarator -> ^('*' declarator) // binds less tight than suffixes
# | '(' declarator ')' -> declarator
# | ID -> ID
# ;
# e : INT ;
# ID : 'a'..'z'+ ;
# INT : '0'..'9'+ ;
# WS : (' '|'\n') {self.skip()} ;
# """)
# tests = [
# ("a", "a"),
# ("*a", "(* a)"),
# ("**a", "(* (* a))"),
# ("a[3]", "([ a 3)"),
# ("b[]", "([ b)"),
# ("(a)", "a"),
# ("a[]()", "(( ([ a))"),
# ("a[][]", "([ ([ a))"),
# ("*a[]", "(* ([ a))"),
# ("(*a)[]", "([ (* a))"),
# ]
# self.runTests(grammar, tests, "declarator")
# def testExpressionsUsingASTOperators(self):
# grammar = textwrap.dedent(
# r"""
# grammar T;
# options {
# language=Python;
# output=AST;
# }
# e : e '.'^ ID
# | e '.'^ 'this'
# | '-'^ e
# | e '*'^ e
# | e ('+'^|'-'^) e
# | INT
# | ID
# ;
# ID : 'a'..'z'+ ;
# INT : '0'..'9'+ ;
# WS : (' '|'\n') {self.skip()} ;
# """)
# tests = [
# ("a", "a"),
# ("1", "1"),
# ("a+1", "(+ a 1)"),
# ("a*1", "(* a 1)"),
# ("a.b", "(. a b)"),
# ("a.this", "(. a this)"),
# ("a-b+c", "(+ (- a b) c)"),
# ("a+b*c", "(+ a (* b c))"),
# ("a.b+1", "(+ (. a b) 1)"),
# ("-a", "(- a)"),
# ("-a+b", "(+ (- a) b)"),
# ("-a.b", "(- (. a b))"),
# ]
# self.runTests(grammar, tests, "e")
# @testbase.broken(
# "Grammar compilation returns errors", testbase.GrammarCompileError)
# def testExpressionsUsingRewriteOperators(self):
# grammar = textwrap.dedent(
# r"""
# grammar T;
# options {
# language=Python;
# output=AST;
# }
# e : e '.' ID -> ^('.' e ID)
# | e '.' 'this' -> ^('.' e 'this')
# | '-' e -> ^('-' e)
# | e '*' b=e -> ^('*' e $b)
# | e (op='+'|op='-') b=e -> ^($op e $b)
# | INT -> INT
# | ID -> ID
# ;
# ID : 'a'..'z'+ ;
# INT : '0'..'9'+ ;
# WS : (' '|'\n') {self.skip()} ;
# """)
# tests = [
# ("a", "a"),
# ("1", "1"),
# ("a+1", "(+ a 1)"),
# ("a*1", "(* a 1)"),
# ("a.b", "(. a b)"),
# ("a.this", "(. a this)"),
# ("a+b*c", "(+ a (* b c))"),
# ("a.b+1", "(+ (. a b) 1)"),
# ("-a", "(- a)"),
# ("-a+b", "(+ (- a) b)"),
# ("-a.b", "(- (. a b))"),
# ]
# self.runTests(grammar, tests, "e")
# def testExpressionAssociativity(self):
# grammar = textwrap.dedent(
# r"""
# grammar T;
# options {
# language=Python;
# output=AST;
# }
# e
# : e '.'^ ID
# | '-'^ e
# | e '^'<assoc=right>^ e
# | e '*'^ e
# | e ('+'^|'-'^) e
# | e ('='<assoc=right>^ |'+='<assoc=right>^) e
# | INT
# | ID
# ;
# ID : 'a'..'z'+ ;
# INT : '0'..'9'+ ;
# WS : (' '|'\n') {self.skip()} ;
# """)
# tests = [
# ("a", "a"),
# ("1", "1"),
# ("a+1", "(+ a 1)"),
# ("a*1", "(* a 1)"),
# ("a.b", "(. a b)"),
# ("a-b+c", "(+ (- a b) c)"),
# ("a+b*c", "(+ a (* b c))"),
# ("a.b+1", "(+ (. a b) 1)"),
# ("-a", "(- a)"),
# ("-a+b", "(+ (- a) b)"),
# ("-a.b", "(- (. a b))"),
# ("a^b^c", "(^ a (^ b c))"),
# ("a=b=c", "(= a (= b c))"),
# ("a=b=c+d.e", "(= a (= b (+ c (. d e))))"),
# ]
# self.runTests(grammar, tests, "e")
# def testJavaExpressions(self):
# grammar = textwrap.dedent(
# r"""
# grammar T;
# options {
# language=Python;
# output=AST;
# }
# expressionList
# : e (','! e)*
# ;
# e : '('! e ')'!
# | 'this'
# | 'super'
# | INT
# | ID
# | type '.'^ 'class'
# | e '.'^ ID
# | e '.'^ 'this'
# | e '.'^ 'super' '('^ expressionList? ')'!
# | e '.'^ 'new'^ ID '('! expressionList? ')'!
# | 'new'^ type ( '(' expressionList? ')'! | (options {k=1;}:'[' e ']'!)+) // ugly; simplified
# | e '['^ e ']'!
# | '('^ type ')'! e
# | e ('++'^ | '--'^)
# | e '('^ expressionList? ')'!
# | ('+'^|'-'^|'++'^|'--'^) e
# | ('~'^|'!'^) e
# | e ('*'^|'/'^|'%'^) e
# | e ('+'^|'-'^) e
# | e ('<'^ '<' | '>'^ '>' '>' | '>'^ '>') e
# | e ('<='^ | '>='^ | '>'^ | '<'^) e
# | e 'instanceof'^ e
# | e ('=='^ | '!='^) e
# | e '&'^ e
# | e '^'<assoc=right>^ e
# | e '|'^ e
# | e '&&'^ e
# | e '||'^ e
# | e '?' e ':' e
# | e ('='<assoc=right>^
# |'+='<assoc=right>^
# |'-='<assoc=right>^
# |'*='<assoc=right>^
# |'/='<assoc=right>^
# |'&='<assoc=right>^
# |'|='<assoc=right>^
# |'^='<assoc=right>^
# |'>>='<assoc=right>^
# |'>>>='<assoc=right>^
# |'<<='<assoc=right>^
# |'%='<assoc=right>^) e
# ;
# type: ID
# | ID '['^ ']'!
# | 'int'
# | 'int' '['^ ']'!
# ;
# ID : ('a'..'z'|'A'..'Z'|'_'|'$')+;
# INT : '0'..'9'+ ;
# WS : (' '|'\n') {self.skip()} ;
# """)
# tests = [
# ("a", "a"),
# ("1", "1"),
# ("a+1", "(+ a 1)"),
# ("a*1", "(* a 1)"),
# ("a.b", "(. a b)"),
# ("a-b+c", "(+ (- a b) c)"),
# ("a+b*c", "(+ a (* b c))"),
# ("a.b+1", "(+ (. a b) 1)"),
# ("-a", "(- a)"),
# ("-a+b", "(+ (- a) b)"),
# ("-a.b", "(- (. a b))"),
# ("a^b^c", "(^ a (^ b c))"),
# ("a=b=c", "(= a (= b c))"),
# ("a=b=c+d.e", "(= a (= b (+ c (. d e))))"),
# ("a|b&c", "(| a (& b c))"),
# ("(a|b)&c", "(& (| a b) c)"),
# ("a > b", "(> a b)"),
# ("a >> b", "(> a b)"), # text is from one token
# ("a < b", "(< a b)"),
# ("(T)x", "(( T x)"),
# ("new A().b", "(. (new A () b)"),
# ("(T)t.f()", "(( (( T (. t f)))"),
# ("a.f(x)==T.c", "(== (( (. a f) x) (. T c))"),
# ("a.f().g(x,1)", "(( (. (( (. a f)) g) x 1)"),
# ("new T[((n-1) * x) + 1]", "(new T [ (+ (* (- n 1) x) 1))"),
# ]
# self.runTests(grammar, tests, "e")
# def testReturnValueAndActions(self):
# grammar = textwrap.dedent(
# r"""
# grammar T;
# options {
# language=Python;
# }
# s : e { self.capture($e.v) } ;
# e returns [v, ignored]
# : e '*' b=e {$v *= $b.v;}
# | e '+' b=e {$v += $b.v;}
# | INT {$v = int($INT.text);}
# ;
# INT : '0'..'9'+ ;
# WS : (' '|'\n') {self.skip()} ;
# """)
# tests = [
# ("4", "4"),
# ("1+2", "3")
# ]
# self.runTests(grammar, tests, "s")
# def testReturnValueAndActionsAndASTs(self):
# grammar = textwrap.dedent(
# r"""
# grammar T;
# options {
# language=Python;
# output=AST;
# }
# s : e { self.capture("v=\%s, " \% $e.v) } ;
# e returns [v, ignored]
# : e '*'^ b=e {$v *= $b.v;}
# | e '+'^ b=e {$v += $b.v;}
# | INT {$v = int($INT.text);}
# ;
# INT : '0'..'9'+ ;
# WS : (' '|'\n') {self.skip()} ;
# """)
# tests = [
# ("4", "v=4, 4"),
# ("1+2", "v=3, (+ 1 2)"),
# ]
# self.runTests(grammar, tests, "s")
# Allow running this test module directly via the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
ChinaQuants/bokeh | examples/charts/file/bar_multi.py | 4 | 1665 | from bokeh.charts import Bar, output_file, show, vplot, hplot, defaults
from bokeh.sampledata.autompg import autompg as df
# Derive a negated-mpg column so the examples below can demonstrate bars
# that extend below zero.
df['neg_mpg'] = 0 - df['mpg']

# Shared chart dimensions for every example on the page.
defaults.width = 450
defaults.height = 350

# Each chart's title spells out the Bar() arguments it demonstrates.
bar_plot = Bar(df, label='cyl', title="label='cyl'")

bar_plot2 = Bar(df, label='cyl', bar_width=0.4, title="label='cyl' bar_width=0.4")

bar_plot3 = Bar(df, label='cyl', values='mpg', agg='mean',
                title="label='cyl' values='mpg' agg='mean'")

bar_plot4 = Bar(df, label='cyl', title="label='cyl' color='DimGray'", color='dimgray')

# multiple columns
bar_plot5 = Bar(df, label=['cyl', 'origin'], values='mpg', agg='mean',
                title="label=['cyl', 'origin'] values='mpg' agg='mean'")

bar_plot6 = Bar(df, label='origin', values='mpg', agg='mean', stack='cyl',
                title="label='origin' values='mpg' agg='mean' stack='cyl'",
                legend='top_right')

bar_plot7 = Bar(df, label='cyl', values='displ', agg='mean', group='origin',
                title="label='cyl' values='displ' agg='mean' group='origin'",
                legend='top_right')

# Uses the negated column, so these grouped bars drop below the axis.
bar_plot8 = Bar(df, label='cyl', values='neg_mpg', agg='mean', group='origin',
                color='origin', legend='top_right',
                title="label='cyl' values='neg_mpg' agg='mean' group='origin'")

# infer labels from index
df = df.set_index('cyl')
bar_plot9 = Bar(df, values='mpg', agg='mean', legend='top_right', title='inferred labels')

# collect and display: a 3x3 grid of the charts above.
output_file("bar_multi.html")

show(
    vplot(
        hplot(bar_plot, bar_plot2, bar_plot3),
        hplot(bar_plot4, bar_plot5, bar_plot6),
        hplot(bar_plot7, bar_plot8, bar_plot9)
    )
)
| bsd-3-clause |
vbannai/neutron | neutron/db/migration/__init__.py | 8 | 2135 | # Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
from alembic import op
import sqlalchemy as sa
OVS_PLUGIN = ('neutron.plugins.openvswitch.ovs_neutron_plugin'
'.OVSNeutronPluginV2')
CISCO_PLUGIN = 'neutron.plugins.cisco.network_plugin.PluginV2'
def should_run(active_plugins, migrate_plugins):
    """Return a truthy value when this migration applies to the
    currently active plugins.

    A '*' entry in `migrate_plugins` makes the migration unconditional;
    otherwise the result is the (possibly empty) intersection of the two
    plugin sets.
    """
    # Wildcard: the migration targets every plugin.
    if '*' in migrate_plugins:
        return True
    # Migrations written for the OVS plugin are also applied for the
    # Cisco plugin. NOTE: this appends to the caller's list, exactly as
    # the original implementation did.
    if OVS_PLUGIN in migrate_plugins and CISCO_PLUGIN not in migrate_plugins:
        migrate_plugins.append(CISCO_PLUGIN)
    return set(active_plugins) & set(migrate_plugins)
def alter_enum(table, column, enum_type, nullable):
    """Change `column` of `table` to the enum type `enum_type`.

    On PostgreSQL an enum column's type cannot simply be altered in
    place, so the old type is renamed aside, the new type is created,
    values are copied across via a text cast, and the old column and
    type are dropped. Other backends take the plain ALTER COLUMN path.
    """
    bind = op.get_bind()
    engine = bind.engine
    if engine.name == 'postgresql':
        values = {'table': table,
                  'column': column,
                  'name': enum_type.name}
        # Move the existing type out of the way so the new definition
        # can be created under the original name.
        op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values)
        enum_type.create(bind, checkfirst=False)
        # Keep the old column (renamed) so its data can be copied over.
        op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO "
                   "old_%(column)s" % values)
        op.add_column(table, sa.Column(column, enum_type, nullable=nullable))
        # Convert each value through text into the new enum type.
        op.execute("UPDATE %(table)s SET %(column)s = "
                   "old_%(column)s::text::%(name)s" % values)
        op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values)
        op.execute("DROP TYPE old_%(name)s" % values)
    else:
        # Non-PostgreSQL backends support altering the type directly.
        op.alter_column(table, column, type_=enum_type,
                        existing_nullable=nullable)
| apache-2.0 |
boberfly/gaffer | python/GafferTest/NameValuePlugTest.py | 4 | 14807 | ##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferTest
class NameValuePlugTest( GafferTest.TestCase ) :
	"""Tests for Gaffer.NameValuePlug : construction forms, serialisation,
	counterpart creation, repr, supported value types and flag handling."""

	def assertPlugSerialises( self, plug ):
		"""Round-trip `plug` through script serialisation and assert the
		deserialised copy matches the original (name, direction, flags,
		children and their values/defaults)."""
		s = Gaffer.ScriptNode()
		s["n"] = Gaffer.Node()
		s["n"]["p"] = plug

		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )

		self.assertEqual( s2["n"]["p"].getName(), plug.getName() )
		self.assertEqual( s2["n"]["p"].direction(), plug.direction() )
		self.assertEqual( s2["n"]["p"].getFlags(), plug.getFlags() )
		self.assertEqual( s2["n"]["p"].keys(), plug.keys() )
		self.assertEqual( s2["n"]["p"]["value"].getValue(), plug["value"].getValue() )
		self.assertEqual( s2["n"]["p"]["value"].defaultValue(), plug["value"].defaultValue() )
		self.assertEqual( s2["n"]["p"]["name"].getValue(), plug["name"].getValue() )
		self.assertEqual( s2["n"]["p"]["name"].defaultValue(), plug["name"].defaultValue() )

		# The optional "enable" child only exists for the *Enable constructor forms.
		if "enable" in plug.keys():
			self.assertEqual( s2["n"]["p"]["enable"].getValue(), plug["enable"].getValue() )
			self.assertEqual( s2["n"]["p"]["enable"].defaultValue(), plug["enable"].defaultValue() )

		# NOTE(review): `plug` is always a NameValuePlug here, so this isinstance
		# check never appears to be true - presumably `plug["value"]` was meant.
		# Confirm before relying on the min/max round-trip checks below.
		if isinstance( plug, Gaffer.IntPlug ):
			self.assertEqual( s2["n"]["p"]["value"].minValue(), plug.minValue() )
			self.assertEqual( s2["n"]["p"]["value"].maxValue(), plug.maxValue() )

	def assertCounterpart( self, plug ):
		"""Create an Out-direction counterpart of `plug` and assert it mirrors
		the original's flags, children and values."""
		p2 = plug.createCounterpart( "testName", Gaffer.Plug.Direction.Out )

		self.assertEqual( p2.getName(), "testName" )
		self.assertEqual( p2.direction(), Gaffer.Plug.Direction.Out )
		self.assertEqual( p2.getFlags(), plug.getFlags() )
		self.assertEqual( p2.keys(), plug.keys() )

		if "value" in plug.keys():
			self.assertEqual( p2["value"].getValue(), plug["value"].getValue() )
			self.assertEqual( p2["value"].defaultValue(), plug["value"].defaultValue() )

		if "name" in plug.keys():
			self.assertEqual( p2["name"].getValue(), plug["name"].getValue() )
			self.assertEqual( p2["name"].defaultValue(), plug["name"].defaultValue() )

		if "enable" in plug.keys():
			self.assertEqual( p2["enable"].getValue(), plug["enable"].getValue() )
			self.assertEqual( p2["enable"].defaultValue(), plug["enable"].defaultValue() )

		# NOTE(review): as in assertPlugSerialises, `plug` is a NameValuePlug, so
		# this isinstance check never appears to be true - confirm the intent.
		if isinstance( plug, Gaffer.IntPlug ):
			self.assertEqual( p2.minValue(), plug.minValue() )
			self.assertEqual( p2.maxValue(), plug.maxValue() )

	def test( self ) :
		"""Exercise every constructor form of NameValuePlug, both with default
		and with explicitly specified name/direction/flags arguments."""
		constructed = {}
		constructed["defaults"] = {}
		constructed["specified"] = {}

		constructed["defaults"]["empty"] = Gaffer.NameValuePlug()
		constructed["defaults"]["partialEmpty"] = Gaffer.NameValuePlug()
		constructed["defaults"]["partialEmpty"].addChild( Gaffer.StringPlug( "name", defaultValue = "key") )

		# Note that if we specify the direction and flags without specifying argument names, this is ambiguous
		# with the later forms of the constructor. I guess this is OK since the old serialised forms
		# of MemberPlug do include the argument names, and we want to deprecate this form anyway
		constructed["specified"]["empty"] = Gaffer.NameValuePlug( "foo", direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		constructed["specified"]["partialEmpty"] = Gaffer.NameValuePlug( "foo", direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		constructed["specified"]["partialEmpty"].addChild( Gaffer.StringPlug( "name", direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, defaultValue = "key" ) )

		constructed["defaults"]["fromData"] = Gaffer.NameValuePlug( "key", IECore.IntData(42) )
		constructed["specified"]["fromData"] = Gaffer.NameValuePlug( "key", IECore.IntData(42), "foo", Gaffer.Plug.Direction.Out, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

		constructed["defaults"]["fromPlug"] = Gaffer.NameValuePlug( "key", Gaffer.IntPlug( minValue = -3, maxValue = 5) )
		constructed["specified"]["fromPlug"] = Gaffer.NameValuePlug( "key", Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), "foo" )

		constructed["defaults"]["fromDataEnable"] = Gaffer.NameValuePlug( "key", IECore.IntData(42), True )
		constructed["specified"]["fromDataEnable"] = Gaffer.NameValuePlug( "key", IECore.IntData(42), True, "foo", Gaffer.Plug.Direction.Out, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

		constructed["defaults"]["fromPlugEnable"] = Gaffer.NameValuePlug( "key", Gaffer.IntPlug(), True )
		constructed["specified"]["fromPlugEnable"] = Gaffer.NameValuePlug( "key", Gaffer.IntPlug( minValue = -7, maxValue = 15, direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) , True, "foo" )

		# NOTE(review): "partialEmpty" is constructed above but is absent from this
		# list, so the `elif k == "partialEmpty"` branches below never execute.
		# Confirm whether that key should be iterated too.
		for k in [ "empty", "fromData", "fromPlug", "fromDataEnable", "fromPlugEnable" ]:
			defa = constructed["defaults"][k]
			spec = constructed["specified"][k]

			# "*Enable" forms have name/value/enable children; plain forms have
			# name/value; the deprecated "empty" form has none yet.
			numChildren = 3 if "Enable" in k else 2
			if k == "empty":
				numChildren = 0
			self.assertEqual( len( spec.children() ), numChildren )
			self.assertEqual( len( defa.children() ), numChildren )

			self.assertEqual( defa.getName(), "NameValuePlug" )
			self.assertEqual( spec.getName(), "foo" )
			self.assertEqual( defa.direction(), Gaffer.Plug.Direction.In )
			self.assertEqual( spec.direction(), Gaffer.Plug.Direction.Out )
			self.assertEqual( defa.getFlags(), Gaffer.Plug.Flags.Default )
			self.assertEqual( spec.getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

			if k == "empty":
				self.assertNotIn( "name", defa )
				self.assertNotIn( "name", spec )
				self.assertNotIn( "value", defa )
				self.assertNotIn( "value", spec )
			elif k == "partialEmpty":
				self.assertEqual( defa["name"].getValue(), "key" )
				self.assertEqual( spec["name"].getValue(), "key" )
				self.assertNotIn( "value", defa )
				self.assertNotIn( "value", spec )
			else:
				self.assertEqual( defa["name"].getValue(), "key" )
				self.assertEqual( spec["name"].getValue(), "key" )
				if "fromPlug" in k:
					# Plug-based forms start at the IntPlug's default of 0.
					self.assertEqual( defa["value"].getValue(), 0 )
					self.assertEqual( spec["value"].getValue(), 0 )
				else:
					self.assertEqual( defa["value"].getValue(), 42 )
					self.assertEqual( spec["value"].getValue(), 42 )

			if k == "empty":
				# A completely empty NameValuePlug is invalid, but we have to partially
				# support it because old serialisation code will create these before
				# the addChild's run to create name and value
				self.assertCounterpart( defa )
				self.assertCounterpart( spec )

				# We shouldn't ever serialise invalid plugs though - if the children
				# haven't been created by the time we try to serialise, that's a bug
				self.assertRaises( RuntimeError, self.assertPlugSerialises, spec )
			elif k == "partialEmpty":
				# A NameValuePlug with a name but no value, on the other hand, is just
				# broken
				self.assertRaises( RuntimeError, self.assertPlugSerialises, spec )
				self.assertRaises( RuntimeError, self.assertCounterpart, defa )
				self.assertRaises( RuntimeError, self.assertCounterpart, spec )
			else:
				self.assertPlugSerialises( spec )
				self.assertCounterpart( defa )
				self.assertCounterpart( spec )

	def testBasicRepr( self ) :
		"""repr() of a fully-formed plug reproduces the constructor call."""
		p = Gaffer.NameValuePlug( "key", IECore.StringData( "value" ) )
		self.assertEqual(
			repr( p ),
			'Gaffer.NameValuePlug( "key", Gaffer.StringPlug( "value", defaultValue = \'value\', ), "NameValuePlug", Gaffer.Plug.Flags.Default )'
		)

	def testEmptyPlugRepr( self ) :
		"""repr() of a child-less plug is an error rather than bad output."""
		# Use the deprecated constructor to create a NameValuePlug without name or value
		p = Gaffer.NameValuePlug( "mm", direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		self.assertRaises( RuntimeError, repr, p )

	def testValueTypes( self ) :
		"""Each supported IECore data type round-trips through the "value" child."""
		for v in [
			IECore.FloatVectorData( [ 1, 2, 3 ] ),
			IECore.IntVectorData( [ 1, 2, 3 ] ),
			IECore.StringVectorData( [ "1", "2", "3" ] ),
			IECore.V3fVectorData( [ imath.V3f( x ) for x in range( 1, 5 ) ] ),
			IECore.Color3fVectorData( [ imath.Color3f( x ) for x in range( 1, 5 ) ] ),
			IECore.M44fVectorData( [ imath.M44f() * x for x in range( 1, 5 ) ] ),
			IECore.V2iVectorData( [ imath.V2i( x ) for x in range( 1, 5 ) ] ),
			IECore.V3fData( imath.V3f( 1, 2, 3 ) ),
			IECore.V2fData( imath.V2f( 1, 2 ) ),
			IECore.M44fData( imath.M44f( *range(16) ) ),
			IECore.Box2fData( imath.Box2f( imath.V2f( 0, 1 ), imath.V2f( 1, 2 ) ) ),
			IECore.Box2iData( imath.Box2i( imath.V2i( -1, 10 ), imath.V2i( 11, 20 ) ) ),
			IECore.Box3fData( imath.Box3f( imath.V3f( 0, 1, 2 ), imath.V3f( 3, 4, 5 ) ) ),
			IECore.Box3iData( imath.Box3i( imath.V3i( 0, 1, 2 ), imath.V3i( 3, 4, 5 ) ) ),
			IECore.InternedStringVectorData( [ "a", "b" ] )
		]:
			# Simple data wrappers expose a .value; compound data compares directly.
			if 'value' in dir( v ):
				expected = v.value
			else:
				expected = v
			self.assertEqual( expected, Gaffer.NameValuePlug( "test", v )["value"].getValue() )

	def testTransformPlug( self ) :
		"""A compound plug such as TransformPlug is accepted as the value."""
		p = Gaffer.NameValuePlug( "a", Gaffer.TransformPlug() )
		self.assertEqual( p["value"].matrix(), imath.M44f() )

	def testAdditionalChildrenRejected( self ) :
		"""Once built, a NameValuePlug rejects any further children - even ones
		reusing the reserved "name"/"value" names."""
		m = Gaffer.NameValuePlug( "a", IECore.IntData( 10 ) )
		self.assertRaises( RuntimeError, m.addChild, Gaffer.IntPlug() )
		self.assertRaises( RuntimeError, m.addChild, Gaffer.StringPlug( "name" ) )
		self.assertRaises( RuntimeError, m.addChild, Gaffer.IntPlug( "name" ) )
		self.assertRaises( RuntimeError, m.addChild, Gaffer.IntPlug( "value" ) )

	def testDefaultValues( self ) :
		"""Defaults and current values follow the constructor arguments.

		NOTE(review): these assertTrue calls pass the expected value as the
		second argument, which assertTrue treats as a failure *message* - so
		they only check truthiness. They were presumably meant to be
		assertEqual; confirm before tightening.
		"""
		m = Gaffer.NameValuePlug( "a", IECore.IntData( 10 ) )
		self.assertTrue( m["value"].defaultValue(), 10 )
		self.assertTrue( m["value"].getValue(), 10 )
		self.assertTrue( m["name"].defaultValue(), "a" )
		self.assertTrue( m["name"].getValue(), "a" )

		m = Gaffer.NameValuePlug( "b", IECore.FloatData( 20 ) )
		self.assertTrue( m["value"].defaultValue(), 20 )
		self.assertTrue( m["value"].getValue(), 20 )
		self.assertTrue( m["name"].defaultValue(), "b" )
		self.assertTrue( m["name"].getValue(), "b" )

		m = Gaffer.NameValuePlug( "c", IECore.StringData( "abc" ) )
		self.assertTrue( m["value"].defaultValue(), "abc" )
		self.assertTrue( m["value"].getValue(), "abc" )
		self.assertTrue( m["name"].defaultValue(), "c" )
		self.assertTrue( m["name"].getValue(), "c" )

	def testNonValuePlugs( self ) :
		"""NameValuePlug also works when the value child is a plain (non-value)
		Plug : connections, defaults, setFrom and hashing all behave."""
		p1 = Gaffer.NameValuePlug( "name", Gaffer.Plug(), name = "p1", defaultEnabled = False )
		p2 = p1.createCounterpart( "p2", Gaffer.Plug.Direction.In )

		self.assertTrue( p1.settable() )
		self.assertTrue( p2.settable() )

		# Connecting p2's children to p1's makes p2 non-settable.
		p2.setInput( p1 )
		self.assertEqual( p2["name"].getInput(), p1["name"] )
		self.assertEqual( p2["value"].getInput(), p1["value"] )
		self.assertTrue( p1.settable() )
		self.assertFalse( p2.settable() )

		p2.setInput( None )
		self.assertTrue( p2.settable() )

		self.assertTrue( p1.isSetToDefault() )
		p1["name"].setValue( "nonDefault" )
		self.assertFalse( p1.isSetToDefault() )
		p1.setToDefault()
		self.assertTrue( p1.isSetToDefault() )

		p1["name"].setValue( "nonDefault" )
		p1["enabled"].setValue( True )
		p2.setFrom( p1 )
		self.assertEqual( p2["name"].getValue(), p1["name"].getValue() )
		self.assertEqual( p2["enabled"].getValue(), p1["enabled"].getValue() )
		self.assertEqual( p1.hash(), p2.hash() )
		p2["enabled"].setValue( False )
		self.assertNotEqual( p1.hash(), p2.hash() )

	def testDynamicFlags( self ) :
		"""Dynamic flags on the parent plug survive repeated serialisation, and
		children keep plain Default flags."""

		def assertFlags( script ) :
			# p1 : name/value children only.
			self.assertEqual( script["n"]["user"]["p1"].getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
			self.assertEqual( script["n"]["user"]["p1"]["name"].getFlags(), Gaffer.Plug.Flags.Default )
			self.assertEqual( script["n"]["user"]["p1"]["value"].getFlags(), Gaffer.Plug.Flags.Default )
			c = script["n"]["user"]["p1"].createCounterpart( "c", Gaffer.Plug.Direction.In )
			self.assertEqual( c.getFlags(), script["n"]["user"]["p1"].getFlags() )

			# p2 : additionally has an "enabled" child (defaultEnabled = False).
			self.assertEqual( script["n"]["user"]["p2"].getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
			self.assertEqual( script["n"]["user"]["p2"]["name"].getFlags(), Gaffer.Plug.Flags.Default )
			self.assertEqual( script["n"]["user"]["p2"]["value"].getFlags(), Gaffer.Plug.Flags.Default )
			self.assertEqual( script["n"]["user"]["p2"]["enabled"].getFlags(), Gaffer.Plug.Flags.Default )
			c = script["n"]["user"]["p2"].createCounterpart( "c", Gaffer.Plug.Direction.In )
			self.assertEqual( c.getFlags(), script["n"]["user"]["p2"].getFlags() )

		s = Gaffer.ScriptNode()
		s["n"] = Gaffer.Node()
		s["n"]["user"]["p1"] = Gaffer.NameValuePlug( "name1", Gaffer.IntPlug( defaultValue = 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		s["n"]["user"]["p2"] = Gaffer.NameValuePlug( "name2", Gaffer.IntPlug( defaultValue = 1 ), defaultEnabled = False, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		assertFlags( s )

		# Round-trip twice to make sure flags are stable, not just preserved once.
		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )
		assertFlags( s2 )

		s3 = Gaffer.ScriptNode()
		s3.execute( s2.serialise() )
		assertFlags( s3 )
# Allow the test module to be run directly as a script.
if __name__ == "__main__":
	unittest.main()
| bsd-3-clause |
Kallehz/Python | Próf2/3.py | 1 | 2505 | # Numbers game
#
# Hawk and his little brother, Stone, are playing a little game with
# the following rules. Initially 8 random integers, from 1 to 100 (inclusive),
# are laid on the table for both players to see. The players then have
# 2 minutes to construct a sequence of numbers, from the given 8 numbers, with the following conditions.
#
# The sum of two adjacent numbers cannot be divisible by 3.
# Immediately to the right of each even number must
# be an odd number or an even number that starts with the digit 1.
# Immediately to the right of each odd number must be either a larger
# even number or an odd number that ends with the digit 5.
# The player who constructs the longer sequence wins.
# Just before the two minutes are up, Stone notices that he
# can construct a legal sequence using all 8 numbers. However, in all the excitement,
# he knocks all the numbers off the table, and cannot remember how to construct the sequence.
# Hawk is very sceptical of his brother, so he asks you to write a program that determines whether
# his brother is telling the truth or not.
#
# Write a function numbers_game that takes a list of 8 integers
# (in the range from 1 to 100) as input. The function returns
# True if it is possible to construct a legal sequence from the 8 integers;
# False otherwise.
from itertools import permutations
def _valid_pair(left, right):
    """Return True if `right` may legally follow `left` in the sequence.

    Rules:
    - the sum of the two adjacent numbers must not be divisible by 3;
    - after an even number: an odd number, or an even number whose decimal
      representation starts with the digit 1;
    - after an odd number: a larger even number, or an odd number whose
      decimal representation ends with the digit 5.
    """
    if (left + right) % 3 == 0:
        return False
    if left % 2 == 0:
        return right % 2 != 0 or str(right).startswith('1')
    return (right % 2 == 0 and right > left) or (right % 2 != 0 and str(right).endswith('5'))


def numbers_game(lis):
    """Return True if some ordering of `lis` satisfies all adjacency rules.

    An empty (or single-element) list is trivially orderable and returns True.
    Duplicate permutations are collapsed with set() and each permutation is
    abandoned at the first invalid adjacent pair, instead of always checking
    every pair as the original implementation did.
    """
    for perm in set(permutations(lis)):
        if all(_valid_pair(a, b) for a, b in zip(perm, perm[1:])):
            return True
    return False
# Ad-hoc smoke tests; the expected output is noted after each call.
print(numbers_game([68, 45, 19, 54, 56, 51, 94, 7]))
# True
print(numbers_game([3, 2, 4, 6, 3, 3, 15, 3]))
# False
print(numbers_game([61, 50, 35, 53, 5, 45, 41, 6]))
# True | apache-2.0 |
splanger/dbmake | dbmake/dbmake.py | 1 | 1926 | #!/usr/bin/python
import sys
from .common import FAILURE, SUCCESS
from .dbmake_cli import get_command, print_help, get_command_class_reference
from .common import CommandNotExists, BadCommandArguments, DBMAKE_VERSION
class App:
    """Command-line front end for dbmake.

    Parses an argv-style argument list and dispatches to the matching
    command implementation from dbmake_cli.
    """

    def run(self, args=sys.argv):
        """Execute the command named in *args*.

        *args* is argv-shaped: args[0] is the script name, args[1] the
        command (or -h/--help/-v/--version), and the rest are command
        arguments.

        Returns SUCCESS/FAILURE, or the executed command's own result.
        """
        # Work on a copy: the original implementation popped items off the
        # caller's list, silently mutating the real sys.argv.
        args = list(args)

        # Pop the script's name from arguments and fetch the command name
        try:
            args.pop(0)
            command_name = args.pop(0)
        except IndexError:
            print_help()
            return FAILURE

        # Print help if necessary
        if command_name in ('-h', '--help'):
            if len(args) > 0:
                # "dbmake --help <command>": show that command's own help.
                command_name = args.pop(0)
                try:
                    command_class = get_command_class_reference(command_name)
                    command_class.print_help()
                except AttributeError:
                    print("Error! No such a command %s" % command_name)
                    print_help()
                    return FAILURE
                return SUCCESS
            else:
                print_help()
                return SUCCESS
        elif command_name in ('-v', '--version'):
            print(DBMAKE_VERSION)
            return SUCCESS

        # Get a command instance to execute and execute it
        try:
            command = get_command(command_name, args)
            result = command.execute()
        except CommandNotExists:
            print("Unrecognized command!")
            print_help()
            return FAILURE
        except BadCommandArguments:
            print("Bad command arguments")
            return FAILURE

        return result
def main(argv=sys.argv):
    """Console entry point: run the application and exit with its status."""
    sys.exit(App().run(argv))
# Allow direct `python dbmake.py ...` execution as well as the console-script entry point.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| apache-2.0 |
huongttlan/bokeh | examples/plotting/file/glyphs.py | 43 | 4021 | import numpy as np
from bokeh.plotting import figure, show, output_file, vplot

# Shared sample data for every glyph demo below.
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)

# A small polygon outline, re-centred on each (x, y) point by the
# multi_line/patches examples.
xpts = np.array([-.09, -.12, .0, .12, .09])
ypts = np.array([-.1, .02, .1, .02, -.1])

output_file("glyphs.html", title="glyphs.py example")

# Use a distinct name for the layout container so the imported `vplot`
# factory is not shadowed by its own return value.
layout = vplot()

p = figure(title="annular_wedge")
p.annular_wedge(x, y, 10, 20, 0.6, 4.1, color="#8888ee",
                inner_radius_units="screen", outer_radius_units="screen")
layout.children.append(p)

# Fixed: this figure draws an annulus, but was mistitled "annular_wedge".
p = figure(title="annulus")
p.annulus(x, y, 10, 20, color="#7FC97F",
          inner_radius_units="screen", outer_radius_units="screen")
layout.children.append(p)

p = figure(title="arc")
p.arc(x, y, 20, 0.6, 4.1, radius_units="screen", color="#BEAED4", line_width=3)
layout.children.append(p)

p = figure(title="bezier")
p.bezier(x, y, x+0.2, y, x+0.1, y+0.1, x-0.1, y-0.1, color="#D95F02", line_width=2)
layout.children.append(p)

p = figure(title="circle")
p.circle(x, y, radius=0.1, color="#3288BD")
layout.children.append(p)

p = figure(title="line")
p.line(x, y, color="#F46D43")
layout.children.append(p)

p = figure(title="multi_line")
p.multi_line([xpts+xx for xx in x], [ypts+yy for yy in y],
             color="#8073AC", line_width=2)
layout.children.append(p)

p = figure(title="oval")
p.oval(x, y, 15, 25, angle=-0.7, color="#1D91C0",
       width_units="screen", height_units="screen")
layout.children.append(p)

p = figure(title="patch")
p.patch(x, y, color="#A6CEE3")
layout.children.append(p)

p = figure(title="patches")
p.patches([xpts+xx for xx in x], [ypts+yy for yy in y], color="#FB9A99")
layout.children.append(p)

p = figure(title="quad")
p.quad(x, x-0.1, y, y-0.1, color="#B3DE69")
layout.children.append(p)

p = figure(title="quadratic")
p.quadratic(x, y, x+0.2, y, x+0.3, y+1.4, color="#4DAF4A", line_width=3)
layout.children.append(p)

p = figure(title="ray")
p.ray(x, y, 45, -0.7, color="#FB8072", line_width=2)
layout.children.append(p)

p = figure(title="rect")
p.rect(x, y, 10, 20, color="#CAB2D6", width_units="screen", height_units="screen")
layout.children.append(p)

p = figure(title="segment")
p.segment(x, y, x-0.1, y-0.1, color="#F4A582", line_width=3)
layout.children.append(p)

p = figure(title="square")
p.square(x, y, size=sizes, color="#74ADD1")
layout.children.append(p)

p = figure(title="wedge")
p.wedge(x, y, 15, 0.6, 4.1, radius_units="screen", color="#B3DE69")
layout.children.append(p)

p = figure(title="circle_x")
p.scatter(x, y, marker="circle_x", size=sizes, color="#DD1C77", fill_color=None)
layout.children.append(p)

p = figure(title="triangle")
p.scatter(x, y, marker="triangle", size=sizes, color="#99D594", line_width=2)
layout.children.append(p)

p = figure(title="circle")
p.scatter(x, y, marker="o", size=sizes, color="#80B1D3", line_width=3)
layout.children.append(p)

p = figure(title="cross")
p.scatter(x, y, marker="cross", size=sizes, color="#E6550D", line_width=2)
layout.children.append(p)

p = figure(title="diamond")
p.scatter(x, y, marker="diamond", size=sizes, color="#1C9099", line_width=2)
layout.children.append(p)

p = figure(title="inverted_triangle")
p.scatter(x, y, marker="inverted_triangle", size=sizes, color="#DE2D26")
layout.children.append(p)

p = figure(title="square_x")
p.scatter(x, y, marker="square_x", size=sizes, color="#FDAE6B",
          fill_color=None, line_width=2)
layout.children.append(p)

p = figure(title="asterisk")
p.scatter(x, y, marker="asterisk", size=sizes, color="#F0027F", line_width=2)
layout.children.append(p)

p = figure(title="square_cross")
p.scatter(x, y, marker="square_cross", size=sizes, color="#7FC97F",
          fill_color=None, line_width=2)
layout.children.append(p)

p = figure(title="diamond_cross")
p.scatter(x, y, marker="diamond_cross", size=sizes, color="#386CB0",
          fill_color=None, line_width=2)
layout.children.append(p)

p = figure(title="circle_cross")
p.scatter(x, y, marker="circle_cross", size=sizes, color="#FB8072",
          fill_color=None, line_width=2)
layout.children.append(p)

show(layout)  # open a browser
| bsd-3-clause |
CopeX/odoo | addons/l10n_at/__init__.py | 438 | 1050 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jacobmetrick/Flexget | flexget/plugins/list/subtitle_list.py | 3 | 13832 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import logging
import os
from collections import MutableSet
from datetime import datetime, date, time
from sqlalchemy import Column, Unicode, Integer, ForeignKey, func, DateTime, and_
from sqlalchemy.orm import relationship
from babelfish import Language
from flexget import plugin
from flexget.manager import Session
from flexget.db_schema import versioned_base, with_session
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.tools import parse_timedelta
from flexget.utils.template import RenderError
# Module-level logger and versioned declarative base for this plugin's tables.
log = logging.getLogger('subtitle_list')
Base = versioned_base('subtitle_list', 1)

#: Video extensions stolen from https://github.com/Diaoul/subliminal/blob/master/subliminal/video.py
VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik',
                    '.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli',
                    '.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e',
                    '.m4v', '.mjp', '.mjpeg', '.mjpg', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx', '.mp4',
                    '.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.omf', '.ps',
                    '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv', '.vivo', '.vob',
                    '.vro', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid')
def normalize_language(language):
    """Return the canonical string form of a language.

    Accepts either a babelfish Language instance or an IETF language-tag
    string and normalises both to the same textual representation.
    """
    converted = language if isinstance(language, Language) else Language.fromietf(language)
    return str(converted)
def normalize_path(path):
    """Return an absolute, normalised form of *path*, or None for a falsy path."""
    if not path:
        return None
    return os.path.normpath(os.path.abspath(path))
class SubtitleListList(Base):
    """A named subtitle list; owns a dynamic collection of SubtitleListFile rows."""
    __tablename__ = 'subtitle_list_lists'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode, unique=True)  # list name, unique across all lists
    added = Column(DateTime, default=datetime.now)
    # Deleting a list deletes its files; lazy='dynamic' exposes a query object.
    files = relationship('SubtitleListFile', backref='list', cascade='all, delete, delete-orphan', lazy='dynamic')

    def __repr__(self):
        return '<SubtitleListList name=%s,id=%d>' % (self.name, self.id)

    def to_dict(self):
        """Return a plain-dict representation for API responses."""
        return {
            'id': self.id,
            'name': self.name,
            'added_on': self.added
        }
class SubtitleListFile(Base):
    """One file (or directory) entry in a subtitle list, with wanted languages."""
    __tablename__ = 'subtitle_list_files'
    id = Column(Integer, primary_key=True)
    added = Column(DateTime, default=datetime.now)
    title = Column(Unicode)
    location = Column(Unicode)  # normalised absolute path on disk
    list_id = Column(Integer, ForeignKey(SubtitleListList.id), nullable=False)
    languages = relationship('SubtitleListLanguage', backref='file', lazy='joined', cascade='all, delete-orphan')
    # Interval string (e.g. "7 days") after which the entry expires; may be None.
    remove_after = Column(Unicode)

    def __repr__(self):
        return '<SubtitleListFile title=%s,path=%s,list_name=%s>' % (self.title, self.location, self.list.name)

    def to_entry(self):
        """Build a FlexGet Entry mirroring this row; languages become
        babelfish Language objects under 'subtitle_languages'."""
        entry = Entry()
        entry['title'] = self.title
        # Synthetic URL: entries must have one, but these represent local files.
        entry['url'] = 'mock://localhost/subtitle_list/%d' % self.id
        entry['location'] = self.location
        entry['remove_after'] = self.remove_after
        entry['added'] = self.added
        entry['subtitle_languages'] = []
        for subtitle_language in self.languages:
            entry['subtitle_languages'].append(Language.fromietf(subtitle_language.language))
        return entry

    def to_dict(self):
        """Return a plain-dict representation (language codes as strings)."""
        subtitle_languages = [subtitle_list_language.language for subtitle_list_language in self.languages]
        return {
            'id': self.id,
            'added_on': self.added,
            'title': self.title,
            'location': self.location,
            'subtitle_languages': subtitle_languages
        }
class SubtitleListLanguage(Base):
    """A single wanted language attached to a SubtitleListFile."""
    __tablename__ = 'subtitle_list_languages'
    id = Column(Integer, primary_key=True)
    added = Column(DateTime, default=datetime.now)
    language = Column(Unicode)  # normalised language string (see normalize_language)
    subtitle_list_file_id = Column(Integer, ForeignKey('subtitle_list_files.id'))
class SubtitleList(MutableSet):
    """Set-like façade over one named subtitle list persisted in the database.

    Items are FlexGet entries; membership is keyed on the entry's normalised
    local path (see _extract_path / _find_entry).
    """
    schema = {
        'type': 'object',
        'properties': {
            'list': {'type': 'string'},
            'languages': {'type': 'array', 'items': {'type': 'string'}, 'minItems': 1},
            'check_subtitles': {'type': 'boolean', 'default': True},
            'remove_after': {'type': 'string', 'format': 'interval'},
            'path': {'type': 'string'},
            'allow_dir': {'type': 'boolean', 'default': False},
            'recursion_depth': {'type': 'integer', 'default': 1, 'minimum': 1},
            'force_file_existence': {'type': 'boolean', 'default': True}
        },
        'required': ['list'],
        'additionalProperties': False
    }

    def _db_list(self, session):
        """Return the SubtitleListList row for the configured list name, or None."""
        return session.query(SubtitleListList).filter(SubtitleListList.name == self.config['list']).first()

    def _from_iterable(self, it):
        # TODO: is this the right answer? the returned object won't have our custom __contains__ logic
        return set(it)

    @with_session
    def __init__(self, config, session=None):
        # Creates the backing list row on first use.
        self.config = config
        db_list = self._db_list(session)
        if not db_list:
            session.add(SubtitleListList(name=self.config['list']))

    def __iter__(self):
        # Materialises all rows as entries before the session closes.
        with Session() as session:
            return iter([file.to_entry() for file in self._db_list(session).files])

    def __len__(self):
        with Session() as session:
            return self._db_list(session).files.count()

    def _extract_path(self, entry):
        """Return the normalised local path for *entry*.

        Uses the configured 'path' template when present (rendered against the
        entry), otherwise falls back to the entry's 'location' field.
        """
        path = ''
        if isinstance(self.config.get('path'), basestring):
            try:
                path = entry.render(self.config['path'])
            except RenderError as e:
                log.error(e)
        else:
            path = entry.get('location')
        return normalize_path(path)

    def add(self, entry):
        """Insert (or refresh) *entry* in the list; returns the stored entry.

        Validation: rejects entries with no local path, missing paths when
        force_file_existence is set, and directories unless allow_dir is on.
        """
        with Session() as session:
            path = self._extract_path(entry)
            if not path:
                # NOTE(review): the '%s' placeholder has no argument - the entry
                # is never interpolated into this log message.
                log.error('Entry %s does not represent a local file/dir.')
                return
            path_exists = os.path.exists(path)
            if self.config['force_file_existence'] and not path_exists:
                log.error('Path %s does not exist. Not adding to list.', path)
                return
            elif path_exists and not self.config.get('allow_dir') and os.path.isdir(path):
                log.error('Path %s is a directory and "allow_dir"=%s.', path, self.config['allow_dir'])
                return
            # Check if this is already in the list, refresh info if so
            db_list = self._db_list(session=session)
            db_file = self._find_entry(entry, session=session)
            # Just delete and re-create to refresh
            if db_file:
                session.delete(db_file)
            db_file = SubtitleListFile()
            db_file.title = entry['title']
            db_file.location = path
            db_file.languages = []
            db_file.remove_after = self.config.get('remove_after')
            # NOTE(review): duplicate assignment - languages was already set to [] above.
            db_file.languages = []
            # Deduplicate configured languages after normalisation.
            normalized_languages = {normalize_language(subtitle_language) for subtitle_language in
                                    self.config.get('languages', [])}
            for subtitle_language in normalized_languages:
                language = SubtitleListLanguage(language=subtitle_language)
                db_file.languages.append(language)
            log.debug('adding entry %s with languages %s', entry, normalized_languages)
            db_list.files.append(db_file)
            session.commit()
            return db_file.to_entry()

    def discard(self, entry):
        """Remove *entry* from the list if present (no error when absent)."""
        with Session() as session:
            db_file = self._find_entry(entry, session=session)
            if db_file:
                log.debug('deleting file %s', db_file)
                session.delete(db_file)

    def __contains__(self, entry):
        # Also matches a file entry against a stored parent-directory entry.
        return self._find_entry(entry, match_file_to_dir=True) is not None

    @with_session
    def _find_entry(self, entry, match_file_to_dir=False, session=None):
        """Finds `SubtitleListFile` corresponding to this entry, if it exists.

        With match_file_to_dir, falls back to looking up the path's parent
        directory when the exact path is not stored.
        """
        path = self._extract_path(entry)
        res = self._db_list(session).files.filter(SubtitleListFile.location == path).first()
        if not res and match_file_to_dir:
            path = os.path.dirname(path)
            res = self._db_list(session).files.filter(SubtitleListFile.location == path).first()
        return res

    @with_session
    def _find_language(self, file_id, language, session=None):
        """Return the stored language row for (file_id, language), matched
        case-insensitively, or None."""
        res = session.query(SubtitleListLanguage).filter(and_(
            func.lower(SubtitleListLanguage.language) == str(language).lower(),
            SubtitleListLanguage.subtitle_list_file_id == file_id)).first()
        return res

    @property
    def immutable(self):
        # This list type can always be modified.
        return False

    @property
    def online(self):
        """ Set the online status of the plugin, online plugin should be treated differently in certain situations,
        like test mode"""
        return False

    @with_session
    def get(self, entry, session):
        """Return the stored entry matching *entry*'s path, or None."""
        match = self._find_entry(entry=entry, session=session)
        return match.to_entry() if match else None
class PluginSubtitleList(object):
    """Subtitle list

    Task-input plugin: emits entries for files still missing wanted subtitles,
    expanding directory entries into their contained video files.
    """
    schema = SubtitleList.schema

    @staticmethod
    def get_list(config):
        """Return the backing SubtitleList for the 'list' interface."""
        return SubtitleList(config)

    def all_subtitles_exist(self, file, wanted_languages):
        """Return True if local subtitles already cover every wanted language.

        Returns False when some language is missing, and None (falsy) when
        subliminal is not installed and the check cannot be performed.
        """
        try:
            import subliminal
            existing_subtitles = set(subliminal.core.search_external_subtitles(file).values())
            if wanted_languages and len(wanted_languages - existing_subtitles) == 0:
                log.info('Local subtitle(s) already exists for %s.', file)
                return True
            return False
        except ImportError:
            log.warning('Subliminal not found. Unable to check for local subtitles.')

    def on_task_input(self, task, config):
        """Produce the task's input entries from the configured subtitle list.

        Directory entries are walked (bounded by recursion_depth) and replaced
        by their video files; entries whose subtitles are complete, or which
        have expired, are dropped from the list.
        """
        subtitle_list = SubtitleList(config)
        recursion_depth = config['recursion_depth']
        # A hack to not output certain files without deleting them from the list
        temp_discarded_items = set()
        for item in subtitle_list:
            if not config['force_file_existence'] and not os.path.exists(item['location']):
                # Keep the entry in the list but skip it for this run.
                log.error('File %s does not exist. Skipping.', item['location'])
                temp_discarded_items.add(item)
                continue
            if not os.path.exists(item['location']):
                log.error('File %s does not exist. Removing from list.', item['location'])
                subtitle_list.discard(item)
                continue
            if self._expired(item, config):
                log.info('File %s has been in the list for %s. Removing from list.', item['location'],
                         item['remove_after'] or config['remove_after'])
                subtitle_list.discard(item)
                continue
            # Per-entry languages win over the task-wide configuration.
            languages = set(item['subtitle_languages']) or set(config.get('languages', []))
            num_potential_files = 0
            num_added_files = 0
            if os.path.isdir(item['location']):
                # recursion depth 1 is no recursion
                max_depth = len(normalize_path(item['location']).split(os.sep)) + recursion_depth - 1
                for root_dir, _, files in os.walk(item['location']):
                    current_depth = len(root_dir.split(os.sep))
                    if current_depth > max_depth:
                        break
                    for file in files:
                        if os.path.splitext(file)[1] not in VIDEO_EXTENSIONS:
                            log.debug('File %s is not a video file. Skipping', file)
                            continue
                        num_potential_files += 1
                        file_path = normalize_path(os.path.join(root_dir, file))
                        if not config['check_subtitles'] or not self.all_subtitles_exist(file_path, languages):
                            subtitle_list.config['languages'] = languages
                            subtitle_list.add(Entry(title=os.path.splitext(os.path.basename(file_path))[0],
                                                    url='file://' + file_path, location=file_path))
                            num_added_files += 1
                # delete the original dir if it contains any video files
                if num_added_files or num_potential_files:
                    log.debug('Added %s file(s) from %s to subtitle list %s', num_added_files, item['location'],
                              config['list'])
                    subtitle_list.discard(item)
                else:
                    log.debug('No files found in %s. Skipping.', item['location'])
                    temp_discarded_items.add(item)
            elif config['check_subtitles'] and self.all_subtitles_exist(item['location'], languages):
                subtitle_list.discard(item)
        # Re-read the (possibly modified) list, minus the skipped items.
        return list(set(subtitle_list) - temp_discarded_items)

    @classmethod
    def _expired(cls, file, config):
        """Return True if *file* has been listed longer than its (or the
        config's) remove_after interval; the per-file value takes precedence."""
        added_interval = datetime.combine(date.today(), time()) - file['added']
        if file['remove_after'] and added_interval > parse_timedelta(file['remove_after']):
            return True
        elif config.get('remove_after') and added_interval > parse_timedelta(config['remove_after']):
            return True
        return False
@event('plugin.register')
def register_plugin():
    # Register under both the task-input interface and the generic list interface.
    plugin.register(PluginSubtitleList, 'subtitle_list', api_ver=2, interfaces=['task', 'list'])
| mit |
wevoice/wesub | apps/videos/types/video_google.py | 5 | 1410 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from base import VideoType
from vidscraper.sites import google_video
class GoogleVideoType(VideoType):
    """Video type handler for Google Video (video.google.com) URLs."""

    abbreviation = 'G'
    name = 'video.google.com'
    site = 'video.google.com'

    def convert_to_video_url(self):
        # Normalise the stored URL via the base class helper.
        return self.format_url(self.url)

    @classmethod
    def matches_video_url(cls, url):
        # True when the URL matches vidscraper's Google Video pattern.
        return google_video.GOOGLE_VIDEO_REGEX.match(url) is not None

    def set_values(self, video_obj):
        video_obj.title = google_video.scrape_title(self.url)
        video_obj.description = google_video.scrape_description(self.url)
        # NOTE(review): raised unconditionally after setting title/description;
        # presumably the caller catches Warning to skip thumbnails -- confirm.
        raise Warning('GoogleVideoType does not support thumbnail loading')
| agpl-3.0 |
ElGatoLoco/DRF-Webpack-ES6-React-SASS---starter-kit | backend/main/settings.py | 1 | 3343 | """
Django settings for main project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'a(sm_m6b_9ji0!q)lpi%4m7!kb+=xy8koap@0@#=#ub9787aq^'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # Django contrib apps.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps.
    'debug_toolbar',
    'rest_framework',
    'corsheaders',
    # Local apps.
    'api',
]
# Order matters: CorsMiddleware must appear before CommonMiddleware so that
# CORS headers are also added to redirect responses (django-cors-headers docs).
# The original listed django.middleware.common.CommonMiddleware twice; the
# duplicate entry has been removed.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'main.urls'

TEMPLATES = [
    {
        # Standard Django template engine; per-app template dirs are searched.
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'main.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

DATABASES = {
    'default': {
        # SQLite file in the project root -- development use only.
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'

# NOTE(review): allowing every origin is acceptable for local development only;
# restrict origins before deploying.
CORS_ORIGIN_ALLOW_ALL = True
| mit |
sondree/Master-thesis | Python EA/ea/progress_drawing.py | 1 | 3860 | #!/usr/bin/python
from matplotlib.patches import Rectangle, Circle, RegularPolygon, Arrow
from fitness import FITNESS_SECTION
class BasicDrawing(object):
    """Draws emitter, receiver and prediction markers on a fitness grid."""

    def __init__(self, config):
        """Read grid dimensions and the emitter position from *config*."""
        self.config = config
        self.grid_x = self.config.getint(FITNESS_SECTION, "grid_x")
        self.grid_y = self.config.getint(FITNESS_SECTION, "grid_y")
        self.emitter_x = self.config.getint(FITNESS_SECTION, "emitter_pos_x")
        self.emitter_y = self.config.getint(FITNESS_SECTION, "emitter_pos_y")

    def make_emitter_patch(self):
        """Red square marker at the emitter position, sized to the grid."""
        size = (self.grid_x + self.grid_y) / 2.0 * 0.008
        return RegularPolygon((self.emitter_x, self.emitter_y), 4, size,
                              3.14 / 2.0, facecolor='r', edgecolor='k', alpha=0.8)

    def make_receiver_patch(self, x, y, color='b'):
        """Circular marker for a receiver at (x, y)."""
        size = (self.grid_x + self.grid_y) / 2.0 * 0.005
        return Circle((x, y), size, facecolor=color, edgecolor='k', alpha=0.6)

    def make_prediction_patch(self, x, y):
        """Small faint circle marking a predicted position."""
        size = (self.grid_x + self.grid_y) / 2.0 * 0.003
        return Circle((x, y), size, facecolor='k', edgecolor='k', alpha=0.2)

    def plot_pheno(self, view, pheno, **args):
        """Add the emitter plus one marker per receiver position to *view*."""
        view.add_artist(self.make_emitter_patch())
        for position in pheno.get_position():
            # Positions may be (x, y) or (x, y, z); only x and y are drawn.
            x, y = position[0], position[1]
            view.add_artist(self.make_receiver_patch(x, y))
class PathDrawing(BasicDrawing):
    """BasicDrawing that also renders receiver movement paths as arrows."""

    def make_receiver_path(self, from_x, from_y, to_x, to_y, alpha=0.2):
        """Arrow patch from (from_x, from_y) to (to_x, to_y)."""
        return Arrow(from_x, from_y, to_x - from_x, to_y - from_y,
                     width=1.0, alpha=alpha)

    def plot_pheno(self, view, pheno, draw_points=True, draw_lines=True):
        """Draw the emitter, each receiver's origin, and its waypoint path."""
        view.add_artist(self.make_emitter_patch())
        for idx in xrange(pheno.get_receiver_count()):
            prev_x, prev_y = pheno.get_receiver_origin(idx)
            # Origin marker drawn in green.
            view.add_artist(self.make_receiver_patch(prev_x, prev_y,
                                                     color=(0.0, 1.0, 0.5)))
            for wp_x, wp_y in pheno.get_receiver_path(idx):
                if draw_points:
                    view.add_artist(self.make_receiver_patch(wp_x, wp_y))
                if prev_x is not None and prev_y is not None and draw_lines:
                    view.add_artist(self.make_receiver_path(prev_x, prev_y,
                                                            wp_x, wp_y))
                prev_x, prev_y = wp_x, wp_y
class PathIncrDrawing(PathDrawing):
    """PathDrawing variant that draws the already-fixed path prefix in a
    distinct colour before the remaining candidate path.

    The original duplicated the whole waypoint-drawing loop for the two
    path segments; the shared logic now lives in ``_draw_leg``.
    """

    def _draw_leg(self, view, positions, p_x, p_y, point_color,
                  draw_points, draw_lines):
        """Draw one sequence of waypoints, chaining arrows from (p_x, p_y).

        Returns the final (x, y) so the next leg can continue from it.
        """
        for x, y in positions:
            if draw_points:
                if point_color is None:
                    view.add_artist(self.make_receiver_patch(x, y))
                else:
                    view.add_artist(self.make_receiver_patch(x, y,
                                                             color=point_color))
            if p_x is not None and p_y is not None and draw_lines:
                view.add_artist(self.make_receiver_path(p_x, p_y, x, y))
            p_x, p_y = x, y
        return p_x, p_y

    def plot_pheno(self, view, pheno, draw_points=True, draw_lines=True):
        """Draw emitter, origins, fixed path (orange) and candidate path."""
        view.add_artist(self.make_emitter_patch())
        for index in xrange(pheno.get_receiver_count()):
            p_x, p_y = pheno.get_receiver_origin(index)
            view.add_artist(self.make_receiver_patch(p_x, p_y,
                                                     color=(0.0, 1.0, 0.5)))
            # Already-committed part of the path, drawn in orange.
            p_x, p_y = self._draw_leg(view, pheno.get_receiver_fixed_path(index),
                                      p_x, p_y, (1.0, 0.3, 0),
                                      draw_points, draw_lines)
            # Remaining candidate path, default receiver colour.
            self._draw_leg(view, pheno.get_receiver_path(index),
                           p_x, p_y, None, draw_points, draw_lines)
| gpl-3.0 |
sandyjmacdonald/dots_for_microarrays | dots_backend/dots_analysis.py | 1 | 12261 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import warnings
import pandas as pd
import numpy as np
import scipy.cluster.hierarchy as hac
from dots_arrays import Experiment
from sklearn.decomposition import PCA
from itertools import combinations
from scipy.stats import ttest_ind, f_oneway
from statsmodels.stats.multitest import multipletests
from statsmodels.stats.multicomp import MultiComparison
from sklearn.metrics import silhouette_score, silhouette_samples
from sklearn.cluster import KMeans
## Functions ##
def run_pca(experiment):
    '''Run PCA when given an experiment instance or data frame with expression values.

    Args:
        experiment (Experiment instance): An instance of the Experiment class,
            or a Pandas data frame of expression values.

    Returns:
        A Pandas data frame with results of PCA analysis.
    '''
    ## Accept either an Experiment or a raw expression DataFrame; samples
    ## become rows after transposing.
    if isinstance(experiment, Experiment):
        expr = experiment.get_exp_values().T
    elif isinstance(experiment, pd.DataFrame):
        expr = experiment.T
    ## Fit a 3-component PCA; only the first two components are reported.
    scores = PCA(n_components=3).fit_transform(expr)
    sample_ids = list(expr.index)
    ## Group label is the sample id prefix before the first underscore.
    return pd.DataFrame({
        'xvals': [score[0] for score in scores],
        'yvals': [score[1] for score in scores],
        'sampleid': sample_ids,
        'group': [sid.split('_')[0] for sid in sample_ids],
    })
def get_fold_changes(experiment):
    '''Calculate pairwise fold change and log fold change values.

    Args:
        experiment (Experiment instance): An instance of the Experiment class.

    Returns:
        A new Pandas data frame with the five annotation columns, per-group
        means, and pairwise abs-mean-difference, log2 fold change and fold
        change columns.
    '''
    groups = experiment.get_groups()
    ## All unordered group pairs, as mutable lists so they can be re-sorted.
    ## (List comprehension rather than map() so it also works on Python 3.)
    pairs = [list(pair) for pair in combinations(groups, 2)]
    if all(g.isdigit() for g in groups):
        pairs = sorted(pairs, key=lambda x: x[0])
    samples = experiment.get_sampleids()
    df = experiment.df
    ## First five columns are annotation; .iloc replaces the .ix indexer,
    ## which was removed in pandas 1.0.
    new_df = df.iloc[:, :5].copy()
    ## Mean (log2) expression per group across that group's samples.
    for group in groups:
        ids = [sample for sample in samples if group == sample.split('_')[0]]
        new_df['mean_' + group] = df[ids].mean(axis=1)
    del df
    ## For each pair, calculate fold changes and log2 fold changes; numeric
    ## group labels are compared highest-first, others alphabetically.
    for pair in pairs:
        if all(g.isdigit() for g in pair):
            pair.sort(key=int, reverse=True)
        else:
            pair.sort()
        name_1, name_2 = pair
        ## Values are log2, so 2**mean recovers the linear scale.
        new_df['abs_mean_diff_' + name_1 + '_' + name_2] = \
            abs((2 ** new_df['mean_' + name_1]) - (2 ** new_df['mean_' + name_2]))
        new_df['logFC_' + name_1 + '_' + name_2] = \
            new_df['mean_' + name_1] - new_df['mean_' + name_2]
        new_df['FC_' + name_1 + '_' + name_2] = \
            2 ** new_df['logFC_' + name_1 + '_' + name_2]
    return new_df
def run_stats(experiment):
    '''Run independent T-test or one-way ANOVA dependent on number of groups.

    Args:
        experiment (Experiment instance): An instance of the Experiment class.

    Returns:
        A new Pandas data frame with p values, adjusted p values and Tukey HSD
        post-hoc results if there are > 2 groups.
    '''
    groups = experiment.get_groups()
    samples = experiment.get_sampleids()
    df = experiment.df

    ## Collect per-group expression values: one list of row-value-lists per
    ## group. (List comprehension instead of map() so the result is
    ## subscriptable on Python 3 as well.)
    all_vals = []
    for group in groups:
        ids = [sample for sample in samples if group == sample.split('_')[0]]
        all_vals.append([list(row) for row in df[ids].values])

    ## Two groups -> independent T-test per row; more -> one-way ANOVA per row.
    if len(groups) == 2:
        p_vals = [ttest_ind(all_vals[0][i], all_vals[1][i])[1]
                  for i in range(len(all_vals[0]))]
    else:
        p_vals = []
        for i in range(len(all_vals[0])):
            row_vals = [all_vals[j][i] for j in range(len(groups))]
            p_vals.append(f_oneway(*row_vals)[1])

    ## Benjamini-Hochberg FDR correction of the raw p values.
    p_val_adj = list(multipletests(p_vals, method='fdr_bh')[1])
    ## First five columns are annotation; .iloc replaces the .ix indexer,
    ## which was removed in pandas 1.0.
    new_df = df.iloc[:, :5].copy()
    new_df['p_val'] = pd.Series(p_vals, index=new_df.index)
    new_df['p_val_adj'] = pd.Series(p_val_adj, index=new_df.index)

    ## Tukey HSD post-hoc test -- only meaningful with more than 2 groups.
    if len(groups) > 2:
        vals_df = df[samples]
        group_ids = [sample.split('_')[0] for sample in vals_df.columns.values]
        posthoc_results = {}
        ## Run the post-hoc test on each row.
        for row in range(len(vals_df)):
            row_vals = vals_df.iloc[row]
            mc = MultiComparison(row_vals, group_ids)
            mc_groups = mc.groupsunique
            results = mc.tukeyhsd()
            significant = results.reject
            ## Materialise the pair index tuples (zip() is lazy on Python 3).
            pairs = list(zip(*[x.tolist() for x in mc.pairindices]))
            ## Record each pair's significance flag under a canonical name.
            for i, raw_pair in enumerate(pairs):
                pair = sorted(raw_pair)
                pair_name = str(mc_groups[pair[0]]) + '_' + str(mc_groups[pair[1]])
                posthoc_results.setdefault(pair_name, []).append(significant[i])
        ## Add the post-hoc results to the data frame.
        for pair_name in posthoc_results:
            new_df['significant_' + pair_name] = posthoc_results[pair_name]
    return new_df
def find_clusters(df, k_vals=[4, 9, 16, 25], how='hierarchical'):
    '''Find clusters, and if method is k-means run silhouette analysis
    to determine the value of k.

    Args:
        df (data frame): A data frame with normalised expression data.
        k_vals (list or range): The range over which to test k.
        how ('hierarchical' or 'kmeans'): Clustering method.

    Returns:
        A list of cluster numbers.
    '''
    if how == 'hierarchical':
        ## No silhouette analysis here: cluster count is the sqrt(n/2)
        ## rule of thumb, using average-linkage agglomeration.
        n_clusters = int(np.sqrt(len(df) / 2.0))
        linkage = hac.linkage(df, method='average')
        best_clusters = hac.fcluster(linkage, t=n_clusters, criterion='maxclust')
    elif how == 'kmeans':
        ## Try each candidate k and keep the labelling with the best
        ## combined silhouette-based score.
        top_score = 0
        best_k = 2
        for k in k_vals:
            model = KMeans(n_clusters=k, random_state=10)
            labels = model.fit_predict(df)
            avg_sil = silhouette_score(df, labels)
            sample_sils = silhouette_samples(df, labels)
            n_above = 0
            cluster_sizes = []
            for ci in range(k):
                member_sils = sample_sils[labels == ci]
                cluster_sizes.append(member_sils.shape[0])
                if max(member_sils) > avg_sil:
                    n_above += 1
            ## Combined score: average silhouette, fraction of clusters
            ## peaking above the mean, and a size-uniformity term.
            frac_above = float(n_above) / k
            size_spread = np.std(cluster_sizes)
            size_score = 1.0 / size_spread if size_spread > 1.0 else 1.0
            combined = (avg_sil + frac_above + size_score) / 3
            if combined > top_score:
                top_score = combined
                best_k = k
                best_clusters = labels
    ## Report 1-based-plus-one cluster ids, matching the original output.
    best_clusters = [label + 1 for label in best_clusters]
    return best_clusters
def get_clusters(experiment, how='hierarchical'):
    '''Clusters significantly differentially expressed genes by expression pattern
    across the samples using hierarchical or k-means clustering and silhouette analysis
    to pick the value of k (via the find_clusters function).

    Args:
        experiment (Experiment instance): An instance of the Experiment class.
        how ('hierarchical' or 'kmeans'): Clustering method.

    Returns:
        A new Pandas data frame with fold changes, p values and clusters.
    '''
    ## Run the stats to filter genes down to significant ones only.
    stats = run_stats(experiment)
    stats = stats[['FeatureNum', 'p_val', 'p_val_adj']].copy()
    ## Get the fold changes.
    fcs = get_fold_changes(experiment)
    keep_cols = [x for x in fcs.columns.values if 'logFC' in x or 'abs_mean_diff' in x]
    fc_cols = [x for x in fcs.columns.values if 'logFC' in x]
    fcs = fcs[['FeatureNum'] + keep_cols].copy()
    norm_exp_cols = experiment.get_sampleids()
    abs_mean_diff_cols = [x for x in fcs.columns.values if 'abs_mean_diff' in x]
    ## Merge together the stats and fold changes data frames.
    merged_df = pd.merge(experiment.df, stats, on='FeatureNum')
    merged_df = pd.merge(merged_df, fcs, on='FeatureNum')
    ## Keep significant rows only (adj. p < 0.05), then raise the fold change
    ## cutoff until the table is small enough for a heat map.  axis=1 is
    ## spelled explicitly: positional .any(1) is deprecated in recent pandas.
    filtered_df = merged_df[(merged_df['p_val_adj'] < 0.05) &
                            ((abs(merged_df[fc_cols]) > np.log2(float(1))).any(axis=1)) &
                            ((merged_df[abs_mean_diff_cols] > 0.5).any(axis=1))].copy()
    i = 2
    while len(filtered_df) * len(experiment.get_sampleids()) > 40000:
        filtered_df = merged_df[(merged_df['p_val_adj'] < 0.05) &
                                ((abs(merged_df[fc_cols]) > np.log2(float(i))).any(axis=1)) &
                                ((merged_df[abs_mean_diff_cols] > 0.5).any(axis=1))].copy()
        i += 1
    ## Clean up.
    del merged_df
    del stats
    del fcs
    ## A good guesstimate for the upper limit of k.
    k_limit = int(np.sqrt(len(filtered_df) / 2))
    ## Catches numpy warnings about means of empty slices.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        if how == 'hierarchical':
            filtered_df['cluster'] = find_clusters(filtered_df[norm_exp_cols],
                                                   how='hierarchical')
        elif how == 'kmeans':
            ## Silhouette analysis picks the value of k.
            filtered_df['cluster'] = find_clusters(filtered_df[norm_exp_cols],
                                                   k_vals=range(3, k_limit),
                                                   how='kmeans')
    ## BUG FIX: the per-gene mean must be taken across samples (axis=1).
    ## The original used axis=0 (per-column means), whose index of column
    ## names mis-aligned on assignment and filled the sort key with NaNs.
    filtered_df['mean_norm_expression'] = filtered_df[norm_exp_cols].mean(axis=1)
    ## Sort by cluster, then by descending mean expression within cluster.
    filtered_df.sort_values(by=['cluster', 'mean_norm_expression'],
                            ascending=[True, False], inplace=True)
    return filtered_df.reset_index(drop=True)
def write_fcs_stats(experiment, outfile='foldchanges_stats.txt'):
    '''Creates a tab-separated table with a full list of fold changes,
    p values, adjusted p values and post hoc results.

    Args:
        experiment (Experiment instance): An instance of the Experiment class.
        outfile (string): The name of the tab-separated table to be created.
    '''
    ## Run the stats and fold changes and merge them into a single data frame.
    stats = run_stats(experiment)
    posthoc_cols = [colname for colname in stats.columns.values if 'significant' in colname]
    stats = stats[['FeatureNum', 'p_val', 'p_val_adj'] + posthoc_cols]
    fcs = get_fold_changes(experiment)
    merged_df = pd.merge(fcs, stats, on='FeatureNum')
    ## Define the order of the value-column families (module state is not
    ## needed: keyfunc is a closure, so the original `global` is gone).
    colnames = list(merged_df.columns.values)
    col_order = ['mean', 'FC', 'logFC', 'abs_mean_diff', 'p_val', 'adj_p_val',
                 'significant']

    def keyfunc(col):
        ## Sort by column family first, then by the rest of the column name.
        ## BUG FIX: the original used col.lstrip(c + '_'), but lstrip strips a
        ## character *set*, not a prefix, so group names starting with those
        ## letters were partially eaten and ties mis-sorted.  Slicing removes
        ## exactly the prefix and its trailing underscore.
        for prefix in col_order:
            if col.startswith(prefix):
                return (col_order.index(prefix), col[len(prefix) + 1:])

    ## Keep the first five annotation columns, sort the rest by family.
    sorted_colnames = colnames[:5] + sorted(colnames[5:], key=keyfunc)
    merged_df = merged_df[sorted_colnames]
    ## Fix the type of the FeatureNum column and sort it.
    merged_df['FeatureNum'] = merged_df['FeatureNum'].astype(int)
    merged_df.sort_values(by='FeatureNum', ascending=True, inplace=True)
    ## Write the table.
    merged_df.to_csv(outfile, sep='\t', index=False)
def write_normalised_expression(experiment, outfile='normalised_expression.txt'):
    '''Creates a tab-separated table with all of the normalised expression values.

    Args:
        experiment (Experiment instance): An instance of the Experiment class.
        outfile (string): The name of the tab-separated table to be created.
    '''
    table = experiment.df
    ## Keep the five annotation columns first, then the value columns
    ## in alphabetical order.
    header = list(table.columns.values)
    ordered = header[:5] + sorted(header[5:])
    ## Write the reordered table without the index.
    table[ordered].to_csv(outfile, sep='\t', index=False)
| mit |
aviciimaxwell/odoo | addons/edi/models/__init__.py | 442 | 1116 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import edi
import res_partner
import res_company
import res_currency
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jrbl/invenio | modules/bibformat/lib/elements/bfe_url.py | 39 | 1484 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints full-text URLs
"""
__revision__ = "$Id$"
def format_element(bfo, style, separator='; '):
    """
    This is the default format for formatting full-text URLs.
    @param bfo: BibFormat object providing access to the record's fields.
    @param style: CSS class of the link.
    @param separator: the separator between urls.
    """
    urls_u = bfo.fields("8564_u")
    # Build the optional class attribute with a trailing space so it stays
    # separated from href=.  BUG FIX: the original concatenated
    # 'class="x"' directly against 'href', producing invalid HTML like
    # <a class="x"href="...">.
    attr = ''
    if style != "":
        attr = 'class="' + style + '" '
    urls = ['<a ' + attr + 'href="' + url + '">' + url + '</a>'
            for url in urls_u]
    return separator.join(urls)
def escape_values(bfo):
    """
    Called by BibFormat in order to check if output of this element
    should be escaped.
    """
    # 0: do not escape -- this element emits raw HTML anchors itself.
    return 0
| gpl-2.0 |
rukeon/py_ideapage | lib/werkzeug/__init__.py | 296 | 7210 | # -*- coding: utf-8 -*-
"""
werkzeug
~~~~~~~~
Werkzeug is the Swiss Army knife of Python web development.
It provides useful classes and functions for any WSGI application to make
the life of a python web developer much easier. All of the provided
classes are independent from each other so you can mix it with any other
library.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from types import ModuleType
import sys
from werkzeug._compat import iteritems
# the version. Usually set automatically by a script.
__version__ = '0.9.4'
# This import magic raises concerns quite often which is why the implementation
# and motivation is explained here in detail now.
#
# The majority of the functions and classes provided by Werkzeug work on the
# HTTP and WSGI layer. There is no useful grouping for those which is why
# they are all importable from "werkzeug" instead of the modules where they are
# implemented. The downside of that is, that now everything would be loaded at
# once, even if unused.
#
# The implementation of a lazy-loading module in this file replaces the
# werkzeug package when imported from within. Attribute access to the werkzeug
# module will then lazily import from the modules that implement the objects.
# import mapping to objects in other modules
all_by_module = {
'werkzeug.debug': ['DebuggedApplication'],
'werkzeug.local': ['Local', 'LocalManager', 'LocalProxy',
'LocalStack', 'release_local'],
'werkzeug.serving': ['run_simple'],
'werkzeug.test': ['Client', 'EnvironBuilder', 'create_environ',
'run_wsgi_app'],
'werkzeug.testapp': ['test_app'],
'werkzeug.exceptions': ['abort', 'Aborter'],
'werkzeug.urls': ['url_decode', 'url_encode', 'url_quote',
'url_quote_plus', 'url_unquote',
'url_unquote_plus', 'url_fix', 'Href',
'iri_to_uri', 'uri_to_iri'],
'werkzeug.formparser': ['parse_form_data'],
'werkzeug.utils': ['escape', 'environ_property',
'append_slash_redirect', 'redirect',
'cached_property', 'import_string',
'dump_cookie', 'parse_cookie', 'unescape',
'format_string', 'find_modules', 'header_property',
'html', 'xhtml', 'HTMLBuilder',
'validate_arguments', 'ArgumentValidationError',
'bind_arguments', 'secure_filename'],
'werkzeug.wsgi': ['get_current_url', 'get_host', 'pop_path_info',
'peek_path_info', 'SharedDataMiddleware',
'DispatcherMiddleware', 'ClosingIterator',
'FileWrapper', 'make_line_iter', 'LimitedStream',
'responder', 'wrap_file', 'extract_path_info'],
'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
'EnvironHeaders', 'ImmutableList',
'ImmutableDict', 'ImmutableMultiDict',
'TypeConversionDict', 'ImmutableTypeConversionDict',
'Accept', 'MIMEAccept', 'CharsetAccept',
'LanguageAccept', 'RequestCacheControl',
'ResponseCacheControl', 'ETags', 'HeaderSet',
'WWWAuthenticate', 'Authorization',
'FileMultiDict', 'CallbackDict', 'FileStorage',
'OrderedMultiDict', 'ImmutableOrderedMultiDict'],
'werkzeug.useragents': ['UserAgent'],
'werkzeug.http': ['parse_etags', 'parse_date', 'http_date',
'cookie_date', 'parse_cache_control_header',
'is_resource_modified', 'parse_accept_header',
'parse_set_header', 'quote_etag', 'unquote_etag',
'generate_etag', 'dump_header',
'parse_list_header', 'parse_dict_header',
'parse_authorization_header',
'parse_www_authenticate_header',
'remove_entity_headers', 'is_entity_header',
'remove_hop_by_hop_headers', 'parse_options_header',
'dump_options_header', 'is_hop_by_hop_header',
'unquote_header_value',
'quote_header_value', 'HTTP_STATUS_CODES'],
'werkzeug.wrappers': ['BaseResponse', 'BaseRequest', 'Request',
'Response', 'AcceptMixin', 'ETagRequestMixin',
'ETagResponseMixin', 'ResponseStreamMixin',
'CommonResponseDescriptorsMixin',
'UserAgentMixin', 'AuthorizationMixin',
'WWWAuthenticateMixin',
'CommonRequestDescriptorsMixin'],
'werkzeug.security': ['generate_password_hash', 'check_password_hash'],
# the undocumented easteregg ;-)
'werkzeug._internal': ['_easteregg']
}
# modules that should be imported when accessed as attributes of werkzeug
attribute_modules = frozenset(['exceptions', 'routing', 'script'])

# Reverse index: object name -> dotted module path that implements it,
# built from the all_by_module mapping above.
object_origins = {}
for module, items in iteritems(all_by_module):
    for item in items:
        object_origins[item] = module
class module(ModuleType):
    """Automatically import objects from the modules."""

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, i.e. the name
        # has not been imported into this module object yet.
        if name in object_origins:
            # Import the implementing module and copy over *all* names it
            # contributes, so subsequent lookups hit the instance dict
            # directly without re-entering __getattr__.
            module = __import__(object_origins[name], None, None, [name])
            for extra_name in all_by_module[module.__name__]:
                setattr(self, extra_name, getattr(module, extra_name))
            return getattr(module, name)
        elif name in attribute_modules:
            # Submodules exposed as attributes are imported on first access.
            __import__('werkzeug.' + name)
        return ModuleType.__getattribute__(self, name)

    def __dir__(self):
        """Just show what we want to show."""
        result = list(new_module.__all__)
        result.extend(('__file__', '__path__', '__doc__', '__all__',
                       '__docformat__', '__name__', '__path__',
                       '__package__', '__version__'))
        return result
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules['werkzeug']

# setup the new module and patch it into the dict of loaded modules
new_module = sys.modules['werkzeug'] = module('werkzeug')
# Carry over the metadata the import system normally sets on a module so the
# lazy replacement is indistinguishable from the original package object.
new_module.__dict__.update({
    '__file__': __file__,
    '__package__': 'werkzeug',
    '__path__': __path__,
    '__doc__': __doc__,
    '__version__': __version__,
    '__all__': tuple(object_origins) + tuple(attribute_modules),
    '__docformat__': 'restructuredtext en'
})

# Due to bootstrapping issues we need to import exceptions here.
# Don't ask :-(
__import__('werkzeug.exceptions')
| apache-2.0 |
willingc/pip | pip/_vendor/requests/packages/chardet/langhebrewmodel.py | 2763 | 11318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
# Model descriptor consumed by the Hebrew single-byte charset prober.
# It ties the windows-1255 byte-to-frequency-order table to the Hebrew
# bigram language model defined above.
Win1255HebrewModel = {
    'charToOrderMap': win1255_CharToOrderMap,    # byte value -> frequency-order index
    'precedenceMatrix': HebrewLangModel,         # bigram precedence/frequency classes
    'mTypicalPositiveRatio': 0.984004,           # typical ratio of positive bigram hits for Hebrew text
    'keepEnglishLetter': False,                  # ASCII letters are not scored by this model
    'charsetName': "windows-1255"
}
# flake8: noqa
| mit |
cypod/arsenalsuite | cpp/apps/absubmit/fusionsubmit/submit.py | 10 | 11573 | #!/usr/bin/python
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.uic import *
from blur.Stone import *
from blur.Classes import *
from blur.Classesui import HostSelector
from blur.absubmit import Submitter
from blur import RedirectOutputToLog
import sys
import time
import os.path
import random
if sys.platform == 'win32':
import win32api
class FusionRenderDialog(QDialog):
    """Submission dialog for rendering Fusion flows on the render farm.

    Collects the job options (flow file, frame list, packet settings,
    notification and host preferences) and hands them to
    ``blur.absubmit.Submitter``.
    """

    def __init__(self, parent=None):
        QDialog.__init__(self, parent)
        loadUi("fusionrenderdialogui.ui", self)
        self.connect(self.mAutoPacketSizeCheck, SIGNAL('toggled(bool)'), self.autoPacketSizeToggled)
        self.connect(self.mChooseFileNameButton, SIGNAL('clicked()'), self.chooseFileName)
        self.connect(self.mAllHostsCheck, SIGNAL('toggled(bool)'), self.allHostsToggled)
        self.connect(self.mHostListButton, SIGNAL('clicked()'), self.showHostSelector)
        self.layout().setSizeConstraint(QLayout.SetFixedSize)
        self.mProjectCombo.setSpecialItemText('None')
        self.mProjectCombo.setStatusFilters(ProjectStatusList(ProjectStatus.recordByName('Production')))
        # Job state that may be overridden by command-line arguments.
        self.OutputPath = None
        self.HostList = ''
        self.Version = '5.2'
        self.Platform = 'IA32'
        self.Services = []
        self.loadSettings()

    def loadSettings(self):
        """Restore the last-used dialog options from the user config."""
        c = userConfig()
        c.pushSection("LastSettings")
        project = Project.recordByName(c.readString("Project"))
        if project.isRecord():
            self.mProjectCombo.setProject(project)
        aps = c.readBool("AutoPacketSize", True)
        self.mAutoPacketSizeCheck.setChecked(aps)
        if not aps:
            self.mPacketSizeSpin.setValue(c.readInt("PacketSize", 10))
        self.mFileNameEdit.setText(c.readString("FileName"))
        self.mFrameListEdit.setText(c.readString("FrameList"))
        self.mSequentialRadio.setChecked(c.readString("PacketType", "random") == "sequential")
        self.mJabberErrorsCheck.setChecked(c.readBool("JabberErrors", False))
        self.mJabberCompletionCheck.setChecked(c.readBool("JabberCompletion", False))
        self.mEmailErrorsCheck.setChecked(c.readBool("EmailErrors", False))
        self.mEmailCompletionCheck.setChecked(c.readBool("EmailCompletion", False))
        self.mPrioritySpin.setValue(c.readInt("Priority", 10))
        self.mDeleteOnCompleteCheck.setChecked(c.readBool("DeleteOnComplete", False))
        self.mSubmitSuspendedCheck.setChecked(c.readBool("SubmitSuspended", False))
        c.popSection()

    def saveSettings(self):
        """Persist the current dialog options to the user config."""
        c = userConfig()
        c.pushSection("LastSettings")
        c.writeString("Project", self.mProjectCombo.project().name())
        c.writeBool("AutoPacketSize", self.mAutoPacketSizeCheck.isChecked())
        c.writeInt("PacketSize", self.mPacketSizeSpin.value())
        c.writeString("FileName", self.mFileNameEdit.text())
        c.writeString("FrameList", self.mFrameListEdit.text())
        c.writeString("PacketType", {True: "sequential", False: "random"}[self.mSequentialRadio.isChecked()])
        c.writeBool("JabberErrors", self.mJabberErrorsCheck.isChecked())
        c.writeBool("JabberCompletion", self.mJabberCompletionCheck.isChecked())
        c.writeBool("EmailErrors", self.mEmailErrorsCheck.isChecked())
        c.writeBool("EmailCompletion", self.mEmailCompletionCheck.isChecked())
        c.writeInt("Priority", self.mPrioritySpin.value())
        c.writeBool("DeleteOnComplete", self.mDeleteOnCompleteCheck.isChecked())
        c.writeBool("SubmitSuspended", self.mSubmitSuspendedCheck.isChecked())
        c.popSection()

    def autoPacketSizeToggled(self, autoPacketSize):
        # Manual packet size only makes sense when auto sizing is off.
        self.mPacketSizeSpin.setEnabled(not autoPacketSize)

    def allHostsToggled(self, allHosts):
        # The explicit host-list chooser is only needed when not using all hosts.
        self.mHostListButton.setEnabled(not allHosts)

    def showHostSelector(self):
        """Let the user pick an explicit list of render hosts."""
        hs = HostSelector(self)
        hs.setServiceFilter(ServiceList(Service.recordByName('Fusion5.2')))
        hs.setHostList(self.HostList)
        if hs.exec_() == QDialog.Accepted:
            self.HostList = hs.hostStringList()
        del hs

    def chooseFileName(self):
        """Browse for the Fusion flow (.comp) file to render."""
        fileName = QFileDialog.getOpenFileName(self, 'Choose Flow To Render', QString(), 'Fusion Flows (*.comp)')
        if not fileName.isEmpty():
            self.mFileNameEdit.setText(fileName)

    def checkFrameList(self):
        """Return True when the frame-list field parses as a number list."""
        (frames, valid) = expandNumberList(self.mFrameListEdit.text())
        return valid

    def packetTypeString(self):
        """Return the packet distribution mode: 'sequential' or 'random'."""
        if self.mSequentialRadio.isChecked():
            return 'sequential'
        return 'random'

    def packetSize(self):
        """Return the chosen packet size, or 0 for automatic sizing."""
        if self.mAutoPacketSizeCheck.isChecked():
            return 0
        return self.mPacketSizeSpin.value()

    def buildNotifyString(self, jabber, email):
        """Build a 'user:je' style notification spec from the two flags."""
        ret = ''
        if jabber or email:
            ret = getUserName() + ':'
            if jabber:
                ret += 'j'
            if email:
                ret += 'e'
        return ret

    # Returns tuple (notifyOnErrorString,notifyOnCompleteString)
    def buildNotifyStrings(self):
        return (
            self.buildNotifyString(self.mJabberErrorsCheck.isChecked(), self.mEmailErrorsCheck.isChecked()),
            self.buildNotifyString(self.mJabberCompletionCheck.isChecked(), self.mEmailCompletionCheck.isChecked()))

    def buildAbsubmitArgs(self):
        """Assemble the key/value argument dict understood by absubmit.

        Raises ValueError when the requested Fusion version has no
        matching farm service.
        """
        sl = {}
        sl['jobType'] = 'Fusion'
        sl['packetType'] = self.packetTypeString()
        sl['priority'] = str(self.mPrioritySpin.value())
        sl['user'] = getUserName()
        sl['packetSize'] = str(self.packetSize())
        if self.mAllFramesAsSingleTaskCheck.isChecked():
            sl['allframesassingletask'] = 'true'
            sl['frameList'] = str('1')
        else:
            sl['frameList'] = self.mFrameListEdit.text()
        sl['fileName'] = self.mFileNameEdit.text()
        notifyError, notifyComplete = self.buildNotifyStrings()
        sl['notifyOnError'] = notifyError
        sl['notifyOnComplete'] = notifyComplete
        sl['job'] = self.mJobNameEdit.text()
        sl['deleteOnComplete'] = str(int(self.mDeleteOnCompleteCheck.isChecked()))
        if self.mProjectCombo.project().isRecord():
            sl['projectName'] = self.mProjectCombo.project().name()
        if not self.mOutputPathCombo.currentText().isEmpty():
            sl['outputPath'] = str(self.mOutputPathCombo.currentText())
        if not self.mAllHostsCheck.isChecked() and len(self.HostList):
            sl['hostList'] = str(self.HostList)
        sl['outputCount'] = str(self.mOutputPathCombo.count())
        if self.Version:
            # Cut off to Major.Minor version, ex. 5.21 -> 5.2
            self.Version = '%.01f' % float(self.Version)
            service = 'Fusion' + self.Version
            if self.Platform == 'IA64' or self.Platform == 'X64':
                service += 'x64'
            # Bug fix: this used to reference the module-level 'dialog'
            # global instead of self, breaking any other instantiation.
            self.Services.append(service)
            if not Service.recordByName(service).isRecord():
                QMessageBox.critical(self, 'Service %s not found' % service, 'No service found for %s, please contact IT' % service)
                # Bug fix: raising a plain string is a TypeError on
                # Python >= 2.6; raise a real exception instead.
                raise ValueError("Invalid Fusion Version %s" % service)
        if len(self.Services):
            sl['services'] = ','.join(self.Services)
        if self.mSubmitSuspendedCheck.isChecked():
            sl['submitSuspended'] = '1'
        Log("Applying Absubmit args: %s" % str(sl))
        return sl

    def accept(self):
        """Validate the form, persist the settings and submit the job."""
        if self.mJobNameEdit.text().isEmpty():
            QMessageBox.critical(self, 'Missing Job Name', 'You must choose a name for this job')
            return
        if not QFile.exists(self.mFileNameEdit.text()):
            QMessageBox.critical(self, 'Invalid File', 'You must choose an existing fusion flow')
            return
        if not self.checkFrameList():
            QMessageBox.critical(self, 'Invalid Frame List', 'Frame Lists are comma separated lists of either "XXX", or "XXX-YYY"')
            return
        self.saveSettings()
        if self.mDeleteFramesBeforeSubmitCheck.isChecked():
            # Ask IT to remove previously rendered frames for every output
            # path before the new job is submitted.
            for loop in (range(self.mOutputPathCombo.count())):
                tFileName = str(self.mOutputPathCombo.itemText(loop))
                tFileDelete = os.path.dirname(tFileName) + "/*" + os.path.splitext(os.path.basename(tFileName))[1]
                self.__specialDeleteMsg(tFileDelete)
                time.sleep(60)  # give the delete request time to be processed
        submitter = Submitter(self)
        self.connect(submitter, SIGNAL('submitSuccess()'), self.submitSuccess)
        self.connect(submitter, SIGNAL('submitError( const QString & )'), self.submitError)
        submitter.applyArgs(self.buildAbsubmitArgs())
        submitter.submit()

    def submitSuccess(self):
        """Close the dialog with success once the job is on the farm."""
        Log('Submission Finished Successfully')
        QDialog.accept(self)

    def submitError(self, errorMsg):
        """Report the submission failure and close the dialog with reject."""
        QMessageBox.critical(self, 'Submission Failed', 'Submission Failed With Error: ' + errorMsg)
        Log('Submission Failed With Error: ' + errorMsg)
        QDialog.reject(self)

    # ================================================================================================
    def __sendMessage(self, msgPath, msg):
        """Drop *msg* into the spool directory as a uniquely named .msg file.

        The file is first written with a .tmp extension and then renamed so
        the consumer never sees a partially written message.  Returns True
        on success.
        """
        # we use a random number and username to create an unique file
        msgFileNamePath = msgPath  # TBR: hard coded path!! what happens with offsite people!
        msgFileNameFile = getUserName() + str(random.randint(0, 65535))
        msgFileNameExtTemp = ".tmp"
        msgFileNameExt = ".msg"
        tmpName = msgFileNamePath + msgFileNameFile + msgFileNameExtTemp
        msgFile = open(tmpName, "w")  # open() instead of the py2-only file()
        if msgFile:
            msgFile.write(msg + "\n")
            msgFile.close()
            os.rename(tmpName, msgFileNamePath + msgFileNameFile + msgFileNameExt)
            return True
        return False

    def __specialDeleteMsg(self, inPathToDelete):
        """Queue a request for IT to delete previously rendered frames.

        Converts well-known drive-letter paths to their UNC equivalents and
        drops an 'rm' message into the spool directory watched by IT.
        Returns True if the message could be written, False otherwise.
        """
        msgPath = '//thor/spool/new/'
        # pathToDelete = blurFile.ConvertToUNC ( inPathToDelete )
        pathToDelete = inPathToDelete.replace("G:", "//thor/animation").replace("Q:", "//cougar/compOutput").replace("S:", "//cheetah/renderOutput").replace("U:", "//goat/renderOutput")
        msg = " { \n\
    action => rm, \n\
    data => \n\
        { \n\
        dir =>  '%s', \n\
        verbose => 1 \n\
        }, \n\
    info => { user => '%s' } \n\
    }"
        localName = "fusion-job"
        if sys.platform == 'win32':
            localName = win32api.GetUserName()
        msg = msg % (pathToDelete, localName)
        return self.__sendMessage(msgPath, msg)
if __name__ == "__main__":
    app = QApplication(sys.argv)
    initConfig("../absubmit.ini","fusionsubmit.log")
    RedirectOutputToLog()
    # Prefer the user's network home for per-user settings; fall back to
    # the local profile directory when it is unavailable.
    cp = 'h:/public/' + getUserName() + '/Blur'
    if not QDir( cp ).exists():
        cp = 'C:/Documents and Settings/' + getUserName()
    initUserConfig( cp + "/fusionsubmit.ini" );
    blurqt_loader()
    dialog = FusionRenderDialog()
    Log( "Parsing args: " + ','.join(sys.argv) )
    # Arguments come as key/value pairs: keys at odd indexes, each value
    # immediately following its key.
    for i, key in enumerate(sys.argv[1::2]):
        val = sys.argv[(i+1)*2]
        if key == 'fileName':
            dialog.mFileNameEdit.setText(val)
            dialog.mJobNameEdit.setText(QFileInfo(val).completeBaseName())
            # Guess the project from the second path component when it
            # matches a known project record.
            path = Path(val)
            if path.level() >= 1:
                p = Project.recordByName( path[1] )
                if p.isRecord():
                    dialog.mProjectCombo.setProject( p )
        elif key == 'frameList':
            dialog.mFrameListEdit.setText(val)
        elif key == 'services':
            dialog.Services += val.split(',')
        elif key == 'outputs':
            dialog.mOutputPathCombo.addItems( QString(val).split(',') )
        elif key == 'version':
            dialog.Version = val
        elif key == 'platform':
            dialog.Platform = val
    ret = dialog.exec_()
    shutdown()
    sys.exit( ret )
| gpl-2.0 |
sgerhart/ansible | lib/ansible/plugins/action/cli_config.py | 27 | 1190 | #
# Copyright 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action.normal import ActionModule as _ActionModule
class ActionModule(_ActionModule):
    """Action plugin for ``cli_config``: only valid over a network_cli connection."""

    def run(self, tmp=None, task_vars=None):
        """Fail early unless the play uses the network_cli connection type."""
        connection_type = self._play_context.connection
        if connection_type != 'network_cli':
            return {
                'failed': True,
                'msg': 'Connection type %s is not valid for cli_config module' % connection_type,
            }
        return super(ActionModule, self).run(task_vars=task_vars)
| mit |
YannickDieter/testbeam_analysis | testbeam_analysis/testing/test_track_analysis.py | 1 | 13492 | ''' Script to check the correctness of the analysis. The analysis is done on raw data and all results are compared to a recorded analysis.
'''
import os
import shutil
import unittest
from testbeam_analysis import track_analysis
from testbeam_analysis.tools import analysis_utils, test_tools
testing_path = os.path.dirname(__file__)
class TestTrackAnalysis(unittest.TestCase):
    """Regression tests for testbeam_analysis.track_analysis.

    Track finding and fitting are run on small fixture files and the
    resulting HDF5 output is compared against previously recorded
    reference files.
    """

    @classmethod
    def setUpClass(cls):
        # virtual X server for plots under headless LINUX travis testing is needed
        if os.getenv('TRAVIS', False) and os.getenv('TRAVIS_OS_NAME', False) == 'linux':
            from xvfbwrapper import Xvfb
            cls.vdisplay = Xvfb()
            cls.vdisplay.start()
        cls.output_folder = 'tmp_track_test_output'
        test_tools.create_folder(cls.output_folder)
        cls.pixel_size = (250, 50)  # in um

    @classmethod
    def tearDownClass(cls):  # Remove created files
        shutil.rmtree(cls.output_folder)

    def test_track_finding(self):
        # Test 1: default track finding; result must match the recorded reference.
        track_analysis.find_tracks(input_tracklets_file=analysis_utils.get_data('fixtures/track_analysis/Tracklets_small.h5',
                                                                                output=os.path.join(testing_path,
                                                                                                    'fixtures/track_analysis/Tracklets_small.h5')),
                                   input_alignment_file=analysis_utils.get_data('fixtures/track_analysis/Alignment_result.h5',
                                                                                output=os.path.join(testing_path,
                                                                                                    'fixtures/track_analysis/Alignment_result.h5')),
                                   output_track_candidates_file=os.path.join(self.output_folder, 'TrackCandidates.h5'))
        data_equal, error_msg = test_tools.compare_h5_files(analysis_utils.get_data('fixtures/track_analysis/TrackCandidates_result.h5',
                                                                                    output=os.path.join(testing_path,
                                                                                                        'fixtures/track_analysis/TrackCandidates_result.h5')), os.path.join(self.output_folder, 'TrackCandidates.h5'))
        self.assertTrue(data_equal, msg=error_msg)
        # Test 2: chunked
        # Chunked processing must produce exactly the same track candidates.
        track_analysis.find_tracks(input_tracklets_file=analysis_utils.get_data('fixtures/track_analysis/Tracklets_small.h5',
                                                                                output=os.path.join(testing_path,
                                                                                                    'fixtures/track_analysis/Tracklets_small.h5')),
                                   input_alignment_file=analysis_utils.get_data('fixtures/track_analysis/Alignment_result.h5',
                                                                                output=os.path.join(testing_path,
                                                                                                    'fixtures/track_analysis/Alignment_result.h5')),
                                   output_track_candidates_file=os.path.join(self.output_folder, 'TrackCandidates_2.h5'),
                                   chunk_size=293)
        data_equal, error_msg = test_tools.compare_h5_files(analysis_utils.get_data('fixtures/track_analysis/TrackCandidates_result.h5',
                                                                                    output=os.path.join(testing_path,
                                                                                                        'fixtures/track_analysis/TrackCandidates_result.h5')),
                                                            os.path.join(self.output_folder, 'TrackCandidates_2.h5'))
        self.assertTrue(data_equal, msg=error_msg)

    def test_track_fitting(self):
        # Test 1: Fit DUTs and always exclude one DUT (normal mode for unbiased residuals and efficiency determination)
        track_analysis.fit_tracks(input_track_candidates_file=analysis_utils.get_data('fixtures/track_analysis/TrackCandidates_result.h5',
                                                                                      output=os.path.join(testing_path,
                                                                                                          'fixtures/track_analysis/TrackCandidates_result.h5')),
                                  input_alignment_file=analysis_utils.get_data('fixtures/track_analysis/Alignment_result.h5',
                                                                               output=os.path.join(testing_path,
                                                                                                   'fixtures/track_analysis/Alignment_result.h5')),
                                  output_tracks_file=os.path.join(self.output_folder, 'Tracks.h5'),
                                  selection_track_quality=1,
                                  force_prealignment=True)
        data_equal, error_msg = test_tools.compare_h5_files(analysis_utils.get_data('fixtures/track_analysis/Tracks_result.h5',
                                                                                    output=os.path.join(testing_path,
                                                                                                        'fixtures/track_analysis/Tracks_result.h5')),
                                                            os.path.join(self.output_folder, 'Tracks.h5'), exact=False)
        self.assertTrue(data_equal, msg=error_msg)
        # Test 2: As test 1 but chunked data analysis, should result in the same tracks
        track_analysis.fit_tracks(input_track_candidates_file=analysis_utils.get_data('fixtures/track_analysis/TrackCandidates_result.h5',
                                                                                      output=os.path.join(testing_path,
                                                                                                          'fixtures/track_analysis/TrackCandidates_result.h5')),
                                  input_alignment_file=analysis_utils.get_data('fixtures/track_analysis/Alignment_result.h5',
                                                                               output=os.path.join(testing_path,
                                                                                                   'fixtures/track_analysis/Alignment_result.h5')),
                                  output_tracks_file=os.path.join(self.output_folder, 'Tracks_2.h5'),
                                  selection_track_quality=1,
                                  force_prealignment=True,
                                  chunk_size=4999)
        data_equal, error_msg = test_tools.compare_h5_files(analysis_utils.get_data('fixtures/track_analysis/Tracks_result.h5',
                                                                                    output=os.path.join(testing_path,
                                                                                                        'fixtures/track_analysis/Tracks_result.h5')),
                                                            os.path.join(self.output_folder, 'Tracks_2.h5'), exact=False)
        self.assertTrue(data_equal, msg=error_msg)
        # Test 3: Fit all DUTs at once (special mode for constrained residuals)
        track_analysis.fit_tracks(input_track_candidates_file=analysis_utils.get_data('fixtures/track_analysis/TrackCandidates_result.h5',
                                                                                      output=os.path.join(testing_path,
                                                                                                          'fixtures/track_analysis/TrackCandidates_result.h5')),
                                  input_alignment_file=analysis_utils.get_data('fixtures/track_analysis/Alignment_result.h5',
                                                                               output=os.path.join(testing_path,
                                                                                                   'fixtures/track_analysis/Alignment_result.h5')),
                                  output_tracks_file=os.path.join(self.output_folder, 'Tracks_All.h5'),
                                  exclude_dut_hit=False,
                                  selection_track_quality=1,
                                  force_prealignment=True)
        # Fit DUTs consecutevly, but use always the same DUTs. Should result in the same data as above
        track_analysis.fit_tracks(input_track_candidates_file=analysis_utils.get_data('fixtures/track_analysis/TrackCandidates_result.h5',
                                                                                      output=os.path.join(testing_path, 'fixtures/track_analysis/TrackCandidates_result.h5')),
                                  input_alignment_file=analysis_utils.get_data('fixtures/track_analysis/Alignment_result.h5',
                                                                               output=os.path.join(testing_path,
                                                                                                   'fixtures/track_analysis/Alignment_result.h5')),
                                  output_tracks_file=os.path.join(self.output_folder, 'Tracks_All_Iter.h5'),
                                  selection_hit_duts=range(4),
                                  exclude_dut_hit=False,
                                  selection_track_quality=1,
                                  force_prealignment=True)
        data_equal, error_msg = test_tools.compare_h5_files(os.path.join(self.output_folder, 'Tracks_All.h5'), os.path.join(self.output_folder, 'Tracks_All_Iter.h5'), exact=False)
        self.assertTrue(data_equal, msg=error_msg)
        # Fit DUTs consecutevly, but use always the same DUTs defined for each DUT separately. Should result in the same data as above
        track_analysis.fit_tracks(input_track_candidates_file=analysis_utils.get_data('fixtures/track_analysis/TrackCandidates_result.h5',
                                                                                      output=os.path.join(testing_path,
                                                                                                          'fixtures/track_analysis/TrackCandidates_result.h5')),
                                  input_alignment_file=analysis_utils.get_data('fixtures/track_analysis/Alignment_result.h5',
                                                                               output=os.path.join(testing_path,
                                                                                                   'fixtures/track_analysis/Alignment_result.h5')),
                                  output_tracks_file=os.path.join(self.output_folder, 'Tracks_All_Iter_2.h5'),
                                  selection_hit_duts=[range(4), range(4), range(4), range(4)],
                                  exclude_dut_hit=False,
                                  selection_track_quality=1,
                                  force_prealignment=True)
        data_equal, error_msg = test_tools.compare_h5_files(os.path.join(self.output_folder, 'Tracks_All.h5'), os.path.join(self.output_folder, 'Tracks_All_Iter_2.h5'), exact=False)
        self.assertTrue(data_equal, msg=error_msg)
        # Fit tracks and eliminate merged tracks
        track_analysis.fit_tracks(input_track_candidates_file=analysis_utils.get_data('fixtures/track_analysis/TrackCandidates_result.h5',
                                                                                      output=os.path.join(testing_path,
                                                                                                          'fixtures/track_analysis/TrackCandidates_result.h5')),
                                  input_alignment_file=analysis_utils.get_data('fixtures/track_analysis/Alignment_result.h5',
                                                                               output=os.path.join(testing_path,
                                                                                                   'fixtures/track_analysis/Alignment_result.h5')),
                                  output_tracks_file=os.path.join(self.output_folder, 'Tracks_merged.h5'),
                                  selection_track_quality=1,
                                  min_track_distance=True,  # Activate track merge cut,
                                  force_prealignment=True)
        data_equal, error_msg = test_tools.compare_h5_files(analysis_utils.get_data('fixtures/track_analysis/Tracks_merged_result.h5',
                                                                                    output=os.path.join(testing_path, 'fixtures/track_analysis/Tracks_merged_result.h5')), os.path.join(self.output_folder, 'Tracks_merged.h5'), exact=False)
        self.assertTrue(data_equal, msg=error_msg)
if __name__ == '__main__':
    # Run this test case standalone with verbose logging.
    import logging
    logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s")
    suite = unittest.TestLoader().loadTestsFromTestCase(TestTrackAnalysis)
    unittest.TextTestRunner(verbosity=2).run(suite)
| mit |
natduca/ndbg | ui/overlay.py | 1 | 11736 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# An overlay is a set of additions to the debugger UI made by a specific module.
# This includes menus, hotkeys, and so on.
#
# Overlays can be enabled and disabled. When disabled, this causes menus specific
# to the overlay to disappear.
#
# Use of overlays ensures that individual tabs in the UI can contribute to the overall
# UI without having to in-turn centralize the UI in a single implementation import gtk
from util import *
from resources import *
class MainWindowOverlay(object):
    @staticmethod
    def _register_tab_prefs(settings):
        # Ensure the per-layout tab->panel assignment setting exists
        # before anyone reads or writes it.
        settings.register("TabPanelAssignments", dict, {})
    @staticmethod
    def _update_tab_prefs(settings,mw,layout):
        """Persist the current tab->panel assignments for *layout*.

        Walks every visible tab of every overlay on the main window and
        records which panel it currently lives in.  The settings object is
        only written back when an assignment actually changed, to avoid
        needless config churn.
        """
        # log1("Updating prefs for layout %s", layout)
        import copy
        # Deep-copy so mutations don't leak into the settings object
        # unless we decide to commit.
        tab_panels = copy.deepcopy(settings.TabPanelAssignments)
        needs_commit = False
        for ovl in mw.overlays:
            for tab in ovl._tabs:
                panel = tab.get_parent()
                if panel and panel.get_property('visible'):
                    if not tab_panels.has_key(layout):
                        tab_panels[layout] = {}
                        needs_commit = True
                    if not tab_panels[layout].has_key(tab.id):
                        needs_commit = True
                    else:
                        if tab_panels[layout][tab.id] != panel.id:
                            needs_commit = True
                    # if needs_commit:
                    #   log2("%s: %s parent = %s",layout, tab.id, panel.id)
                    tab_panels[layout][tab.id] = panel.id
        if needs_commit:
            settings.TabPanelAssignments = tab_panels
@staticmethod
def set_layout(settings, mw, layout):
for ovl in mw.overlays:
ovl.layout = layout
    def __init__(self,mw,settings,name,initial_layout):
        """Create an overlay bound to main window *mw*.

        *settings* stores the per-layout tab/panel assignments, *name*
        identifies the overlay and *initial_layout* selects which saved
        layout to use for tab placement.
        """
        MainWindowOverlay._register_tab_prefs(settings)
        self._settings = settings
        self._mw = mw
        self._name = name
        self._items = []       # all _OverlayItem wrappers (menus, tabs, ...)
        self._tab_items = []   # items reattached when the layout changes
        self._ag = gtk.AccelGroup()
        self._f10_item = None
        self._hotkeys = []
        self._attached = False
        self._visible = True
        self._enabled = True
        self._tabs = []
        self._layout = initial_layout
        self._layout_changing = False
        # Assigning through the property (not _attached) runs the setter,
        # which attaches the accel group and any existing items.
        self.attached = True # trigger ag attachment
def destroy(self):
for i in self._items:
i.destroy()
    # Convenience wrappers: each adds a menu item (described by the named
    # resource) to a specific top-level menu of the main window and wires
    # *cb* to its activation.
    def add_file_menu_item(self,resource_name,cb,userdata=None):
        return self._add_menu_item('file_menu',resource_name,cb,userdata)
    def add_debug_menu_item(self,resource_name,cb,userdata=None):
        return self._add_menu_item('debug_menu',resource_name,cb,userdata)
    def add_tabs_menu_item(self,resource_name,cb,userdata=None):
        return self._add_menu_item('tabs_menu',resource_name,cb,userdata)
    def add_tools_menu_item(self,resource_name,cb,userdata=None):
        return self._add_menu_item('tools_menu',resource_name,cb,userdata)
    def add_keyboard_action(self,resource_name,cb):
        """Register *cb* to run when the resource's key combination fires.

        The key name and modifiers come from the KeyboardActionResource
        identified by *resource_name*.
        """
        # print "Add %s %x" % (keyname,modifiers)
        resource = self._mw.resources.get_resource_of_type(KeyboardActionResource,resource_name)
        a = DynObject()
        a.keyname = resource.keyname
        a.modifiers = resource.modifiers
        a.cb = cb
        self._hotkeys.append(a)
def find_tab(self,tab_type):
"""
Finds a tab of a given type.
"""
for t in self._tabs:
if type(t) == tab_type:
return t
def find_tab_by_id(self,id):
"""
Finds a tab of a given type.
"""
for t in self._tabs:
if t.id == id:
return t
    @property
    def name(self):
        # Overlay name (read-only).
        return self._name
    @property
    def tabs(self):
        # Snapshot copy; mutating the returned list does not affect the overlay.
        return list(self._tabs)
    @property
    def attached(self):
        # True when this overlay's items and accel group are attached to
        # the main window.
        return self._attached
    @attached.setter
    def attached(self,v):
        """Attach or detach all overlay items and the accel group."""
        if type(v) != bool:
            raise TypeError("Expected bool")
        if self._attached == v:
            return
        if self._attached and v == False:
            # Detach items first, then the accelerator group.
            for i in self._items:
                i.detach()
            self._mw.remove_accel_group(self._ag)
        elif self._attached == False and v == True:
            # Attach order is the mirror image of detach.
            self._mw.add_accel_group(self._ag)
            for i in self._items:
                i.attach()
        self._attached = v
    @property
    def enabled(self):
        # True when the overlay's items respond to user interaction.
        return self._enabled
    @enabled.setter
    def enabled(self,v):
        """Enable or disable (grey out) every item of this overlay."""
        if type(v) != bool:
            raise TypeError("Expected bool")
        if self._enabled == v:
            return
        if v:
            for i in self._items:
                i.enable()
        else:
            for i in self._items:
                i.disable()
        self._enabled = v
    @property
    def visible(self):
        # True when the overlay's items are shown in the UI.
        return self._visible
    @visible.setter
    def visible(self,v):
        """Show or hide every item of this overlay."""
        if type(v) != bool:
            raise TypeError("Expected bool")
        # NOTE: unlike the other setters there is deliberately no early
        # return when the value is unchanged (see commented-out check) --
        # the show/hide calls are re-applied every time.
        # if self._visible == v:
        #   return
        if v:
            for i in self._items:
                i.show()
        else:
            for i in self._items:
                i.hide()
        self._visible = v
    @property
    def layout(self):
        # Name of the currently active layout.
        return self._layout
    @layout.setter
    def layout(self,layout):
        """Switch to *layout*, reattaching items so they pick up the new
        panel assignments, then persist the resulting assignments."""
        self._layout_changing = True
        self._layout = layout
        # change owners of all tabs
        for oitem in self._tab_items:
            oitem.detach()
            oitem.attach() # reattaches to new layout
        self._layout_changing = False
        MainWindowOverlay._update_tab_prefs(self._settings, self._mw, layout)
    def _add_menu_item(self,base_menu_name,resource_name,cb,userdata):
        """Create a menu item from the named resource and register it.

        The item's label and accelerator come from the MenuItemResource;
        *cb* is invoked (with *userdata*) when the item is activated and
        currently sensitive.  An _OverlayItem wrapper with attach/detach/
        show/hide/enable/disable closures is stored so the overlay can
        manage the item's lifecycle.
        """
        resource = self._mw.resources.get_resource_of_type(MenuItemResource, resource_name)
        text = resource.text
        key = resource.keyname
        mod = resource.modifiers
        item = gtk.MenuItem(text)
        # Parse the accelerator key name; kv == 0 means "no accelerator".
        if key == 0 or key == None:
            kv = 0
        else:
            kv = gtk.accelerator_parse(key)[0]
        if kv != 0:
            item.add_accelerator("activate", self._ag, kv, mod, gtk.ACCEL_VISIBLE)
        # Remember the bare-F10 item; F10 needs special handling in GTK.
        if key == 'F10' and mod == 0:
            self._f10_item = item
        def dispatch(a,b):
            # Guard: ignore activations while the item is insensitive.
            if item.get_sensitive():
                cb(a,b)
        item.connect("activate", dispatch,userdata)
        # def on_show(*args):
        #   print "%s shown"% item.get_label()
        #   import pdb; pdb.set_trace()
        # item.connect("show", on_show)
        def attach():
            m = getattr(self._mw, base_menu_name)
            m.append(item)
            if self._visible:
                item.show()
                m.show()
        def detach():
            m = getattr(self._mw, base_menu_name)
            m.remove(item)
            # Hide the parent menu when it becomes empty.
            if len(m.get_children()) == 0:
                m.hide()
        def show():
            item.show()
        def hide():
            item.hide()
        def enable():
            item.set_sensitive(True)
        def disable():
            item.set_sensitive(False)
        oitem = _OverlayItem()
        oitem.attach = attach
        oitem.detach = detach
        oitem.show = show
        oitem.hide = hide
        oitem.enable = enable
        oitem.disable = disable
        self._items.append(oitem)
        # NOTE(review): menu items are also added to _tab_items, so they get
        # detached/reattached on layout changes -- looks intentional here but
        # verify against add_tab, which is not fully visible in this file view.
        self._tab_items.append(oitem)
        oitem.init(self) # make sure that the new item is sync'd with our attach/enable/visible state
    def add_tab(self,tab,tab_id):
        """Register a tab widget under a unique tab_id and manage it as an
        overlay item.

        The destination panel is resolved (in order of precedence) from
        per-layout user settings, the TabPageResource's panel_id, and finally
        the fallback "panel1".  Raises Exception if tab_id is already in use.
        """
        if tab_id in self._mw._ids_in_use:
            raise Exception("ID %s is already in use." % tab_id)
        self._mw._ids_in_use.add(tab_id)
        tab.id = tab_id
        resource = self._mw.resources.get_resource_of_type(TabPageResource,tab_id)
        self._tabs.append(tab)
        def attach():
            # Default: the panel named by the resource, else "panel1".
            if self._mw.panels.has_key(resource.panel_id):
                panel = self._mw.panels[resource.panel_id]
            else:
                log0("Unrecognized panel %s in resource %s", resource.panel_id, tab_id)
                panel = self._mw.panels["panel1"]
            # A per-layout user setting overrides the resource default.
            if self._settings.TabPanelAssignments.has_key(self._layout):
                if self._settings.TabPanelAssignments[self._layout].has_key(tab_id):
                    panel_id = self._settings.TabPanelAssignments[self._layout][tab_id]
                    if self._mw.panels.has_key(panel_id):
                        panel = self._mw.panels[panel_id]
                        # print "%s: tab %s using panel from settings %s" % (self._layout, tab_id, panel.id)
                    else:
                        log0("Unrecognized panel in setting: %s" % panel_id)
                else:
                    # print "%s: tab %s using default panel assignment %s (no specific assignment)" % (self._layout, tab_id, panel.id)
                    pass
            else:
                # print "%s: tab %s using default panel assignment %s (no layout)" % (self._layout, tab_id, panel.id)
                pass
            panel.add_tab(tab,resource.title, self)
        def detach():
            panel = tab.get_parent()
            assert isinstance(panel, TabPanel)
            panel.remove_tab(tab)
        def show():
            tab.show_all()
            tab.get_parent().update_visibility()
        def hide():
            tab.hide()
            # NOTE(review): this local assignment has no effect (dead store).
            visible = False
            p = tab.get_parent()
            p.update_visibility()
        def enable():
            tab.set_sensitive(True)
        def disable():
            tab.set_sensitive(False)
        def destroy():
            if tab.destroy:
                tab.destroy()
        oitem = _OverlayItem()
        oitem.attach = attach
        oitem.detach = detach
        oitem.show = show
        oitem.hide = hide
        oitem.enable = enable
        oitem.disable = disable
        # oitem.destroy = destroy
        # Tabs must be shown for panel visibility bookkeeping to work.
        if not tab.get_property('visible'):
            log0("Warning: tab %s was added but was not shown.", tab_id)
            tab.show()
        self._items.append(oitem)
        oitem.init(self) # make sure that the new item is sync'd with our attach/enable/visible state
def on_tab_panel_changed(self, tab, panel):
if self._layout_changing:
return
MainWindowOverlay._update_tab_prefs(self._settings, self._mw, self._layout)
    def _handle_key_press(self,event):
        """Dispatch a GTK key-press event to this overlay's hotkeys.

        Returns True when the event was consumed, False when it was not,
        and None (implicitly) when the overlay is detached/disabled/hidden
        or the key is a bare modifier.
        """
        # print "handle f10 and f10_teim is %s" % self._f10_item
        if self._attached == False or self._enabled == False or self._visible == False:
            return
        if event.is_modifier:
            return
        keyname = gtk.gdk.keyval_name(event.keyval)
        # log3("%s: Processsing key %s mod=0x%x", self.name, keyname,event.state)
        if keyname == 'F10':
            # F10 routes to the dedicated menu item captured in _add_menu_item.
            if self._mw and self._f10_item:
                self._f10_item.emit('activate')
                return True
        else:
            for a in self._hotkeys:
                # log3("%s: Considering %s %x", self.name, a.keyname, a.modifiers)
                # NOTE(review): exact equality with event.state means lock-type
                # modifiers (CapsLock/NumLock) break matching — confirm whether
                # the state should be masked first.
                if a.keyname == keyname and a.modifiers == (event.state):
                    log3("%s: will handle %s %x", self.name, a.keyname, a.modifiers)
                    a.cb()
                    return True
        return False
class _OverlayItem(object):
def __init__(self):
self._attach = lambda : None
self._detach = lambda : None
self._show = lambda : None
self._hide = lambda : None
self._destroy = lambda: None
self._enable = lambda : None
self._disable = lambda : None
@property
def attach(self):
return self._attach
@attach.setter
def attach(self,v):
self._attach = v
@property
def detach(self):
return self._detach
@detach.setter
def detach(self,v):
self._detach = v
@property
def enable(self):
return self._enable
@enable.setter
def enable(self,v):
self._enable = v
@property
def disable(self):
return self._disable
@disable.setter
def disable(self,v):
self._disable = v
@property
def show(self):
return self._show
@show.setter
def show(self,v):
self._show = v
@property
def hide(self):
return self._hide
@hide.setter
def hide(self,v):
self._hide = v
@property
def destroy(self):
return self._destroy
@destroy.setter
def destroy(self, v):
self._destroy = v
def init(self,ovl):
if ovl.attached:
self._attach()
if ovl.visible == False:
self._hide()
if ovl.enabled == False:
self._disable()
| apache-2.0 |
codingforentrepreneurs/srvup-rest-framework | src/srvup/urls.py | 2 | 3803 | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework import routers
from comments.serializers import CommentViewSet
from comments.views import CommentCreateAPIView, CommentDetailAPIView, CommentListAPIView
from videos.serializers import CategoryViewSet, VideoViewSet
from videos.views import CategoryListAPIView, CategoryDetailAPIView, VideoDetailAPIView
# DRF router: exposes categories/comments/videos as a browsable REST API
# under /api/.
router = routers.DefaultRouter()
router.register(r"categories", CategoryViewSet)
router.register(r"comments", CommentViewSet)
router.register(r"videos", VideoViewSet)
# NOTE(review): patterns() with string view paths is the pre-Django-1.8
# style (removed in 1.10) — fine for this codebase's Django version.
# Core site + v2 API (class-based API views).
urlpatterns = patterns('',
    url(r'^jquery-test/$', 'srvup.views.jquery_test_view'),
    url(r'^api2/$', 'srvup.views.api_home_abc', name='api_home'),
    url(r'^api2/comment/$',
        CommentListAPIView.as_view(),
        name='comment_list_api',
        ),
    url(r'^api2/comment/create/$',
        CommentCreateAPIView.as_view(),
        name='comment_create_api',
        ),
    url(r'^api2/comment/(?P<id>\d+)/$',
        CommentDetailAPIView.as_view(),
        name='comment_detail_api',
        ),
    url(r'^api2/projects/$',
        CategoryListAPIView.as_view(),
        name='category_list_api'),
    url(r'^api2/projects/(?P<slug>[\w-]+)/$',
        CategoryDetailAPIView.as_view(),
        name='category_detail_api'),
    url(r'^api2/projects/(?P<cat_slug>[\w-]+)/(?P<vid_slug>[\w-]+)/$',
        VideoDetailAPIView.as_view(),
        name='video_detail_api'),
    # Examples:
    #url(r'^about/$', TemplateView.as_view(template_name='base.html'), name='home'),
    #url(r'^pricing/$', TemplateView.as_view(template_name='base.html'), name='home'),
    url(r'^contact/$', TemplateView.as_view(template_name='company/contact_us.html'), name='contact_us'),
    url(r'^$', 'srvup.views.home', name='home'),
    url(r'^projects/$', 'videos.views.category_list', name='projects'),
    url(r'^projects/(?P<cat_slug>[\w-]+)/$', 'videos.views.category_detail', name='project_detail'),
    url(r'^projects/(?P<cat_slug>[\w-]+)/(?P<vid_slug>[\w-]+)/$', 'videos.views.video_detail', name='video_detail'),
    url(r'^dj/admin/', include(admin.site.urls)),
    url(r'^api/auth/token/$', 'rest_framework_jwt.views.obtain_jwt_token'),
    url(r'^api/auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^api/', include(router.urls)),
)
# Serve static/media files directly only in development.
if settings.DEBUG:
    urlpatterns += patterns('',) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += patterns('',) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Billing (subscription upgrade/cancel + history).
urlpatterns += patterns('billing.views',
    url(r'^upgrade/$', 'upgrade', name='account_upgrade'),
    url(r'^billing/$', 'billing_history', name='billing_history'),
    url(r'^billing/cancel/$', 'cancel_subscription', name='cancel_subscription'),
)
#auth login/logout
urlpatterns += patterns('accounts.views',
    url(r'^account/$', 'account_home', name='account_home'),
    url(r'^logout/$', 'auth_logout', name='logout'),
    url(r'^login/$', 'auth_login', name='login'),
    url(r'^register/$', 'auth_register', name='register'),
)
#Comment Thread
urlpatterns += patterns('comments.views',
    url(r'^comment/(?P<id>\d+)$', 'comment_thread', name='comment_thread'),
    url(r'^comment/create/$', 'comment_create_view', name='comment_create'),
)
#Notifications
urlpatterns += patterns('notifications.views',
    url(r'^notifications/$', 'all', name='notifications_all'),
    url(r'^notifications/ajax/$', 'get_notifications_ajax', name='get_notifications_ajax'),
    url(r'^notifications/(?P<id>\d+)/$', 'read', name='notifications_read'),
)
| apache-2.0 |
LUTAN/tensorflow | tensorflow/contrib/keras/api/keras/preprocessing/__init__.py | 132 | 1094 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.preprocessing import image
from tensorflow.contrib.keras.api.keras.preprocessing import sequence
from tensorflow.contrib.keras.api.keras.preprocessing import text
del absolute_import
del division
del print_function
| apache-2.0 |
nblock/ofxstatement-austrian | src/ofxstatement/plugins/easybank.py | 1 | 6467 | #!/usr/bin/env python3
# This file is part of ofxstatement-austrian.
# See README.rst for more information.
import csv
import re
from ofxstatement import statement
from ofxstatement.parser import CsvStatementParser
from ofxstatement.plugin import Plugin
from ofxstatement.statement import generate_transaction_id
from ofxstatement.plugins.utils \
import clean_multiple_whitespaces, fix_amount_string
class EasybankCsvParser(CsvStatementParser):
    """Shared CSV-parsing behaviour for all Easybank export formats."""

    # Easybank exports format dates as day.month.year.
    date_format = "%d.%m.%Y"

    def split_records(self):
        """Return an iterator over the raw semicolon-delimited records."""
        reader = csv.reader(self.fin, delimiter=";")
        return reader
class EasybankCreditCardCsvParser(EasybankCsvParser):
    """The csv parser for Easybank (credit card)."""
    # Column indices (after the transaction id is spliced into slot 2 in
    # parse_record): account, memo, id, date, ..., amount, currency.
    mappings = {
        "memo": 1,
        "id": 2,
        "date": 3,
        "amount": 5,
    }
    def parse(self):
        """Parse the whole file and recompute the statement balance."""
        stmt = super(EasybankCreditCardCsvParser, self).parse()
        statement.recalculate_balance(stmt)
        return stmt
    def parse_record(self, line):
        """Parse a single record."""
        # Split the description into two parts and save it to the line list.
        parts = line[1].split('|')
        # 3 parts: Description, foreign language, transaction id
        # 2 parts: Description, transaction id
        if len(parts) == 3:
            line[1] = "{} ({})".format(parts[0], parts[1])
        else:
            line[1] = parts[0]
        # The transaction id (last part) becomes column 2 so `mappings` works.
        line.insert(2, parts[-1])
        # Account id
        if not self.statement.account_id:
            self.statement.account_id = line[0]
        # Currency
        if not self.statement.currency:
            self.statement.currency = line[6]
        # Cleanup amount
        line[5] = fix_amount_string(line[5])
        line[1] = clean_multiple_whitespaces(line[1])
        # Create statement and fixup missing parts
        stmtline = super(EasybankCreditCardCsvParser, self).parse_record(line)
        stmtline.trntype = 'DEBIT' if stmtline.amount < 0 else 'CREDIT'
        return stmtline
class EasybankGiroCsvParser(EasybankCsvParser):
    """The csv parser for Easybank (giro)."""
    # Column indices after parse_record splices check_no/memo/payee into
    # slots 1-3: account, check_no, memo, payee, date, ..., amount, currency.
    mappings = {
        "check_no": 1,
        "memo": 2,
        "payee": 3,
        "date": 4,
        "amount": 6,
    }
    # Transaction marker, e.g. "BG/000123456" — splits memo from bank details.
    reg_description = re.compile(r'[A-Z]{2}/000[0-9]{6}')
    # Optional BIC, then IBAN, then remaining payee text.
    reg_iban = re.compile(
        r'([A-Z]{6}[A-Z0-9]{2}[^\s]*)?\s?([A-Z]{2}[0-9]{10,34})\s(.*)')
    # Legacy (pre-IBAN) bank code + account number.
    reg_legacy = re.compile(r'(.*)([0-9]{5,})\s([0-9]{6,})(.*)')
    def extract_check_no(self, description):
        '''Try to extract the statement check_no.

        Returns the numeric part of the "XX/000NNNNNN" marker with leading
        zeros stripped, or '' when no marker is present.
        '''
        result = ''
        mo = self.reg_description.search(description)
        if mo:
            result = str(int(mo.group(0).split('/')[1]))
        return result
    def extract_description(self, description):
        '''Cleanup description from a giro account.

        Returns a (memo, payee) tuple.  NOTE(review): assumes the
        "XX/000NNNNNN" marker is present — parts[1] raises IndexError
        otherwise; confirm all giro rows carry the marker.
        '''
        # extract iban/bic, account number, ...
        parts = [x.strip() for x in self.reg_description.split(description)]
        # parts: memo, transaction
        if not parts[1]:
            return parts[0], parts[0]
        # parts: memo, transaction, banking information
        else:
            # extract iban, bic and text
            iban_bic = self.reg_iban.search(parts[1])
            if iban_bic:
                # iban, bic and text
                if iban_bic.group(1):
                    result = '{0} ({1} {2})'.format(iban_bic.group(3),
                                                    iban_bic.group(2),
                                                    iban_bic.group(1))
                # iban only
                else:
                    result = '{0} ({1})'.format(
                        iban_bic.group(3), iban_bic.group(2))
                return parts[0], result
            # extract legacy banking number
            account_number = self.reg_legacy.search(parts[1])
            if account_number:
                if account_number.group(1):
                    text = account_number.group(1).strip()
                else:
                    text = account_number.group(4).strip()
                return parts[0], '{0} ({1} {2})'.format(
                    text, account_number.group(3), account_number.group(2))
            # Could not extract anything useful, return parts as is.
            return parts[0], parts[1]
    def parse(self):
        """Parse the whole file and recompute the statement balance."""
        stmt = super(EasybankGiroCsvParser, self).parse()
        statement.recalculate_balance(stmt)
        return stmt
    def parse_record(self, line):
        """Parse a single record."""
        # Extract check_no/id
        description = line[1]
        del line[1]
        # Get check_no from description
        line.insert(1, self.extract_check_no(description))
        # Get memo and payee from description
        tt = self.extract_description(description)
        line.insert(2, tt[0])
        line.insert(3, tt[1])
        # line.insert(2, self.extract_description(description))
        # Account id
        if not self.statement.account_id:
            self.statement.account_id = line[0]
        # Currency
        if not self.statement.currency:
            self.statement.currency = line[7]
        # Cleanup parts
        line[6] = fix_amount_string(line[6])
        line[2] = clean_multiple_whitespaces(line[2])
        line[3] = clean_multiple_whitespaces(line[3])
        # Create statement and fixup missing parts
        stmtline = super(EasybankGiroCsvParser, self).parse_record(line)
        stmtline.trntype = 'DEBIT' if stmtline.amount < 0 else 'CREDIT'
        # Giro rows have no bank-supplied id; derive one from the content.
        stmtline.id = generate_transaction_id(stmtline)
        return stmtline
class EasybankPlugin(Plugin):
    """Easybank (CSV)"""
    def determine_parser(self, fp):
        """Determine the parser to use based on the first booking line.

        Credit-card exports embed '|'-separated parts in the description
        column; giro exports do not.
        """
        description = fp.readline().split(";")[1]
        fp.seek(0) # reset pointer so the chosen parser reads from the start
        if '|' in description:
            return EasybankCreditCardCsvParser(fp)
        else:
            return EasybankGiroCsvParser(fp)
    def get_parser(self, filename):
        """Get a parser instance.

        Ownership of the opened file handle passes to the parser (it is
        never closed here).
        """
        encoding = self.settings.get('charset', 'cp1252')
        f = open(filename, 'r', encoding=encoding)
        parser = self.determine_parser(f)
        parser.statement.bank_id = self.settings.get('bank', 'Easybank')
        return parser
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 smartindent autoindent
| gpl-3.0 |
winklerand/pandas | pandas/core/computation/expressions.py | 4 | 7066 | """
Expressions
-----------
Offer fast expression evaluation through numexpr
"""
import warnings
import numpy as np
from pandas.core.common import _values_from_object
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.core.config import get_option
if _NUMEXPR_INSTALLED:
    import numexpr as ne
# Test bookkeeping (see set_test_mode/get_test_result) and the module-level
# dispatch slots that set_use_numexpr() fills in.
_TEST_MODE = None
_TEST_RESULT = None
_USE_NUMEXPR = _NUMEXPR_INSTALLED
_evaluate = None
_where = None
# the set of dtypes that we will allow pass to numexpr
_ALLOWED_DTYPES = {
    'evaluate': set(['int64', 'int32', 'float64', 'float32', 'bool']),
    'where': set(['int64', 'float64', 'bool'])
}
# the minimum prod shape that we will use numexpr
_MIN_ELEMENTS = 10000
def set_use_numexpr(v=True):
    """Toggle numexpr usage by rebinding the module-level ``_evaluate`` and
    ``_where`` dispatch functions (no-op toggle if numexpr is missing)."""
    # set/unset to use numexpr
    global _USE_NUMEXPR
    if _NUMEXPR_INSTALLED:
        _USE_NUMEXPR = v
    # choose what we are going to do
    global _evaluate, _where
    if not _USE_NUMEXPR:
        _evaluate = _evaluate_standard
        _where = _where_standard
    else:
        _evaluate = _evaluate_numexpr
        _where = _where_numexpr
def set_numexpr_threads(n=None):
    """Set numexpr's thread count to ``n``; ``None`` resets it to the
    detected number of cores.  No-op when numexpr is unavailable/disabled."""
    # if we are using numexpr, set the threads to n
    # otherwise reset
    if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
        if n is None:
            n = ne.detect_number_of_cores()
        ne.set_num_threads(n)
def _evaluate_standard(op, op_str, a, b, **eval_kwargs):
    """ standard evaluation """
    # Record that numexpr was NOT used for this evaluation.
    if _TEST_MODE:
        _store_test_result(False)
    # Suppress numpy warnings (overflow/invalid/etc.) during the raw op.
    with np.errstate(all='ignore'):
        return op(a, b)
def _can_use_numexpr(op, op_str, a, b, dtype_check):
    """Return True if we WILL be using numexpr for this operation.

    Requires a string form of the op, enough elements to amortize numexpr's
    overhead, and operand dtypes within the allowed set for ``dtype_check``
    ('evaluate' or 'where').
    """
    if op_str is None:
        return False
    # required min elements (otherwise we are adding overhead)
    if np.prod(a.shape) <= _MIN_ELEMENTS:
        return False
    # check for dtype compatibility
    dtypes = set()
    for o in [a, b]:
        if hasattr(o, 'get_dtype_counts'):
            # pandas object: reject mixed-dtype frames outright
            s = o.get_dtype_counts()
            if len(s) > 1:
                return False
            dtypes |= set(s.index)
        elif isinstance(o, np.ndarray):
            dtypes.add(o.dtype.name)
    # allowed are a superset (or no dtype info was gathered at all)
    return not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes
def _evaluate_numexpr(op, op_str, a, b, truediv=True,
                      reversed=False, **eval_kwargs):
    """Evaluate ``a <op> b`` via numexpr when eligible, falling back to the
    standard python/numpy path otherwise.

    NOTE(review): the ``reversed`` parameter shadows the builtin of the same
    name; it flags that we were called from a reflected op method.
    """
    result = None
    if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
        try:
            # we were originally called by a reversed op
            # method
            if reversed:
                a, b = b, a
            # Unwrap pandas objects to their underlying ndarrays.
            a_value = getattr(a, "values", a)
            b_value = getattr(b, "values", b)
            result = ne.evaluate('a_value {op} b_value'.format(op=op_str),
                                 local_dict={'a_value': a_value,
                                             'b_value': b_value},
                                 casting='safe', truediv=truediv,
                                 **eval_kwargs)
        except ValueError as detail:
            # numexpr can't handle the operand types: silently fall back to
            # the standard path (result stays None).
            if 'unknown type object' in str(detail):
                pass
    if _TEST_MODE:
        _store_test_result(result is not None)
    if result is None:
        result = _evaluate_standard(op, op_str, a, b)
    return result
def _where_standard(cond, a, b):
    """Plain-numpy where(): unwrap any pandas objects, then dispatch."""
    unwrapped = [_values_from_object(x) for x in (cond, a, b)]
    return np.where(*unwrapped)
def _where_numexpr(cond, a, b):
    """where() via numexpr when eligible, else the standard numpy path.

    Unknown-type ValueErrors from numexpr trigger a silent fallback; any
    other numexpr failure is re-raised as TypeError.
    """
    result = None
    if _can_use_numexpr(None, 'where', a, b, 'where'):
        try:
            # Unwrap pandas objects to their underlying ndarrays.
            cond_value = getattr(cond, 'values', cond)
            a_value = getattr(a, 'values', a)
            b_value = getattr(b, 'values', b)
            result = ne.evaluate('where(cond_value, a_value, b_value)',
                                 local_dict={'cond_value': cond_value,
                                             'a_value': a_value,
                                             'b_value': b_value},
                                 casting='safe')
        except ValueError as detail:
            if 'unknown type object' in str(detail):
                pass
        except Exception as detail:
            raise TypeError(str(detail))
    if result is None:
        result = _where_standard(cond, a, b)
    return result
# Initialise the module-level dispatch (_evaluate/_where) from the user's
# compute.use_numexpr option at import time.
set_use_numexpr(get_option('compute.use_numexpr'))
def _has_bool_dtype(x):
    """True if ``x`` is boolean: an ndarray/Series with bool dtype, a frame
    whose dtypes include bool, or a plain python/numpy bool scalar.

    The EAFP chain is order-dependent: dtype first, then dtypes, then the
    scalar isinstance fallback.
    """
    try:
        return x.dtype == bool
    except AttributeError:
        try:
            return 'bool' in x.dtypes
        except AttributeError:
            return isinstance(x, (bool, np.bool_))
def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('/', '//', '**')),
                      unsupported=None):
    """Vet ``op_str`` for boolean operands.

    Returns True when numexpr may be used (including when the operands are
    not boolean at all), False (with a warning) when the op must run in
    python space, and raises NotImplementedError for ops that are invalid
    on bool dtypes altogether.
    """
    if unsupported is None:
        unsupported = {'+': '|', '*': '&', '-': '^'}
    # Non-boolean operands need no vetting.
    if not (_has_bool_dtype(a) and _has_bool_dtype(b)):
        return True
    if op_str in unsupported:
        warnings.warn("evaluating in Python space because the {op!r} "
                      "operator is not supported by numexpr for "
                      "the bool dtype, use {alt_op!r} instead"
                      .format(op=op_str, alt_op=unsupported[op_str]))
        return False
    if op_str in not_allowed:
        raise NotImplementedError("operator {op!r} not implemented for "
                                  "bool dtypes".format(op=op_str))
    return True
def evaluate(op, op_str, a, b, use_numexpr=True,
             **eval_kwargs):
    """ evaluate and return the expression of the op on a and b

        Parameters
        ----------
        op :    the actual operand
        op_str: the string version of the op
        a :     left operand
        b :     right operand
        use_numexpr : whether to try to use numexpr (default True)
        """
    # _bool_arith_check may veto numexpr for boolean operands (or raise for
    # ops that are invalid on bools).
    use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
    if use_numexpr:
        return _evaluate(op, op_str, a, b, **eval_kwargs)
    return _evaluate_standard(op, op_str, a, b)
def where(cond, a, b, use_numexpr=True):
    """ evaluate the where condition cond on a and b

        Parameters
        ----------
        cond : a boolean array
        a :    return if cond is True
        b :    return if cond is False
        use_numexpr : whether to try to use numexpr (default True)
        """
    chooser = _where if use_numexpr else _where_standard
    return chooser(cond, a, b)
def set_test_mode(v=True):
    """
    Keeps track of whether numexpr was used. Stores an additional ``True``
    for every successful use of evaluate with numexpr since the last
    ``get_test_result``
    """
    global _TEST_MODE, _TEST_RESULT
    _TEST_MODE = v
    # Reset the accumulated results whenever test mode is toggled.
    _TEST_RESULT = []
def _store_test_result(used_numexpr):
    """Record one evaluation outcome; only numexpr hits are accumulated."""
    global _TEST_RESULT
    if used_numexpr:
        _TEST_RESULT.append(used_numexpr)
def get_test_result():
    """Return the accumulated numexpr-usage results and reset the list."""
    global _TEST_RESULT
    res, _TEST_RESULT = _TEST_RESULT, []
    return res
| bsd-3-clause |
Workday/OpenFrame | chrome/test/chromedriver/test/waterfall_builder_monitor.py | 43 | 8030 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Waterfall monitoring script.
This script checks all builders specified in the config file and sends
status email about any step failures in these builders. This also
reports a build as failure if the latest build on that builder was built
2 days back. (Number of days can be configured in the config file)
This script can be run as cronjob on a linux machine once a day and
get email notification for any waterfall specified in the config file.
Sample cronjob entry below. This entry will run the script everyday at 9 AM.
Include this in the crontab file.
0 9 * * * <Path to script> --config <Path to json file>
"""
import datetime
import json
import optparse
import sys
import time
import traceback
import urllib
from datetime import timedelta
from email.mime.text import MIMEText
from subprocess import Popen, PIPE
# Subject-line templates for the daily status mail: %s is today's date
# (MM/DD); FAILURE_SUBJECT's two %d are failed / total builder counts.
SUCCESS_SUBJECT = ('[CHROME TESTING]: Builder status %s: PASSED.')
FAILURE_SUBJECT = ('[CHROME TESTING]: Builder status %s: FAILED %d out of %d')
EXCEPTION_SUBJECT = ('Exception occurred running waterfall_builder_monitor.py '
                     'script')
def GetTimeDelta(date, days):
  """Return ``date`` shifted by ``days`` days, or None if ``date`` is not a
  datetime (preserves the original silent-None contract)."""
  if not isinstance(date, datetime.datetime):
    return None
  return date + timedelta(days)
def GetDateFromEpochFormat(epoch_time):
  """Convert a Unix timestamp to a local-time datetime (second precision).

  struct_time's first six fields are exactly (year, month, day, hour,
  minute, second), so they map directly onto the datetime constructor;
  fractional seconds are discarded, matching the original behaviour.
  """
  return datetime.datetime(*time.localtime(epoch_time)[:6])
def GetJSONData(json_url):
  """Fetch ``json_url`` and return its parsed JSON body.

  Raises ValueError on malformed JSON and Exception on non-200 responses.
  NOTE(review): the response is only closed on the success path — it leaks
  when an exception is raised.  (Python 2 urllib API.)
  """
  response = urllib.urlopen(json_url)
  if response.getcode() == 200:
    try:
      data = json.loads(response.read())
    except ValueError:
      print 'ValueError for JSON URL: %s' % json_url
      raise
  else:
    raise Exception('Error from URL: %s' % json_url)
  response.close()
  return data
def SendEmailViaSendmailCommand(sender_email, recipient_emails,
                                subject, email_body):
  """Deliver a plain-text message by piping it into the local sendmail
  binary; ``-t`` makes sendmail take recipients from the headers."""
  message = MIMEText(email_body)
  message["From"] = sender_email
  message["To"] = recipient_emails
  message["Subject"] = subject
  mailer = Popen(["/usr/sbin/sendmail", "-t"], stdin=PIPE)
  mailer.communicate(message.as_string())
def SendStatusEmailViaSendmailCommand(consolidated_results,
                                      recipient_emails,
                                      sender_email):
  """Compose and send the daily status mail from per-builder results.

  NOTE(review): the subject's failure count excludes too-old builds
  ('and not'), while the body lists both failures and too-old builds
  ('or') — presumably intentional, but worth confirming.
  """
  failure_count = 0
  for result in consolidated_results:
    if result['error'] != 'passed' and not result['build_too_old']:
      failure_count += 1
  # today as "MM/DD".
  today = str(datetime.date.today()).replace('-', '/')[5:]
  if failure_count == 0:
    subject = SUCCESS_SUBJECT % today
  else:
    subject = FAILURE_SUBJECT % (today,
                                 failure_count,
                                 len(consolidated_results))
  email_body = ''
  for result in consolidated_results:
    if result['error'] != 'passed' or result['build_too_old']:
      if result['build_date'] is not None:
        email_body += result['platform'] + ': ' +\
                      result['build_link'] + ' ( Build too old: ' +\
                      result['build_date'] + ' ) ' +'\n\n'
      else:
        email_body += result['platform'] + ': ' +\
                      result['build_link'] + '\n\n'
  SendEmailViaSendmailCommand(sender_email, recipient_emails,
                              subject, email_body)
def SendExceptionEmailViaSendmailCommand(exception_message_lines,
                                         recipient_emails,
                                         sender_email):
  """Email a formatted traceback (one list entry per line) to the
  monitoring recipients."""
  subject = EXCEPTION_SUBJECT
  # Join the traceback lines into a single body (the redundant
  # ``email_body = ''`` dead store that preceded this was removed).
  email_body = '\n'.join(exception_message_lines)
  SendEmailViaSendmailCommand(sender_email, recipient_emails,
                              subject, email_body)
class OfficialBuilderParser(object):
  """This class implements basic utility functions on a specified builder.

  ``build_info`` is one entry of the config's build_info map and must
  provide 'builder_url', 'json_url' (with a %s/%d build slot) and
  'builds_url'.  Fetching the latest completed build happens at
  construction time (network I/O in __init__).
  """
  def __init__(self, builder_type, build_info):
    self.platform = builder_type
    self.builder_info = build_info
    self.builder_url = build_info['builder_url']
    self.build_json_url = build_info['json_url']
    # Latest completed build number, or None when none has finished.
    self.build = self._GetLatestBuildNumber()
  def _GetLatestBuildNumber(self):
    """Return the newest completed build number for this builder."""
    json_url = self.builder_info['builds_url']
    data = GetJSONData(json_url)
    # Get a sorted list of all the keys in the json data.
    keys = sorted(map(int, data.keys()))
    return self._GetLatestCompletedBuild(keys)
  def _GetLatestCompletedBuild(self, keys):
    """Scan builds newest-first; a 'text' field marks a completed build."""
    reversed_list = keys[::-1]
    for build in reversed_list:
      data = self._GetJSONDataForBuild(build)
      if data is not None:
        if 'text' in data:
          return build
    return None
  def _GetJSONDataForBuild(self, build):
    """Fetch the JSON blob for one build number (None passes through)."""
    if build is None:
      return build
    json_url = self.build_json_url % build
    return GetJSONData(json_url)
class GetBuilderStatus(OfficialBuilderParser):
  """Evaluates the pass/fail/staleness status of one builder's latest build."""
  def __init__(self, builder_type, build_info):
    OfficialBuilderParser.__init__(self, builder_type, build_info)
  def CheckForFailedSteps(self, days):
    """Return a status dict for the latest build ({} when none exists).

    ``days`` is a negative day count (see old_build_days handling in main);
    a build older than now+days is flagged build_too_old.
    """
    if self.build is None:
      return {}
    result = {'platform': self.platform,
              'build_number': self.build,
              'build_link': self.builder_url + str(self.build),
              'build_date': None,
              'build_too_old': False,
              'error': 'unknown'}
    data = self._GetJSONDataForBuild(self.build)
    if data is not None:
      # The buildbot 'text' field carries the outcome words.
      if 'text' in data:
        if 'build' in data['text'] and 'successful' in data['text']:
          result['error'] = 'passed'
        else:
          if 'failed' in data['text'] or\
             'exception' in data['text'] or\
             'interrupted' in data['text']:
            result['error'] = 'failed'
      # 'times'[0] is the build's start timestamp (epoch seconds).
      if 'times' in data:
        old_date = GetTimeDelta(datetime.datetime.now(), days)
        last_build_date = GetDateFromEpochFormat(data['times'][0])
        if last_build_date < old_date:
          result['build_too_old'] = True
          result['build_date'] = str(last_build_date).split(' ')[0]
    else:
      raise Exception('There was some problem getting JSON data '
                      'from URL: %s' % result['build_link'])
    return result
def main():
  """Parse flags, check every configured builder, and send a status mail.

  Returns 0 on success, 1 on missing --config or any exception (which is
  mailed to the configured recipients).
  """
  parser = optparse.OptionParser()
  parser.add_option('--config', type='str',
                    help='Absolute path to the config file.')
  (options, _) = parser.parse_args()
  if not options.config:
    print 'Error: missing required parameter: --config'
    parser.print_help()
    return 1
  try:
    with open(options.config, 'r') as config_file:
      try:
        json_data = json.loads(config_file.read())
      except ValueError:
        print 'ValueError for loading JSON data from : %s' % options.config
        raise ValueError
    # Negate the configured threshold: GetTimeDelta expects negative days.
    old_build_days = -2
    if 'old_build_days' in json_data:
      old_build_days = - json_data['old_build_days']
    consolidated_results = []
    for key in json_data['build_info'].keys():
      builder_status = GetBuilderStatus(key, json_data['build_info'][key])
      builder_result = builder_status.CheckForFailedSteps(old_build_days)
      consolidated_results.append(builder_result)
    SendStatusEmailViaSendmailCommand(consolidated_results,
                                      json_data['recipient_emails'],
                                      json_data['sender_email'])
    return 0
  except Exception:
    # NOTE(review): if the failure happened before json_data was assigned
    # (e.g. open() failed), referencing it here raises UnboundLocalError
    # and masks the original exception — confirm and guard.
    formatted_lines = traceback.format_exc().splitlines()
    SendExceptionEmailViaSendmailCommand(formatted_lines,
                                         json_data['recipient_emails'],
                                         json_data['sender_email'])
    return 1
# Script entry point: exit status mirrors main()'s return code.
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
AndroidDeveloperAlliance/platform_external_skia | bench/bench_compare.py | 27 | 4180 | '''
Created on May 16, 2011
@author: bungeman
'''
import sys
import getopt
import bench_util
def usage():
    """Prints simple usage information."""
    # (Python 2 print statements; kept as-is.)
    print '-o <file> the old bench output file.'
    print '-n <file> the new bench output file.'
    print '-h causes headers to be output.'
    print '-f <fieldSpec> which fields to output and in what order.'
    print '   Not specifying is the same as -f "bctondp".'
    print '  b: bench'
    print '  c: config'
    print '  t: time type'
    print '  o: old time'
    print '  n: new time'
    print '  d: diff'
    print '  p: percent diff'
class BenchDiff:
    """A compare between data points produced by bench.
    (BenchDataPoint, BenchDataPoint)"""
    def __init__(self, old, new):
        self.old = old
        self.new = new
        # Positive diff means the new run took less time than the old one.
        self.diff = old.time - new.time
        diffp = 0
        # Guard against division by zero for a zero-time old benchmark.
        if old.time != 0:
            diffp = self.diff / old.time
        self.diffp = diffp

    def __repr__(self):
        # Bug fix: arguments were formatted in (new, old) order, which
        # contradicted the BenchDiff(old, new) constructor signature.
        return "BenchDiff(%s, %s)" % (
                   str(self.old),
                   str(self.new),
               )
def main():
    """Parses command line and writes output.

    Reads two bench output files (-o old, -n new), pairs up matching
    (bench, config, time_type) entries, and prints one formatted diff row
    per pair, sorted by percent change.
    """
    try:
        opts, _ = getopt.getopt(sys.argv[1:], "f:o:n:h")
    except getopt.GetoptError, err:
        print str(err)
        usage()
        sys.exit(2)
    # Per-column format strings for data rows and the header row; keys are
    # the fieldSpec characters documented in usage().
    column_formats = {
        'b' : '{bench: >28} ',
        'c' : '{config: <4} ',
        't' : '{time_type: <4} ',
        'o' : '{old_time: >10.2f} ',
        'n' : '{new_time: >10.2f} ',
        'd' : '{diff: >+10.2f} ',
        'p' : '{diffp: >+8.1%} ',
    }
    header_formats = {
        'b' : '{bench: >28} ',
        'c' : '{config: <4} ',
        't' : '{time_type: <4} ',
        'o' : '{old_time: >10} ',
        'n' : '{new_time: >10} ',
        'd' : '{diff: >10} ',
        'p' : '{diffp: >8} ',
    }
    old = None
    new = None
    column_format = ""
    header_format = ""
    columns = 'bctondp'
    header = False
    for option, value in opts:
        if option == "-o":
            old = value
        elif option == "-n":
            new = value
        elif option == "-h":
            header = True
        elif option == "-f":
            columns = value
        else:
            usage()
            assert False, "unhandled option"
    if old is None or new is None:
        usage()
        sys.exit(2)
    # Assemble the row/header format strings from the requested columns.
    # NOTE(review): an invalid fieldSpec char raises KeyError here rather
    # than reaching the else branch (all dict values are truthy).
    for column_char in columns:
        if column_formats[column_char]:
            column_format += column_formats[column_char]
            header_format += header_formats[column_char]
        else:
            usage()
            sys.exit(2)
    if header:
        print header_format.format(
            bench='bench'
            , config='conf'
            , time_type='time'
            , old_time='old'
            , new_time='new'
            , diff='diff'
            , diffp='diffP'
        )
    old_benches = bench_util.parse({}, open(old, 'r'))
    new_benches = bench_util.parse({}, open(new, 'r'))
    bench_diffs = []
    for old_bench in old_benches:
        #filter new_benches for benches that match old_bench
        new_bench_match = [bench for bench in new_benches
            if old_bench.bench == bench.bench and
               old_bench.config == bench.config and
               old_bench.time_type == bench.time_type
            ]
        if (len(new_bench_match) < 1):
            continue
        bench_diffs.append(BenchDiff(old_bench, new_bench_match[0]))
    # Sort by percent change, then identity fields for a stable order.
    bench_diffs.sort(key=lambda d : [d.diffp,
                                     d.old.bench,
                                     d.old.config,
                                     d.old.time_type,
                                    ])
    for bench_diff in bench_diffs:
        print column_format.format(
            bench=bench_diff.old.bench.strip()
            , config=bench_diff.old.config.strip()
            , time_type=bench_diff.old.time_type
            , old_time=bench_diff.old.time
            , new_time=bench_diff.new.time
            , diff=bench_diff.diff
            , diffp=bench_diff.diffp
        )
# Script entry point.
if __name__ == "__main__":
    main()
| bsd-3-clause |
cuauv/software | vision/modules/old/2018/roulette.py | 1 | 16701 | #!/usr/bin/env python3
# Written by Chesley Tan.
# Tweaked by Will Smith.
import traceback
import time
import sys
import math
import itertools
import cv2
import numpy as np
import shm
from vision.modules.base import ModuleBase
from vision.framework.color import bgr_to_lab, elementwise_color_dist, range_threshold
from vision.framework.draw import draw_line, draw_circle, draw_contours
from vision.framework.helpers import to_umat, to_odd
from vision.framework.feature import outer_contours, contour_centroid, contour_area, simple_canny, line_polar_to_cartesian, find_lines
from vision.framework.transform import erode, rect_kernel, simple_gaussian_blur
from vision import options
from vision.modules.will_common import find_best_match
# Vision-module tuning knobs (LAB thresholds, kernel sizes, Hough params).
# NOTE(review): this list rebinds the name `options`, shadowing the
# `from vision import options` module import above — works because the
# list is built before the rebinding takes effect.
options = [
    options.BoolOption('debug', False),
    options.IntOption('red_lab_a_min', 140, 0, 255),
    options.IntOption('red_lab_a_max', 255, 0, 255),
    options.IntOption('black_lab_l_min', 0, 0, 255),
    options.IntOption('black_lab_l_max', 150, 0, 255),
    options.IntOption('green_lab_a_min', 0, 0, 255),
    options.IntOption('green_lab_a_max', 120, 0, 255),
    options.IntOption('color_dist_min_green_funnel', 0, 0, 255),
    options.IntOption('color_dist_max_green_funnel', 50, 0, 255),
    options.IntOption('blur_kernel', 17, 0, 255),
    options.IntOption('erode_kernel', 5, 0, 255),
    options.IntOption('black_erode_iters', 5, 0, 100),
    options.IntOption('hough_lines_rho', 5, 1, 1000),
    options.IntOption('hough_lines_theta', 1, 1, 1000),
    options.IntOption('hough_lines_thresh', 100, 0, 1000),
    options.IntOption('contour_min_area', 1000, 0, 100000),
    options.DoubleOption('percent_red_thresh', 0.05, 0, 1),
]
POST_UMAT = True
# Degrees of board rotation assumed when predicting a bin's future position.
ROTATION_PREDICTION_ANGLE = 20
# Camera dimensions are sampled once at import time from shm.
DOWNWARD_CAM_WIDTH = shm.camera.downward_width.get()
DOWNWARD_CAM_HEIGHT = shm.camera.downward_height.get()
# 30 for green, 75 for red
TARGET_ANGLE = 30
def within_camera(x, y):
    """True iff pixel (x, y) lies inside the downward camera frame."""
    in_width = 0 <= x < DOWNWARD_CAM_WIDTH
    in_height = 0 <= y < DOWNWARD_CAM_HEIGHT
    return in_width and in_height
def predict_xy(board_center_x, board_center_y, x, y, angle=None):
    """Rotate point (x, y) about the board centre by ``angle`` degrees.

    Predicts where a point on the rotating roulette board will be after the
    board turns.  ``angle`` defaults to the module-level
    ROTATION_PREDICTION_ANGLE (backward compatible with the old fixed-angle
    behaviour).  Returns the predicted (x, y) as ints (truncated).
    """
    if angle is None:
        angle = ROTATION_PREDICTION_ANGLE
    # Hoist the trig out of the coordinate expressions.
    theta = np.radians(angle)
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Translate to board-centred coordinates, rotate, translate back.
    translated_x = x - board_center_x
    translated_y = y - board_center_y
    predicted_x = int(translated_x * cos_t - translated_y * sin_t + board_center_x)
    predicted_y = int(translated_x * sin_t + translated_y * cos_t + board_center_y)
    return predicted_x, predicted_y
def calc_diff(new_centers, old_centers):
    """Sum of pairwise distances between corresponding new/old centers."""
    total = 0
    for new_center, old_center in zip(new_centers, old_centers):
        total += dist(new_center, old_center)
    return total
def dist(a, b):
    """Euclidean distance between 2-D points ``a`` and ``b``.

    Each point is any indexable (x, y) pair. Uses math.hypot, which is
    the idiomatic form and avoids intermediate overflow/underflow that
    sqrt(dx**2 + dy**2) can suffer for extreme coordinates.
    """
    return math.hypot(a[0] - b[0], a[1] - b[1])
def angle_diff(a, b):
    """Signed smallest angular difference b - a, wrapped into (-pi, pi]."""
    delta = b - a
    return math.atan2(math.sin(delta), math.cos(delta))
# This is the same as the function in vision/modules/will_common, but I'm too
# lazy to change this right now
def assign_bins(contours, bins_data, module_context):
    """Match new contour centroids to the tracked bins.

    Keeps track of which bin is which: every assignment (permutation) of
    the new centroids against the previously stored centroids is scored
    by total displacement, and the minimum-displacement assignment wins,
    so each bin keeps its identity across frames.
    """
    previous = [(bin_data.shm_group.centroid_x.get(),
                 bin_data.shm_group.centroid_y.get())
                for bin_data in bins_data]
    current = [contour_centroid(contour) for contour in contours]
    candidates = [(assignment, calc_diff(assignment, previous))
                  for assignment in itertools.permutations(current)]
    best = min(candidates, key=lambda pair: pair[1])[0]
    for i in range(min(len(bins_data), len(best))):
        bins_data[i].visible = True
        bins_data[i].centroid_x = best[i][0]
        bins_data[i].centroid_y = best[i][1]
class RouletteBoardData:
    """Staging area for roulette-board results before committing to shm."""

    def __init__(self, shm_group):
        self.shm_group = shm_group
        self.reset()

    def reset(self):
        """Clear the staged values back to their defaults."""
        self.visible = False
        self.center_x = 0
        self.center_y = 0

    def commit(self):
        """Copy the staged values into the backing shm group."""
        staged = self.shm_group.get()
        staged.board_visible = self.visible
        staged.center_x = self.center_x
        staged.center_y = self.center_y
        self.shm_group.set(staged)
class BinsData:
    """Staging area for one tracked bin's results before committing to shm."""

    def __init__(self, shm_group):
        self.shm_group = shm_group
        self.reset()

    def reset(self):
        """Clear the staged values back to their defaults."""
        self.visible = False
        self.centroid_x = 0
        self.centroid_y = 0
        self.predicted_location = False
        self.predicted_x = 0
        self.predicted_y = 0
        self.angle = 0

    def commit(self):
        """Copy the staged values into the backing shm group."""
        staged = self.shm_group.get()
        staged.visible = self.visible
        staged.centroid_x = self.centroid_x
        staged.centroid_y = self.centroid_y
        staged.predicted_location = self.predicted_location
        staged.predicted_x = self.predicted_x
        staged.predicted_y = self.predicted_y
        staged.angle = self.angle
        self.shm_group.set(staged)
# Module-level staging objects, one per shm group this module writes.
ROULETTE_BOARD = RouletteBoardData(shm.bins_vision)
GREEN_BINS = [BinsData(shm.bins_green0), BinsData(shm.bins_green1)]
RED_BINS = [BinsData(shm.bins_red0), BinsData(shm.bins_red1)]
BLACK_BINS = [BinsData(shm.bins_black0), BinsData(shm.bins_black1)]
ALL_BINS = GREEN_BINS + RED_BINS + BLACK_BINS
# Every staged result that gets committed after each processed frame.
ALL_SHM = [ROULETTE_BOARD] + ALL_BINS
class Roulette(ModuleBase):
    """Downward-camera vision module for the roulette/bins task.

    Each frame is thresholded into green/red/black regions in LAB space;
    the board center is estimated from the pair of Hough lines bordering
    the green wedge, green-bin centroids are tracked, and all staged
    results are committed to shm in the ``finally`` block.
    """

    # Timestamp of the last processed frame, used to rate-limit processing.
    last_run = 0

    def process(self, mat):
        """Process one downward-camera frame and publish results to shm."""
        global DOWNWARD_CAM_WIDTH, DOWNWARD_CAM_HEIGHT
        curr_time = time.time()
        # Rate-limit: skip frames arriving faster than the configured period.
        if curr_time - self.last_run < shm.vision_module_settings.time_between_frames.get():
            return
        self.last_run = curr_time
        # Fall back to the frame's own shape if shm reported 0 dimensions.
        DOWNWARD_CAM_WIDTH = DOWNWARD_CAM_WIDTH or mat.shape[1]
        DOWNWARD_CAM_HEIGHT = DOWNWARD_CAM_HEIGHT or mat.shape[0]
        mat = to_umat(mat)
        debug = self.options['debug']
        try:
            ## Reset SHM output
            #for s in ALL_SHM:
            #    s.reset()
            # Mark everything invisible; detections below flip this back on.
            for s in ALL_SHM:
                s.visible = False
            lab, lab_split = bgr_to_lab(mat)
            # detect green section
            dist_from_green = elementwise_color_dist(lab, [185, 55, 196])
            if debug:
                self.post('green_dist', np.abs(dist_from_green).astype('uint8'))
            green_threshed = range_threshold(
                dist_from_green,
                self.options["color_dist_min_green_funnel"],
                self.options["color_dist_max_green_funnel"],
            )
            erode_kernel = rect_kernel(to_odd(self.options['erode_kernel']))
            green_threshed = erode(green_threshed, erode_kernel)
            # detect red section
            red_threshed = range_threshold(lab_split[1],
                                           self.options['red_lab_a_min'],
                                           self.options['red_lab_a_max'])
            red_threshed = erode(red_threshed, erode_kernel)
            # detect black section
            black_threshed = range_threshold(lab_split[0],
                                             self.options['black_lab_l_min'],
                                             self.options['black_lab_l_max'])
            black_threshed = erode(black_threshed,
                                   erode_kernel,
                                   iterations=self.options['black_erode_iters'])
            if debug and POST_UMAT:
                self.post('green_threshed', green_threshed)
                self.post('red_threshed', red_threshed)
                self.post('black_threshed', black_threshed)
            #comp = red_threshed & ~green_threshed
            comp = green_threshed
            if debug:
                self.post('comp', comp)
            # Fraction of thresholded pixels that are red; used as a sanity
            # check that the board is actually in view.
            # NOTE(review): raises ZeroDivisionError when no pixel passes any
            # threshold -- silently swallowed by the broad except below; verify.
            percent_red = cv2.countNonZero(red_threshed) / \
                cv2.countNonZero(red_threshed | green_threshed | black_threshed)
            percent_red_thresh = self.options['percent_red_thresh']
            # Find center using hough lines
            blurred = simple_gaussian_blur(comp, to_odd(self.options['blur_kernel']), 0)
            edges = simple_canny(blurred, use_mean=True)
            if debug and POST_UMAT:
                self.post('edges', edges)
            lines_cart, lines_polar = find_lines(edges,
                                                 self.options['hough_lines_rho'],
                                                 np.radians(self.options['hough_lines_theta']),
                                                 self.options['hough_lines_thresh'])
            found_center = False
            thetas = []
            THETA_DIFF = math.radians(15)
            if lines_cart:
                lines_groups_mat = mat
                # Group lines into bins
                bins = []
                for line_polar, line_cart in zip(lines_polar, lines_cart):
                    for (idx, bin) in enumerate(bins):
                        # Multiple by 2 because we're in [0, 180], not [0, 360]
                        if abs(angle_diff(line_polar[1] * 2, bin[0][1] * 2)) < THETA_DIFF * 2:
                            bins[idx] = (bin[0], bin[1] + 1)
                            break
                    else:
                        bins.append((line_polar, 1))
                    draw_line(lines_groups_mat,
                              (line_cart[0], line_cart[1]),
                              (line_cart[2], line_cart[3]),
                              thickness=2)
                if debug:
                    self.post('lines_groups', lines_groups_mat)
                # Pick top four - we sometimes get the ends of the bins as lines as well
                lines_unpicked = [line for line, count in sorted(bins, key=lambda bin: bin[1], reverse=True)[:4]]
                if len(lines_unpicked) >= 2:
                    target_angle = math.radians(TARGET_ANGLE)
                    # Find two lines that are about 30 degrees apart
                    # Find the pairing of lines with the angle difference closest to 30 degrees
                    pairs = itertools.combinations(lines_unpicked, 2)
                    # We double angles because we're in [0, 180] and not [0, 360]
                    lines = sorted(pairs, key=lambda pair: abs(target_angle * 2 - abs(angle_diff(pair[0][1] * 2, pair[1][1] * 2))))[0]
                    delta = math.degrees(abs(target_angle * 2 - abs(angle_diff(lines[0][1] * 2, lines[1][1] * 2))))
                    MAX_ANGLE_DIFF = 30
                    if delta <= MAX_ANGLE_DIFF:
                        line_equations = []
                        lines_mat = mat #mat.copy()
                        for (rho, theta) in lines:
                            thetas.append(theta)
                            (x1, y1, x2, y2) = line_polar_to_cartesian(rho, theta)
                            draw_line(lines_mat, (x1, y1), (x2, y2), (255, 0, 0), thickness=2)
                            line_equations.append((x1, x2, y1, y2))
                        if debug and POST_UMAT:
                            self.post('lines', lines_mat)
                        found_center = len(line_equations) >= 2 and percent_red >= percent_red_thresh
                        if found_center:
                            # calculate intersection of diameters of green section
                            [x01, x02, y01, y02] = line_equations[0]
                            [x11, x12, y11, y12] = line_equations[1]
                            # This is stupid but it works
                            # (nudges a vertical line to avoid a zero slope denominator)
                            if x02 == x01:
                                x01 += 0.01
                            if x12 == x11:
                                x11 += 0.01
                            b1 = (y02 - y01) / (x02 - x01)
                            b2 = (y12 - y11) / (x12 - x11)
                            if b1 == b2:
                                print('ovelapping')
                                found_center = False
                            else:
                                intersection_x = (b1 * x01 - b2 * x11 + y11 - y01) / (b1 - b2)
                                if math.isinf(intersection_x):
                                    if abs(x02 - x01) < 0.2:
                                        intersection_x = x02
                                    elif abs(x12 - x11) < 0.2:
                                        intersection_x = x12
                                intersection_y = (b1 * (intersection_x - x01) + y01)
                                intersection_x = int(intersection_x)
                                intersection_y = int(intersection_y)
                                center_x, center_y = intersection_x, intersection_y
                else:
                    found_center = False
            if found_center:
                center_mat = mat # mat.copy()
                draw_circle(center_mat, (center_x, center_y), 7, (255, 255, 255), thickness=-1)
                if POST_UMAT:
                    self.post('center', center_mat)
                ROULETTE_BOARD.visible = True
                (ROULETTE_BOARD.center_x, ROULETTE_BOARD.center_y) = (center_x, center_y)
            if len(thetas) == 2:
                x = 0
                y = 0
                # We multiply angle by 2 for calculating average because
                # we want it to be in the range [0,180] instead of [0,360]
                for theta in thetas:
                    if theta > math.pi:
                        theta -= math.pi*2
                    x += math.cos(theta*2)
                    y += math.sin(theta*2)
                avg_heading = math.atan2(y, x) * 180 / math.pi / 2
                GREEN_BINS[0].angle = avg_heading
            # draw centroids of green sections and predict location ~3 seconds later
            contours = outer_contours(green_threshed)
            contours = sorted(contours, key=contour_area, reverse=True)
            bin_index = 0
            for contour in contours[:len(GREEN_BINS)]:
                centroid_x, centroid_y = contour_centroid(contour)
                draw_contours(mat, [contour], (0, 255, 0), thickness=2)
                draw_circle(mat, (centroid_x, centroid_y), 7, (255, 255, 255), thickness=-1)
                # #self.post('centroids', mat)
                # GREEN_BINS[bin_index].visible = True
                # GREEN_BINS[bin_index].centroid_x = centroid_x
                # GREEN_BINS[bin_index].centroid_y = centroid_y
                # if found_center:
                #     predicted_x, predicted_y = predict_xy(center_x, center_y, centroid_x, centroid_y)
                #     if within_camera(predicted_x, predicted_y):
                #         cv2.circle(mat, (predicted_x, predicted_y), 7, (255, 0, 0), -1)
                #         GREEN_BINS[bin_index].predicted_location = True
                #         GREEN_BINS[bin_index].predicted_x = predicted_x
                #         GREEN_BINS[bin_index].predicted_y = predicted_y
                # bin_index += 1
            assign_bins(contours[:len(GREEN_BINS)], GREEN_BINS, self)
            if debug and POST_UMAT:
                self.post('centroids', mat)
            # # draw centroids for red sections and predict location ~3 seconds later
            # _, contours, _ = cv2.findContours(red_threshed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # contours = sorted(contours, key=lambda cont: cv2.contourArea(cont), reverse=True)
            # bin_index = 0
            # for contour in contours[:len(RED_BINS)]:
            #     centroid_x, centroid_y = contour_centroid(contour)
            #     cv2.drawContours(mat, [contour], -1, (0, 255, 0), 2)
            #     cv2.circle(mat, (centroid_x, centroid_y), 7, (255, 255, 255), -1)
            # assign_bins(contours[:len(RED_BINS)], RED_BINS, self)
            # if POST_UMAT:
            #     self.post('centroids', mat)
            # # draw centroids for black sections and predict location ~3 seconds later
            # _, contours, _ = cv2.findContours(black_threshed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # contours = sorted(contours, key=lambda cont: cv2.contourArea(cont), reverse=True)
            # bin_index = 0
            # for contour in contours[:len(BLACK_BINS)]:
            #     centroid_x, centroid_y = contour_centroid(contour)
            #     cv2.drawContours(mat, [contour], -1, (0, 255, 0), 2)
            #     cv2.circle(mat, (centroid_x, centroid_y), 7, (255, 255, 255), -1)
            # assign_bins(contours[:len(BLACK_BINS)], BLACK_BINS, self)
            # if POST_UMAT:
            self.post('centroids', mat)
        except Exception:
            traceback.print_exc(file=sys.stdout)
        finally:
            # Always push whatever was staged this frame (visibility flags
            # were cleared at the top, so misses are reported too).
            for s in ALL_SHM:
                s.commit()
if __name__ == '__main__':
    # Run standalone on the downward camera with the options defined above.
    Roulette('downward', options)()
| bsd-3-clause |
vied12/superdesk | server/apps/rules/rule_sets.py | 3 | 1500 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from superdesk.resource import Resource
from superdesk.services import BaseService
from superdesk.errors import SuperdeskApiError
logger = logging.getLogger(__name__)
class RuleSetsResource(Resource):
    """Eve resource declaration for ingest rule sets.

    A rule set has a unique, required name and a list of rules.
    """

    schema = {
        'name': {
            'type': 'string',
            'iunique': True,
            'required': True,
            'minlength': 1
        },
        'rules': {
            'type': 'list'
        }
    }
    datasource = {
        'default_sort': [('name', 1)]
    }
    # Only users holding the 'rule_sets' privilege may create/modify/delete.
    privileges = {'POST': 'rule_sets', 'DELETE': 'rule_sets', 'PATCH': 'rule_sets'}
class RuleSetsService(BaseService):
    def update(self, id, updates, original):
        """Normalize rules before saving an update.

        Replaces a ``None`` value of each rule's "new" attribute with an
        empty string so downstream string handling never sees ``None``.
        """
        # 'rules' is a list field, so default to [] (the previous {}
        # default iterated over nothing but was misleading), and use
        # .get() so a rule without a 'new' key no longer raises KeyError.
        for rule in updates.get('rules', []):
            if rule.get('new') is None:
                rule['new'] = ''
        return super().update(id, updates, original)

    def on_delete(self, doc):
        """Refuse to delete a rule set still referenced by an ingest provider."""
        if self.backend.find_one('ingest_providers', req=None, rule_set=doc['_id']):
            raise SuperdeskApiError.forbiddenError("Cannot delete Rule set as it's associated with channel(s).")
| agpl-3.0 |
Theer108/invenio | invenio/modules/pages/views.py | 7 | 4531 | # -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Views for Pages module."""
import six
from flask import Blueprint, request, render_template, current_app
from flask.ctx import after_this_request
from sqlalchemy import event
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.exceptions import NotFound
from invenio.base.globals import cfg
from invenio.ext.sqlalchemy import db
from invenio.base.signals import before_handle_user_exception
# from invenio.ext.cache import cache
from invenio.modules.pages.models import Page
# Pages are mounted at the application root; templates ship with the module.
blueprint = Blueprint('pages', __name__, url_prefix='/',
                      template_folder='templates')
@blueprint.before_app_first_request
def register():
    """Register URL rules for all stored pages before the first request.

    Failures (e.g. the pages table not existing yet during installation)
    are logged and tolerated so the application can still start.
    """
    try:
        _add_url_rule([page.url for page in Page.query.all()])
    except Exception:
        # A bare `except:` would also swallow SystemExit/KeyboardInterrupt;
        # only application-level errors should be tolerated here.
        # logger.warn is a deprecated alias of logger.warning.
        current_app.logger.warning('Pages were not loaded.')
@blueprint.errorhandler(NoResultFound)
def no_result_found(_dummy):
    """Render the standard 404 page when a page lookup finds nothing."""
    body = render_template('404.html')
    return body, 404
@blueprint.errorhandler(404)
def errorhandler(exception):
    """Attempt to serve the failing request path as a stored page.

    Returns None when no page matches, letting the application-level
    error handling take over.
    """
    try:
        return view()
    except NoResultFound:
        # FIXME reraise?
        # raise exception
        return None
def view():
    """
    Public interface to the page view.

    Models: `pages.pages`

    Templates: Uses the template defined by the ``template_name`` field,
    or :template:`pages/default.html` if template_name is not defined.

    Context:
        page
            `pages.pages` object
    """
    path = request.path
    return render_page(path)
# @cache.memoize for guests?
def render_page(path):
    """Render the stored page registered under *path*.

    Looks the page up by URL, tolerating a missing trailing slash, and
    renders its configured template with the default pages template as
    fallback.
    """
    # Bug fix: the lookup previously ignored the `path` parameter and
    # always used request.path; that only worked because the sole caller
    # happened to pass request.path.
    page = Page.query.filter(db.or_(Page.url == path,
                                    Page.url == path + "/")).first()
    return render_template([page.template_name, cfg['PAGES_DEFAULT_TEMPLATE']],
                           page=page)
def before_url_insert(mapper, connection, target):
    """Normalize a page URL before insert: leading slash, optional trailing slash."""
    if not target.url.startswith("/"):
        target.url = "/" + target.url
    missing_trailing = not target.url.endswith("/")
    if missing_trailing and cfg["PAGES_APPEND_SLASH"]:
        target.url = target.url + "/"
def page_orm_handler(mapper, connection, target):
    """Keep the application URL map in sync after a page insert/update."""
    _add_url_rule(target.url)
# event.listen(Page, 'after_delete', rebuild_cache)
# Keep page URLs normalized and the URL map in sync with ORM changes.
event.listen(Page, 'before_insert', before_url_insert)
event.listen(Page, 'after_insert', page_orm_handler)
event.listen(Page, 'after_update', page_orm_handler)
def handle_not_found(exception, **extra):
    """Serve a stored page for 404s whose path matches a page URL.

    Invoked via the ``before_handle_user_exception`` signal. If a page
    exists for the failing path, its URL rule is (re-)registered and the
    current request is temporarily rewired to this blueprint's view.
    """
    if not isinstance(exception, NotFound):
        # Only 404s are candidates for the page fallback.
        return
    page = Page.query.filter(db.or_(Page.url == request.path,
                                    Page.url == request.path + "/")).first()
    if page is not None:
        _add_url_rule(page.url)
        # Modify request to call our errorhandler.
        req_endpoint = request.url_rule.endpoint
        request.url_rule.endpoint = blueprint.name + '.view'

        @after_this_request
        def restore_url_map(response):
            # Undo the endpoint swap once the response has been produced.
            request.url_rule.endpoint = req_endpoint
            return response

before_handle_user_exception.connect(handle_not_found)
def _add_url_rule(url_or_urls):
    """Register url rule(s) on the application url map.

    Accepts either a single URL string or an iterable of URL strings.
    """
    old = current_app._got_first_request
    # This is bit of cheating to overcome @flask.app.setupmethod decorator.
    current_app._got_first_request = False
    if isinstance(url_or_urls, six.string_types):
        url_or_urls = [url_or_urls]
    # Bug fix: use an explicit loop -- on Python 3 `map()` returns a lazy
    # iterator, so the previous `map(lambda ...)` form never executed and
    # silently registered nothing.
    for url in url_or_urls:
        current_app.add_url_rule(url, 'pages.view', view)
    current_app._got_first_request = old
| gpl-2.0 |
ndtran/compassion-modules | message_center_compassion/tests/test_messages.py | 2 | 15121 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014-2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.tests import common
from openerp.exceptions import Warning
from openerp import netsvc
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.config import config
from datetime import date
from random import randint
import logging
logger = logging.getLogger(__name__)
class test_messages(common.TransactionCase):
    """Test and simulate reception of GMC Messages.

    Warning : Please make sure module sponsorship_sync_gp is not installed
    in order to be sure no information is sent to GP.
    """

    def setUp(self):
        # Build the accounting/partner fixtures every test method relies on.
        super(test_messages, self).setUp()
        self.message_obj = self.env['gmc.message.pool'].with_context(
            test_mode=True)
        self.action_obj = self.env['gmc.action']
        self.child_obj = self.env['compassion.child']
        self.today = date.today().strftime(DF)
        account_type = self.env['account.account.type'].search([
            ('code', '=', 'receivable')])[0].id
        property_account_receivable = self.env['account.account'].search([
            ('type', '=', 'receivable'),
            ('user_type', '=', account_type)])[0].id
        account_type = self.env['account.account.type'].search([
            ('code', '=', 'payable')])[0].id
        property_account_payable = self.env['account.account'].search([
            ('type', '=', 'payable'),
            ('user_type', '=', account_type)])[0].id
        self.property_account_income = self.env['account.account'].search([
            ('type', '=', 'other'),
            ('name', '=', 'Property Account Income Test')])[0].id
        category_id = self.env['res.partner.category'].create({
            'name': 'sponsor'}).id
        # NOTE(review): new-API create() is given old-API positional
        # cr/uid arguments here -- confirm this is intentional.
        self.partner_id = self.env['res.partner'].create(
            self.cr, self.uid, {
                'lang': 'fr_CH',
                'lastname': 'Client 37',
                'property_account_receivable': property_account_receivable,
                'property_account_payable': property_account_payable,
                'notification_email_send': 'none',
                'category_id': [(4, category_id)],
            }).id
        self.payment_term_id = self.env['account.payment.term'].search([
            ('name', '=', '15 Days')])[0].id
        self.origin_id = self.env['recurring.contract.origin'].create({
            'name': 'other',
            'type': 'event'}).id
        self.group_id = self._create_group_id(
            'do_nothing', 1, 'month', self.partner_id, 1,
            self.payment_term_id)

    def _allocate_new_children(self, child_keys):
        """Creates allocate message and process them for given
        child keys. Returns the recordset of the allocated children.
        """
        if not isinstance(child_keys, list):
            child_keys = [child_keys]
        message_ids = [self._create_allocate_message(child_key)
                       for child_key in child_keys]
        self.message_obj.browse(message_ids).process_messages()
        return self.child_obj.search([('code', 'in', child_keys)])

    def _create_allocate_message(self, child_key, child_id=0):
        """Creates an allocate message. If child_id is given, the allocation
        is on an existing child. Returns the id (int) of the new message."""
        action_id = self.action_obj.search([
            ('type', '=', 'allocate'),
            ('model', '=', 'compassion.child')])[0].id
        message_vals = {
            'date': self.today,
            'action_id': action_id,
            'incoming_key': child_key,
            'partner_country_code': 'CH'
        }
        if child_id:
            message_vals.update({
                'child_id': child_id,
                'object_id': child_id
            })
        return self.message_obj.create(message_vals).id

    def _create_incoming_message(self, type, model, object_id, child_key='',
                                 event=''):
        """Generic method for creating an incoming message and process it.
        Args:
        - type: one of ('update','deallocate','depart')
        - model: either 'compassion.child' or 'compassion.project'
        - object_id: id of related child or project object
        Returns the processed message record.
        """
        action_id = self.action_obj.search([
            ('type', '=', type), ('model', '=', model)])[0].id
        message_vals = {
            'date': self.today,
            'action_id': action_id,
            'object_id': object_id,
            'incoming_key': child_key,
            'partner_country_code': 'CH',
            'event': event
        }
        mess = self.message_obj.create(message_vals)
        mess.process_messages()
        return mess

    def _create_active_contract(self, child_id):
        """Creates a new contract for given child."""
        contract_vals = {
            'partner_id': self.partner_id,
            'correspondant_id': self.partner_id,
            'origin_id': self.origin_id,
            'group_id': self.group_id,
            'channel': 'direct',
            'num_pol_ga': randint(700, 999),
            'child_id': child_id,
            'next_invoice_date': self.today,
            'activation_date': self.today,
            'type': 'S',
        }
        con_obj = self.env['recurring.contract'].with_context(
            default_type='S')
        contract = con_obj.create(contract_vals)
        contract.force_activation()
        return contract

    def _send_messages(self, message_type, failure_reason='', will_fail=False):
        """Looks for existing outgoing messages of given message_type
        and process (send) them. Simulate reception of a confirmation
        message from GMC which can be success or failure."""
        messages = self.message_obj.search([
            ('direction', '=', 'out'), ('name', '=', message_type),
            ('state', '=', 'new')])
        self.assertTrue(messages)
        if will_fail:
            with self.assertRaises(Warning):
                messages.process_messages()
        else:
            messages.process_messages()
        status = 'success' if not failure_reason else 'failure'
        if message_type == 'CreateGift' and status == 'success':
            # Gifts messages go to 'fondue' status.
            status = 'fondue'
        for message in messages:
            # Messages must be pending
            self.assertEqual(message.state, 'pending')
            self.assertTrue(message.request_id)
            self.message_obj.ack(message.request_id, status, failure_reason)
        for message in messages:
            # Messages must have a valid status
            self.assertEqual(message.state, status)
        return messages

    def _create_gift(self, contract_id):
        """Creates a Gift Invoice for given contract using the
        Generate Gift Wizard of module sponsorship_compassion
        """
        gift = self.env['product.product'].search([
            ('name', '=', 'Project Gift')])[0]
        gift.write({
            'property_account_income': self.property_account_income})
        gift_wizard = self.env['generate.gift.wizard'].with_context(
            active_ids=[contract_id])
        wizard = gift_wizard.create({
            'amount': 60,
            'product_id': gift.id,
            'invoice_date': self.today,
            'description': 'gift for bicycle'})
        res = wizard.generate_invoice()
        inv_ids = res['domain'][0][2]
        # Validate the generated invoice through the workflow, then pay it.
        wf_service = netsvc.LocalService('workflow')
        wf_service.trg_validate(
            self.uid, 'account.invoice', inv_ids[0], 'invoice_open', self.cr)
        self._pay_invoice(inv_ids[0])

    def _create_group_id(self, change_method, rec_value, rec_unit, partner_id,
                         adv_biling_months, payment_term_id, ref=None):
        """
        Create a group with 2 possibilities :
            - ref is not given so it takes "/" default values
            - ref is given
        Returns the id of the new group.
        """
        group_obj = self.env['recurring.contract.group']
        group = group_obj.create({
            'partner_id': partner_id})
        group_vals = {
            'change_method': change_method,
            'recurring_value': rec_value,
            'recurring_unit': rec_unit,
            'partner_id': partner_id,
            'advance_billing_months': adv_biling_months,
            'payment_term_id': payment_term_id,
        }
        if ref:
            group_vals['ref'] = ref
        group.write(group_vals)
        return group.id

    def test_config_set(self):
        """Test that the config is properly set on the server.
        """
        url = config.get('middleware_url')
        self.assertTrue(url)

    def test_gmc_scenario(self):
        """This is the test scenario detailed in file
        ..data/test_scenario.docx
        """
        # Simulate GMC Allocation of 4 new children
        child_keys = ["PE3760148", "IO6790210"]  # , "UG8320012", "UG8350016"]
        children = self._allocate_new_children(child_keys)
        child_ids = children.ids
        # Check all 4 children are available in database
        self.assertEqual(len(children), len(child_keys))
        for child in children:
            self.assertEqual(child.state, 'N')
            self.assertFalse(child.has_been_sponsored)
            self.assertTrue(child.name)
            self.assertTrue(child.case_study_ids)
            self.assertTrue(child.unique_id)
        # Create a commitment for one child
        contract = self._create_active_contract(child_ids[0])
        ######################################################################
        #               Test child departure and reinstatement               #
        ######################################################################
        child_departed_id = child_ids[0]
        self._send_messages('UpsertConstituent')
        self._send_messages('CreateCommitment')
        self._create_incoming_message(
            'depart', 'compassion.child', child_departed_id)
        child = children[0]
        self.assertEqual(child.state, 'F')
        self._send_messages('CancelCommitment')
        # The child reinstated should be in the correct state
        # NOTE(review): _create_allocate_message returns an id (int), so
        # calling process_messages() on it looks like it needs a browse()
        # first -- verify this path is actually exercised.
        message = self._create_allocate_message(
            child_keys[0], child_departed_id)
        message.process_messages()
        self.assertEqual(child.state, 'Z')
        # The sponsorship should be terminated
        self.assertEqual(contract.state, 'terminated')
        self.assertEqual(contract.gmc_state, 'depart')
        self.assertEqual(contract.end_reason, '1')   # child departure
        # Test child deallocation
        self._create_incoming_message(
            'deallocate', 'compassion.child', child_departed_id)
        self.assertEqual(child.state, 'X')
        ######################################################################
        #        Test transfer scenario (update message with new child_key)  #
        #        for a sponsored child.                                      #
        #        we simulate a different key by manually writing another code#
        ######################################################################
        contract = self._create_active_contract(child_ids[1])
        self._create_incoming_message(
            'update', 'compassion.child', child_ids[1], 'UG8360007',
            'Transfer')
        child = children[1]
        self.assertEqual(contract.state, 'active')
        self.assertEqual(child.state, 'P')
        self.assertEqual(child.code, 'UG8360007')
        self.assertEqual(child.sponsor_id.id, contract.partner_id.id)
        self.assertEqual(contract.state, 'active')
        self.assertEqual(contract.gmc_state, 'transfer')
        self.assertEqual(contract.child_id.id, child.id)
        ######################################################################
        #        Test UpdateChild and UpdateProject messages                 #
        #        We only need to check that no error is raised               #
        ######################################################################
        self._create_incoming_message(
            'update', 'compassion.child', child_ids[1], child_keys[1],
            'CaseStudy')
        self.assertEqual(contract.gmc_state, 'casestudy')
        self._create_incoming_message(
            'update', 'compassion.child', child_ids[1], child_keys[1],
            'NewImage')
        self.assertEqual(contract.gmc_state, 'picture')
        project_id = self.env['compassion.project'].search(
            [('code', '=', child_keys[1][:5])])[0].id
        self._create_incoming_message(
            'update', 'compassion.project', project_id)
        # Test sending gifts
        self._create_gift(contract.id)
        # Send gift before commitment
        self._send_messages('CreateGift', will_fail=True)
        # Send Constituent and Commitment
        self._send_messages('UpsertConstituent')
        self._send_messages('CreateCommitment')
        # Send Gift
        self._send_messages('CreateGift')
        # Sponsor cancels the sponsorship
        wf_service = netsvc.LocalService('workflow')
        wf_service.trg_validate(self.uid, 'recurring.contract', contract.id,
                                'contract_terminated', self.cr)
        self._send_messages('CancelCommitment')

    def _pay_invoice(self, invoice_id):
        # Register a manual bank payment and reconcile it with the invoice.
        bank_journal = self.env['account.journal'].search([
            ('type', '=', 'bank')])[0]
        invoice = self.env['account.invoice'].browse(invoice_id)
        account_id = invoice.partner_id.property_account_receivable.id
        move_obj = self.env['account.move']
        move_line_obj = self.env['account.move.line']
        move = move_obj.create({
            'journal_id': bank_journal.id
        })
        move_line_obj.create({
            'name': 'BNK-' + invoice.number,
            'move_id': move.id,
            'partner_id': invoice.partner_id.id,
            'account_id': bank_journal.default_debit_account_id.id,
            'debit': invoice.amount_total,
            'journal_id': bank_journal.id,
            'period_id': invoice.period_id.id,
            'date': invoice.date_due
        })
        mv_line = move_line_obj.create({
            'name': 'PAY-' + invoice.number,
            'move_id': move.id,
            'partner_id': invoice.partner_id.id,
            'account_id': account_id,
            'credit': invoice.amount_total,
            'journal_id': invoice.journal_id.id,
            'period_id': invoice.period_id.id,
            'date': invoice.date_due
        })
        move.button_validate()
        # Reconcile the payment line with the invoice's receivable line.
        to_reconcile = move_line_obj.search([
            ('move_id', '=', invoice.move_id.id),
            ('account_id', '=', account_id)]) + mv_line
        to_reconcile.reconcile()
| agpl-3.0 |
thekingofkings/focusread | libs/dns/rdataset.py | 16 | 11556 | # Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
import random
from io import StringIO
import struct
import dns.exception
import dns.rdatatype
import dns.rdataclass
import dns.rdata
import dns.set
from ._compat import string_types
# Backwards-compatible alias: older code imported SimpleSet from this module.
SimpleSet = dns.set.Set
class DifferingCovers(dns.exception.DNSException):
    """Raised when adding a DNS SIG/RRSIG rdata whose covered type is not
    the same as that of the other rdatas already in the rdataset."""
class IncompatibleTypes(dns.exception.DNSException):
    """Raised when adding DNS RR data whose class/type does not match the
    rdataset's."""
class Rdataset(dns.set.Set):
"""A DNS rdataset.
@ivar rdclass: The class of the rdataset
@type rdclass: int
@ivar rdtype: The type of the rdataset
@type rdtype: int
@ivar covers: The covered type. Usually this value is
dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
dns.rdatatype.RRSIG, then the covers value will be the rdata
type the SIG/RRSIG covers. The library treats the SIG and RRSIG
types as if they were a family of
types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
easier to work with than if RRSIGs covering different rdata
types were aggregated into a single RRSIG rdataset.
@type covers: int
@ivar ttl: The DNS TTL (Time To Live) value
@type ttl: int
"""
__slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']
def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
"""Create a new rdataset of the specified class and type.
@see: the description of the class instance variables for the
meaning of I{rdclass} and I{rdtype}"""
super(Rdataset, self).__init__()
self.rdclass = rdclass
self.rdtype = rdtype
self.covers = covers
self.ttl = 0
def _clone(self):
obj = super(Rdataset, self)._clone()
obj.rdclass = self.rdclass
obj.rdtype = self.rdtype
obj.covers = self.covers
obj.ttl = self.ttl
return obj
def update_ttl(self, ttl):
"""Set the TTL of the rdataset to be the lesser of the set's current
TTL or the specified TTL. If the set contains no rdatas, set the TTL
to the specified TTL.
@param ttl: The TTL
@type ttl: int"""
if len(self) == 0:
self.ttl = ttl
elif ttl < self.ttl:
self.ttl = ttl
def add(self, rd, ttl=None):
"""Add the specified rdata to the rdataset.
If the optional I{ttl} parameter is supplied, then
self.update_ttl(ttl) will be called prior to adding the rdata.
@param rd: The rdata
@type rd: dns.rdata.Rdata object
@param ttl: The TTL
@type ttl: int"""
#
# If we're adding a signature, do some special handling to
# check that the signature covers the same type as the
# other rdatas in this rdataset. If this is the first rdata
# in the set, initialize the covers field.
#
if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
raise IncompatibleTypes
if ttl is not None:
self.update_ttl(ttl)
if self.rdtype == dns.rdatatype.RRSIG or \
self.rdtype == dns.rdatatype.SIG:
covers = rd.covers()
if len(self) == 0 and self.covers == dns.rdatatype.NONE:
self.covers = covers
elif self.covers != covers:
raise DifferingCovers
if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
self.clear()
super(Rdataset, self).add(rd)
def union_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).union_update(other)
def intersection_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).intersection_update(other)
def update(self, other):
"""Add all rdatas in other to self.
@param other: The rdataset from which to update
@type other: dns.rdataset.Rdataset object"""
self.update_ttl(other.ttl)
super(Rdataset, self).update(other)
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'
    def __str__(self):
        # Master-file text representation; see to_text() for options.
        return self.to_text()
def __eq__(self, other):
"""Two rdatasets are equal if they have the same class, type, and
covers, and contain the same rdata.
@rtype: bool"""
if not isinstance(other, Rdataset):
return False
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype or \
self.covers != other.covers:
return False
return super(Rdataset, self).__eq__(other)
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it explicitly.
        return not self.__eq__(other)
    def to_text(self, name=None, origin=None, relativize=True,
                override_rdclass=None, **kw):
        """Convert the rdataset into DNS master file format.
        @see: L{dns.name.Name.choose_relativity} for more information
        on how I{origin} and I{relativize} determine the way names
        are emitted.
        Any additional keyword arguments are passed on to the rdata
        to_text() method.
        @param name: If name is not None, emit a RRs with I{name} as
        the owner name.
        @type name: dns.name.Name object
        @param origin: The origin for relative names, or None.
        @type origin: dns.name.Name object
        @param relativize: True if names should names be relativized
        @type relativize: bool
        @param override_rdclass: if not None, emit this class instead of
        the rdataset's own class"""
        if name is not None:
            name = name.choose_relativity(origin, relativize)
            ntext = str(name)
            pad = ' '
        else:
            # No owner name: emit bare records with no leading pad.
            ntext = ''
            pad = ''
        s = StringIO()
        if override_rdclass is not None:
            rdclass = override_rdclass
        else:
            rdclass = self.rdclass
        if len(self) == 0:
            #
            # Empty rdatasets are used for the question section, and in
            # some dynamic updates, so we don't need to print out the TTL
            # (which is meaningless anyway).
            #
            s.write(u'%s%s%s %s\n' % (ntext, pad,
                                      dns.rdataclass.to_text(rdclass),
                                      dns.rdatatype.to_text(self.rdtype)))
        else:
            # One line per rdata: [name] ttl class type rdata-text
            for rd in self:
                s.write(u'%s%s%d %s %s %s\n' %
                        (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
                         dns.rdatatype.to_text(self.rdtype),
                         rd.to_text(origin=origin, relativize=relativize,
                                    **kw)))
        #
        # We strip off the final \n for the caller's convenience in printing
        #
        return s.getvalue()[:-1]
    def to_wire(self, name, file, compress=None, origin=None,
                override_rdclass=None, want_shuffle=True):
        """Convert the rdataset to wire format.
        @param name: The owner name of the RRset that will be emitted
        @type name: dns.name.Name object
        @param file: The file to which the wire format data will be appended
        @type file: file
        @param compress: The compression table to use; the default is None.
        @type compress: dict
        @param origin: The origin to be appended to any relative names when
        they are emitted.  The default is None.
        @param override_rdclass: if not None, emit this class value instead
        of the rdataset's own class
        @param want_shuffle: emit the rdatas in random order (default True;
        forced off when override_rdclass is used)
        @returns: the number of records emitted
        @rtype: int
        """
        if override_rdclass is not None:
            rdclass = override_rdclass
            want_shuffle = False
        else:
            rdclass = self.rdclass
        file.seek(0, 2)  # always append at the end of the buffer
        if len(self) == 0:
            # Empty rdataset: a single record with zero TTL and zero rdlength.
            name.to_wire(file, compress, origin)
            stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
            file.write(stuff)
            return 1
        else:
            if want_shuffle:
                l = list(self)
                random.shuffle(l)
            else:
                l = self
            for rd in l:
                name.to_wire(file, compress, origin)
                # The trailing H is a 16-bit rdlength placeholder that gets
                # backpatched once the rdata has been written.
                stuff = struct.pack("!HHIH", self.rdtype, rdclass,
                                    self.ttl, 0)
                file.write(stuff)
                start = file.tell()
                rd.to_wire(file, compress, origin)
                end = file.tell()
                assert end - start < 65536
                file.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                file.write(stuff)
                file.seek(0, 2)
            return len(self)
def match(self, rdclass, rdtype, covers):
"""Returns True if this rdataset matches the specified class, type,
and covers"""
if self.rdclass == rdclass and \
self.rdtype == rdtype and \
self.covers == covers:
return True
return False
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
    """Create an rdataset with the specified class, type, and TTL, and with
    the specified list of rdatas in text format.
    @rtype: dns.rdataset.Rdataset object
    """
    # Accept mnemonic strings as well as numeric class/type codes.
    if isinstance(rdclass, string_types):
        rdclass = dns.rdataclass.from_text(rdclass)
    if isinstance(rdtype, string_types):
        rdtype = dns.rdatatype.from_text(rdtype)
    rdataset = Rdataset(rdclass, rdtype)
    rdataset.update_ttl(ttl)
    for text in text_rdatas:
        rdataset.add(dns.rdata.from_text(rdataset.rdclass,
                                         rdataset.rdtype, text))
    return rdataset
def from_text(rdclass, rdtype, ttl, *text_rdatas):
    """Create an rdataset with the specified class, type, and TTL, and with
    the specified rdatas in text format.
    @rtype: dns.rdataset.Rdataset object
    """
    # Varargs convenience wrapper around from_text_list().
    return from_text_list(rdclass, rdtype, ttl, text_rdatas)
def from_rdata_list(ttl, rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified list of rdata objects.
    @rtype: dns.rdataset.Rdataset object
    @raises ValueError: if the rdata list is empty
    """
    if len(rdatas) == 0:
        raise ValueError("rdata list must not be empty")
    rdataset = None
    for rdata in rdatas:
        # The first rdata determines the class and type of the set.
        if rdataset is None:
            rdataset = Rdataset(rdata.rdclass, rdata.rdtype)
            rdataset.update_ttl(ttl)
        rdataset.add(rdata)
    return rdataset
def from_rdata(ttl, *rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified rdata objects.
    @rtype: dns.rdataset.Rdataset object
    """
    # Varargs convenience wrapper around from_rdata_list().
    return from_rdata_list(ttl, rdatas)
| mit |
bsa11b/mtasa-blue | vendor/google-breakpad/src/tools/gyp/pylib/gyp/generator/dump_dependency_json.py | 899 | 2768 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False

# Some gyp steps fail if these are empty(!): directory-like variables get a
# dummy value, the remaining required variables get an empty string.
generator_default_variables = dict.fromkeys(
    ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
     'LIB_DIR', 'SHARED_LIB_DIR'], 'dir')
generator_default_variables.update(dict.fromkeys(
    ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
     'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
     'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
     'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
     'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
     'CONFIGURATION_NAME'], ''))
def CalculateVariables(default_variables, params):
  """Seed |default_variables| with generator flags and the target OS.

  Flags from params['generator_flags'] are copied in without overriding
  existing entries; 'OS' defaults to the gyp flavor of the build.
  """
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  # BUGFIX(perf/consistency): compute the flavor once instead of calling
  # gyp.common.GetFlavor twice with the same params.
  flavor = gyp.common.GetFlavor(params)
  default_variables.setdefault('OS', flavor)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): these two locals are never used afterwards; other gyp
    # generators export them as module globals — confirm intent.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  if flags.get('adjust_static_libraries', False):
    # Record the request in module state so input processing can see it.
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
def GenerateOutput(target_list, target_dicts, data, params):
  """Write the target dependency graph of |target_list| to dump.json.

  Builds a map of target -> list of direct dependencies by walking the
  dependency closure, then dumps it as JSON.  |data| and |params| are part
  of the generator interface but unused here.
  """
  # Map of target -> list of targets it depends on.
  edges = {}

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  while len(targets_to_visit) > 0:
    target = targets_to_visit.pop()
    if target in edges:
      continue
    edges[target] = []

    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  filename = 'dump.json'
  # BUGFIX: use a context manager so the file is closed even if json.dump
  # raises; also use function-call print (works on Python 2 and 3).
  with open(filename, 'w') as f:
    json.dump(edges, f)
  print('Wrote json to %s.' % filename)
| gpl-3.0 |
dbrawand/zippy | app/zippylib/primer.py | 2 | 10154 | #!/usr/bin/env python
import sys, os, re
from hashlib import md5
import primer3
import pysam
import subprocess
from collections import defaultdict, OrderedDict
'''just a wrapper for pysam'''
class MultiFasta(object):
    """Wrapper around a multi-FASTA file of primer sequences that can map
    them with bowtie2 and turn the alignments into Primer objects."""

    def __init__(self, file):
        # Path to the multi-FASTA file containing the primer sequences.
        self.file = file

    def createPrimers(self, db, bowtie='bowtie2'):
        """Align all sequences against *db* and return Primer objects
        annotated with their genomic matches.

        :param db: bowtie2 index prefix to align against
        :param bowtie: bowtie2 executable name or path
        :returns: the Primer objects (dict values view/list)
        """
        mapfile = self.file + '.sam'
        if not os.path.exists(mapfile):
            # BUGFIX: a '>' element in an argv list is passed to bowtie as a
            # literal argument (shell redirection only works with shell=True);
            # write bowtie's SAM output to the file via stdout instead.
            with open(mapfile, 'w') as sam_output:
                subprocess.check_call(
                    [bowtie, '-f', '--end-to-end',
                     '-k 10', '-L 10', '-N 1', '-D 20', '-R 3',
                     '-x', db, '-U', self.file],
                    stdout=sam_output)

        # Read fasta file (Create Primer)
        primers = {}
        fasta = pysam.FastaFile(self.file)
        for s in fasta.references:
            primername = s.split('|')[0]
            try:
                targetposition = s.split('|')[1]
                reTargetposition = re.match(r'(\w+):(\d+)-(\d+)', targetposition)
            except IndexError:  # sequence name has no '|'-separated target
                raise Exception('fixme')
            # Primers whose name suffix starts with "r" are reverse primers.
            reverse = True if primername.split('_')[-1].startswith("r") else False
            targetLocus = Locus(reTargetposition.group(1),
                                int(reTargetposition.group(2)),
                                int(reTargetposition.group(3)) -
                                int(reTargetposition.group(2)),
                                reverse)
            primers[primername] = Primer(primername, fasta.fetch(s), targetLocus)

        # read SAM output and attach mapping loci to each primer
        mappings = pysam.Samfile(mapfile, 'r')
        for aln in mappings:
            primername = aln.qname.split('|')[0]
            # BUGFIX: guard against records without a CIGAR (unmapped reads);
            # the old zip(*aln.cigar)[0] raised on an empty CIGAR and was
            # Python 2 only.
            if not aln.cigar:
                continue
            cigar_ops = [op for op, length in aln.cigar]
            if not any(cigar_ops):  # all matches (full length)
                primers[primername].addTarget(
                    mappings.getrname(aln.reference_id), aln.pos, aln.is_reverse)
            elif cigar_ops.count(0) >= len(aln.seq) - 1:
                # near-match (one mismatch/gap): only counted, not located
                primers[primername].sigmatch += 1

        ## delete mapping FILE
        ####os.unlink(self.file+'.sam')
        return primers.values()
'''fasta/primer'''
class Primer(object):
    """A single primer sequence with its genomic matches and metadata."""

    def __init__(self, name, seq, targetposition=None, tm=None, gc=None,
                 loci=None):
        """Create a primer.

        :param name: primer name; if falsy, a name is derived from the
            sequence's MD5 digest
        :param seq: primer sequence (stored uppercased)
        :param targetposition: intended target Locus, if known
        :param tm: melting temperature, if precomputed
        :param gc: GC fraction, if precomputed
        :param loci: accepted for interface compatibility but ignored, as
            before (matches are added via addTarget)
        """
        # BUGFIX: encode the sequence for hashing (hashlib needs bytes on
        # Python 3) and avoid the mutable default argument `loci=[]`.
        self.name = name if name \
            else 'primer_' + md5(seq.encode('ascii')).hexdigest()[:8]
        self.seq = seq.upper()
        self.tm = tm
        self.gc = gc
        self.loci = []  # genome matches
        self.snp = []  # same order as loci attribute
        self.meta = {}  # metadata
        self.sigmatch = 0  # significant other matches (just counted)
        self.targetposition = targetposition

    def __str__(self):
        return '<Primer (' + self.name + '): ' + self.seq + \
            ', Targets: ' + str(len(self.loci)) + \
            ' (other significant: ' + str(self.sigmatch) + \
            '); Target position: ' + str(self.targetposition) + '>'

    # __repr__ was a duplicate of __str__; share one implementation.
    __repr__ = __str__

    def __len__(self):
        return len(self.seq)

    def snpFilter(self, position):
        # return list of boolean if spliced position has Variant
        for l in self.loci:
            raise NotImplementedError

    def fasta(self, seqname=None):
        """Return the primer as a FASTA record; the target position stored
        in self.meta['POSITION'] is appended to the header if present."""
        if not seqname:
            seqname = self.name
        if 'POSITION' in self.meta.keys():
            seqname += '|' + self.meta['POSITION'][0] + ':' + \
                "-".join(map(str, self.meta['POSITION'][1:]))
        return "\n".join([">" + seqname, self.seq])

    def addTarget(self, chrom, pos, reverse):
        """Record a genomic match of this primer."""
        self.loci.append(Locus(chrom, pos, len(self), reverse))
        return

    def calcProperties(self):
        """Compute melting temperature (via primer3) and GC fraction."""
        self.tm = primer3.calcTm(self.seq)
        self.gc = (self.seq.count('G') + self.seq.count('C')) / float(len(self.seq))
        return

    def snpCheckPrimer(self, vcf):
        """Store variants overlapping the target position; True if any."""
        self.snp = self.targetposition.snpCheck(vcf)
        return True if self.snp else False

    def checkTarget(self):
        """Return True if any mapped locus coincides with the intended
        target position (same chromosome and offset)."""
        if self.targetposition is not None:
            for locus in self.loci:
                if locus.chrom == self.targetposition.chrom and \
                        int(locus.offset) == int(self.targetposition.offset):
                    return True
        return False
'''Locus'''
class Locus(object):
    """A genomic interval (chrom, offset, length) with strand orientation."""

    def __init__(self, chrom, offset, length, reverse):
        self.chrom = chrom      # chromosome/contig name
        self.offset = offset    # start position
        self.length = length    # interval length in bases
        self.reverse = reverse  # True if on the reverse strand

    def __str__(self):
        strand = '-' if self.reverse else '+'
        return self.chrom + ":" + str(self.offset) + ":" + strand

    def __lt__(self, other):
        # Order loci by chromosome name, then by position.
        return (self.chrom, self.offset) < (other.chrom, other.offset)

    def snpCheck(self, database):
        """Return variants overlapping this locus from a tabix-indexed file
        as (chrom, locus-relative offset, length, id) tuples.

        :param database: path to a bgzipped + tabix-indexed VCF-like file
        """
        # BUGFIX: removed the leftover debug print statements (Python 2
        # syntax, and noisy for a library) and the no-op `except: raise`.
        db = pysam.TabixFile(database)
        try:
            snps = db.fetch(self.chrom, self.offset, self.offset + self.length)
        except ValueError:
            # Contig not present in the index (or invalid interval).
            snps = []
        # query database and translate to primer positions
        snp_positions = []
        for v in snps:
            f = v.split()
            snpOffset = int(f[1]) - self.offset
            snpLength = max(len(f[3]), len(f[4]))
            snp_positions.append((f[0], snpOffset, snpLength, f[2]))
        return snp_positions
class Primer3(object):
    """Designs primer pairs for a genomic target interval using primer3,
    extracting the template sequence (target plus flanks) from a genome
    FASTA file."""
    def __init__(self,genome,target,flank=200):
        # genome: indexed FASTA path; target: (chrom, start, end);
        # flank: bases added on each side of the target for primer placement.
        self.genome = genome
        self.target = target
        self.flank = flank
        fasta = pysam.FastaFile(self.genome)
        # Region actually submitted to primer3 (target widened by the flanks).
        self.designregion = ( self.target[0], self.target[1]-self.flank, self.target[2]+self.flank )
        self.sequence = fasta.fetch(*self.designregion)
        self.pairs = []     # designed [left, right] Primer pairs
        self.explain = []   # primer3 *_EXPLAIN diagnostics
    def __len__(self):
        # Number of designed primer pairs so far.
        return len(self.pairs)
    def design(self,name,pars):
        """Run primer3 with parameter dict *pars*, append the designed
        [left, right] Primer pairs to self.pairs and return
        len(self.pairs) (cumulative; 0 pairs means the design failed)."""
        # extract sequence with flanks
        # Sequence args
        seq = {
            'SEQUENCE_ID': name,
            'SEQUENCE_TEMPLATE': self.sequence,
            'SEQUENCE_PRIMER_PAIR_OK_REGION_LIST': [0, self.flank, len(self.sequence)-self.flank, self.flank]
        }
        # design primers
        primers = primer3.bindings.designPrimers(seq,pars)
        # Collect per-primer fields from the flat primer3 result keys
        # (PRIMER_<SIDE>_<N>[_FIELD]).
        # NOTE(review): local 'explain' is assigned but never used;
        # self.explain is used instead — confirm it can be removed.
        primerdata, explain = defaultdict(dict), []
        for k,v in primers.items():
            m = re.match(r'PRIMER_(RIGHT|LEFT)_(\d+)(.*)',k)
            if m:
                primername = name+"_"+str(m.group(2))+'_'+m.group(1)
                if m.group(3):
                    # Suffixed key: a metadata field (TM, GC_PERCENT, ...).
                    primerdata[primername][m.group(3)[1:]] = v
                else:
                    # Bare key: v is (start, length); convert to absolute
                    # genome coordinates (right primers count backwards).
                    absoluteStart = self.designregion[1]+v[0]-(v[1]-1) if m.group(1)=="RIGHT" else self.designregion[1]+v[0]
                    absoluteEnd = self.designregion[1]+v[0] if m.group(1)=="RIGHT" else self.designregion[1]+v[0]+v[1]
                    primerdata[primername]['POSITION'] = (self.designregion[0], absoluteStart, absoluteEnd)
            elif k.endswith('EXPLAIN'):
                self.explain.append(v)
        designedPrimers, designedPairs = {}, {}
        for k,v in sorted(primerdata.items()):
            # k primername
            # v dict of metadata
            # NOTE(review): this tests for the literal key 'SEQUENCE' in
            # designedPrimers; it looks like it was meant to test
            # v['SEQUENCE'] membership (dedup by sequence) — confirm.
            if 'SEQUENCE' not in designedPrimers.keys():
                designedPrimers[v['SEQUENCE']] = Primer(k,v['SEQUENCE']) # no name autosets
                designedPrimers[v['SEQUENCE']].calcProperties()
            m = re.search(r'(\d+)_(LEFT|RIGHT)',k)
            # store pairs (reference primers)
            if int(m.group(1)) not in designedPairs.keys():
                designedPairs[int(m.group(1))] = [None, None]
            designedPairs[int(m.group(1))][0 if m.group(2).startswith('LEFT') else 1] = designedPrimers[v['SEQUENCE']]
            # all other datafields
            designedPrimers[v['SEQUENCE']].meta = v
        # store (ordered by primer3 pair index)
        self.pairs += OrderedDict(sorted(designedPairs.items())).values()
        # if design fails there will be 0 pairs, simple!
        return len(self.pairs)
    def show(self,width=100):
        """Print an ASCII sketch of each designed pair relative to the
        design region ('[' ']' flanks, '>' left primer, '<' right primer)."""
        # get features
        for p in self.pairs:
            f = { self.flank:'[', len(self.sequence)-self.flank:']' }
            # NOTE(review): meta[''] indexes an empty metadata key, and the
            # loop variable p is re-bound below (shadowing the pair) — this
            # method looks broken; confirm intended behavior.
            f[int(p[0].meta[''][0] + p[0].meta[''][1])]= '>'
            f[int(p[1].meta[''][0])] = '<'
            # get fstring positions
            fpos = defaultdict(list)
            for k in sorted(f.keys()):
                p = int(k*width/float(2*self.flank+(self.target[2]-self.target[1])))
                fpos[p].append(f[k])
            # create featurestring
            fstring = ''
            for p in sorted(fpos.keys()):
                # spacer
                if len(fstring) < p:
                    fstring += ' ' * (p-len(fstring))
                # char ('*' marks overlapping features)
                if len(fpos[p]) > 1:
                    fstring += "*"
                else:
                    fstring += fpos[p][0]
            # fill end
            if len(fstring) < width:
                fstring += ' ' * (width-len(fstring))
            # print
            print self.target[0], self.target[1],'|', fstring,'|', self.target[2]
        return
if __name__=="__main__":
    # Manual smoke test: map the primers in the FASTA file given on the
    # command line and print them.
    mf = MultiFasta(sys.argv[1])
    # TODO: the bowtie index path is hard-coded to a developer machine;
    # make it a command-line argument.
    primers = mf.createPrimers('/Users/dbrawand/dev/snappy/WORK/genome/human_g1k_v37.bowtie')
    for primer in primers:
        # BUGFIX: function-call print works on both Python 2 and 3.
        print(primer)
| mit |
unseenlaser/python-for-android | python3-alpha/python3-src/Lib/test/test_linecache.py | 60 | 4141 | """ Tests for the linecache module """
import linecache
import unittest
import os.path
from test import support
FILENAME = linecache.__file__
INVALID_NAME = '!@$)(!@#_1'
EMPTY = ''
TESTS = 'inspect_fodder inspect_fodder2 mapping_tests'
TESTS = TESTS.split()
TEST_PATH = os.path.dirname(support.__file__)
MODULES = "linecache abc".split()
MODULE_PATH = os.path.dirname(FILENAME)
SOURCE_1 = '''
" Docstring "
def function():
return result
'''
SOURCE_2 = '''
def f():
return 1 + 1
a = f()
'''
SOURCE_3 = '''
def f():
return 3''' # No ending newline
class LineCacheTests(unittest.TestCase):
    """Behavioral tests for the linecache module."""

    def test_getline(self):
        # getline: bounds checking, bad file names, and agreement with
        # direct file iteration for both test fixtures and stdlib modules.
        getline = linecache.getline
        # Bad values for line number should return an empty string
        self.assertEqual(getline(FILENAME, 2**15), EMPTY)
        self.assertEqual(getline(FILENAME, -1), EMPTY)
        # Float values currently raise TypeError, should it?
        self.assertRaises(TypeError, getline, FILENAME, 1.1)
        # Bad filenames should return an empty string
        self.assertEqual(getline(EMPTY, 1), EMPTY)
        self.assertEqual(getline(INVALID_NAME, 1), EMPTY)
        # Check whether lines correspond to those from file iteration
        for entry in TESTS:
            filename = os.path.join(TEST_PATH, entry) + '.py'
            with open(filename) as file:
                for index, line in enumerate(file):
                    self.assertEqual(line, getline(filename, index + 1))
        # Check module loading
        for entry in MODULES:
            filename = os.path.join(MODULE_PATH, entry) + '.py'
            with open(filename) as file:
                for index, line in enumerate(file):
                    self.assertEqual(line, getline(filename, index + 1))
        # Check that bogus data isn't returned (issue #1309567)
        empty = linecache.getlines('a/b/c/__init__.py')
        self.assertEqual(empty, [])

    def test_no_ending_newline(self):
        # A file without a trailing newline still yields a final line
        # terminated with '\n'.
        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, "w") as fp:
            fp.write(SOURCE_3)
        lines = linecache.getlines(support.TESTFN)
        self.assertEqual(lines, ["\n", "def f():\n", "    return 3\n"])

    def test_clearcache(self):
        # Files read through getline() populate the cache; clearcache()
        # must empty it again.
        cached = []
        for entry in TESTS:
            filename = os.path.join(TEST_PATH, entry) + '.py'
            cached.append(filename)
            linecache.getline(filename, 1)
        # Are all files cached?
        cached_empty = [fn for fn in cached if fn not in linecache.cache]
        self.assertEqual(cached_empty, [])
        # Can we clear the cache?
        linecache.clearcache()
        cached_empty = [fn for fn in cached if fn in linecache.cache]
        self.assertEqual(cached_empty, [])

    def test_checkcache(self):
        # checkcache() must refresh stale entries after the underlying
        # source file changed, and ignore unknown file names.
        getline = linecache.getline
        # Create a source file and cache its contents
        source_name = support.TESTFN + '.py'
        self.addCleanup(support.unlink, source_name)
        with open(source_name, 'w') as source:
            source.write(SOURCE_1)
        getline(source_name, 1)
        # Keep a copy of the old contents
        source_list = []
        with open(source_name) as source:
            for index, line in enumerate(source):
                self.assertEqual(line, getline(source_name, index + 1))
                source_list.append(line)
        with open(source_name, 'w') as source:
            source.write(SOURCE_2)
        # Try to update a bogus cache entry
        linecache.checkcache('dummy')
        # Check that the cache matches the old contents
        for index, line in enumerate(source_list):
            self.assertEqual(line, getline(source_name, index + 1))
        # Update the cache and check whether it matches the new source file
        linecache.checkcache(source_name)
        with open(source_name) as source:
            for index, line in enumerate(source):
                self.assertEqual(line, getline(source_name, index + 1))
                source_list.append(line)
def test_main():
    # Entry point used by the regrtest driver.
    support.run_unittest(LineCacheTests)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
partofthething/home-assistant | homeassistant/components/minio/minio_helper.py | 19 | 6523 | """Minio helper methods."""
from collections.abc import Iterable
import json
import logging
from queue import Queue
import re
import threading
import time
from typing import Iterator, List
from urllib.parse import unquote
from minio import Minio
from urllib3.exceptions import HTTPError
_LOGGER = logging.getLogger(__name__)
_METADATA_RE = re.compile("x-amz-meta-(.*)", re.IGNORECASE)
def normalize_metadata(metadata: dict) -> dict:
    """Normalize object metadata by stripping the prefix.

    Keys of the form 'x-amz-meta-<name>' (case-insensitive) are mapped to
    lowercased '<name>'; all other keys are dropped.
    """
    normalized = {}
    for meta_key, meta_value in metadata.items():
        match = re.match("x-amz-meta-(.*)", meta_key, re.IGNORECASE)
        if match:
            normalized[match.group(1).lower()] = meta_value
    return normalized
def create_minio_client(
    endpoint: str, access_key: str, secret_key: str, secure: bool
) -> Minio:
    """Create Minio client."""
    # Thin wrapper; 'secure' selects HTTPS vs HTTP transport per minio-py.
    return Minio(endpoint, access_key, secret_key, secure)
def get_minio_notification_response(
    minio_client, bucket_name: str, prefix: str, suffix: str, events: List[str]
):
    """Start listening to minio events. Copied from minio-py.

    Returns the raw (non-preloaded) HTTP response whose body streams
    bucket notification events matching prefix/suffix/events.
    """
    query = {"prefix": prefix, "suffix": suffix, "events": events}
    # pylint: disable=protected-access
    # NOTE(review): relies on minio-py's private _url_open; may break on
    # client upgrades — confirm against the pinned minio version.
    return minio_client._url_open(
        "GET", bucket_name=bucket_name, query=query, preload_content=False
    )
class MinioEventStreamIterator(Iterable):
    """Iterator wrapper over notification http response stream."""

    def __init__(self, response):
        """Wrap *response* and open its byte stream."""
        self._response = response
        self._stream = response.stream()

    def __iter__(self) -> Iterator:
        """Return self."""
        return self

    def __next__(self):
        """Return the next event whose 'Records' field is not null,
        skipping blank keep-alive lines."""
        while True:
            raw_line = next(self._stream)
            if not raw_line.strip():
                continue
            event = json.loads(raw_line.decode("utf-8"))
            if event["Records"] is not None:
                return event

    def close(self):
        """Close the underlying response."""
        self._response.close()
class MinioEventThread(threading.Thread):
    """Thread wrapper around minio notification blocking stream.

    Connects to the bucket notification stream, converts each record into a
    dict and pushes it onto the supplied queue; reconnects on errors until
    stop() is called.
    """

    def __init__(
        self,
        queue: Queue,
        endpoint: str,
        access_key: str,
        secret_key: str,
        secure: bool,
        bucket_name: str,
        prefix: str,
        suffix: str,
        events: List[str],
    ):
        """Copy over all Minio client options."""
        super().__init__()
        self._queue = queue
        self._endpoint = endpoint
        self._access_key = access_key
        self._secret_key = secret_key
        self._secure = secure
        self._bucket_name = bucket_name
        self._prefix = prefix
        self._suffix = suffix
        self._events = events
        # Current stream iterator; replaced on every (re)connect attempt.
        self._event_stream_it = None
        # Set by stop() to break the reconnect loop.
        self._should_stop = False

    def __enter__(self):
        """Start the thread."""
        self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Stop and join the thread."""
        self.stop()

    def run(self):
        """Create MinioClient and run the loop."""
        _LOGGER.info("Running MinioEventThread")

        self._should_stop = False

        minio_client = create_minio_client(
            self._endpoint, self._access_key, self._secret_key, self._secure
        )

        while not self._should_stop:
            _LOGGER.info("Connecting to minio event stream")
            response = None
            try:
                response = get_minio_notification_response(
                    minio_client,
                    self._bucket_name,
                    self._prefix,
                    self._suffix,
                    self._events,
                )

                self._event_stream_it = MinioEventStreamIterator(response)

                # Blocks until the stream ends or is closed by stop().
                self._iterate_event_stream(self._event_stream_it, minio_client)
            except json.JSONDecodeError:
                # Malformed payload; drop the connection and reconnect.
                if response:
                    response.close()
            except HTTPError as error:
                _LOGGER.error("Failed to connect to Minio endpoint: %s", error)

                # Wait before attempting to connect again.
                time.sleep(1)
            except AttributeError:
                # When response is closed, iterator will fail to access
                # the underlying socket descriptor.
                break

    def _iterate_event_stream(self, event_stream_it, minio_client):
        # Translate every object record of every event into a queue entry.
        for event in event_stream_it:
            for event_name, bucket, key, metadata in iterate_objects(event):
                presigned_url = ""
                try:
                    presigned_url = minio_client.presigned_get_object(bucket, key)
                # Fail gracefully. If for whatever reason this stops working,
                # it shouldn't prevent it from firing events.
                # pylint: disable=broad-except
                except Exception as error:
                    _LOGGER.error("Failed to generate presigned url: %s", error)

                queue_entry = {
                    "event_name": event_name,
                    "bucket": bucket,
                    "key": key,
                    "presigned_url": presigned_url,
                    "metadata": metadata,
                }
                _LOGGER.debug("Queue entry, %s", queue_entry)
                self._queue.put(queue_entry)

    def stop(self):
        """Cancel event stream and join the thread."""
        _LOGGER.debug("Stopping event thread")
        self._should_stop = True
        if self._event_stream_it is not None:
            # Closing the stream unblocks the reader in run().
            self._event_stream_it.close()
            self._event_stream_it = None

        _LOGGER.debug("Joining event thread")
        self.join()
        _LOGGER.debug("Event thread joined")
def iterate_objects(event):
    """
    Iterate over file records of notification event.

    Most of the time it should still be only one record.  Yields
    (event_name, bucket, unquoted key, normalized metadata) tuples and
    skips records missing a bucket or key.
    """
    for record in event.get("Records", []):
        event_name = record.get("eventName")
        s3_info = record.get("s3", {})
        bucket = s3_info.get("bucket", {}).get("name")
        key = s3_info.get("object", {}).get("key")
        metadata = normalize_metadata(
            s3_info.get("object", {}).get("userMetadata", {})
        )

        if not bucket or not key:
            _LOGGER.warning("Invalid bucket and/or key, %s, %s", bucket, key)
            continue

        yield event_name, bucket, unquote(key), metadata
| mit |
7ing/kubernetes | cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py | 31 | 4256 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import subprocess
from charms import layer
from charms.reactive import when
from charmhelpers.core import hookenv
from charms.layer import nginx
from subprocess import Popen
from subprocess import PIPE
from subprocess import STDOUT
@when('certificates.available')
def request_server_certificates(tls):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()
    # Subject Alternative Names the tls layer will add to the server cert.
    sans = [common_name, hookenv.unit_private_ip(), socket.gethostname()]
    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')
    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)
@when('nginx.available', 'apiserver.available',
      'certificates.server.cert.available')
def install_load_balancer(apiserver, tls):
    ''' Create the default vhost template for load balancing '''
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')
    cert_exists = server_cert_path and os.path.isfile(server_cert_path)
    key_exists = server_key_path and os.path.isfile(server_key_path)
    # Proceed only when both the key and the certificate are on disk.
    if cert_exists and key_exists:
        # The cert and key are owned by root at this point; hand them to
        # www-data so the nginx worker processes can read them.
        for secret_path in (server_cert_path, server_key_path):
            subprocess.call(['chown', 'www-data:www-data', secret_path])
        hookenv.open_port(hookenv.config('port'))
        services = apiserver.services()
        nginx.configure_site(
            'apilb',
            'apilb.conf',
            server_name='_',
            services=services,
            port=hookenv.config('port'),
            server_certificate=server_cert_path,
            server_key=server_key_path,
        )
        hookenv.status_set('active', 'Loadbalancer ready.')
@when('nginx.available')
def set_nginx_version():
    ''' Surface the currently deployed version of nginx to Juju '''
    # BUGFIX: the previous shell=True Popen was never waited on (leaking the
    # child process).  'nginx -v' prints its banner to stderr, e.g.
    # "nginx version: nginx/1.10.0 (Ubuntu)", so fold stderr into the output.
    raw = subprocess.check_output(['nginx', '-v'], stderr=subprocess.STDOUT)
    # The version is the token after the last '/', up to the first space:
    # nginx version: nginx/1.10.0 (Ubuntu) -> b'1.10.0'
    version = raw.split(b'/')[-1].split(b' ')[0]
    hookenv.application_version_set(version.rstrip())
@when('website.available')
def provide_application_details(website):
    ''' re-use the nginx layer website relation to relay the hostname/port
    to any consuming kubernetes-workers, or other units that require the
    kubernetes API '''
    # Only the configured port is published; the address comes from Juju.
    website.configure(port=hookenv.config('port'))
@when('loadbalancer.available')
def provide_loadbalancing(loadbalancer):
    '''Send the public address and port to the public-address interface, so
    the subordinates can get the public address of this loadbalancer.'''
    loadbalancer.set_address_port(hookenv.unit_get('public-address'),
                                  hookenv.config('port'))
| apache-2.0 |
gazpachoking/Flexget | flexget/plugins/output/download.py | 4 | 22908 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import unquote
import hashlib
import io
import logging
import mimetypes
import os
import shutil
import socket
import sys
import tempfile
from cgi import parse_header
from http.client import BadStatusLine
from requests import RequestException
from flexget import options, plugin
from flexget.event import event
from flexget.utils.tools import decode_html, native_str_to_text
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
log = logging.getLogger('download')
class PluginDownload(object):
"""
Downloads content from entry url and writes it into a file.
Example::
download: ~/torrents/
Allow HTML content:
By default download plugin reports failure if received content
is a html. Usually this is some sort of custom error page without
proper http code and thus entry is assumed to be downloaded
incorrectly.
In the rare case you actually need to retrieve html-pages you must
disable this feature.
Example::
download:
path: ~/something/
fail_html: no
You may use commandline parameter --dl-path to temporarily override
all paths to another location.
"""
schema = {
'oneOf': [
{
'title': 'specify options',
'type': 'object',
'properties': {
'path': {'type': 'string', 'format': 'path'},
'fail_html': {'type': 'boolean', 'default': True},
'overwrite': {'type': 'boolean', 'default': False},
'temp': {'type': 'string', 'format': 'path'},
'filename': {'type': 'string'},
},
'additionalProperties': False,
},
{'title': 'specify path', 'type': 'string', 'format': 'path'},
{'title': 'no options', 'type': 'boolean', 'enum': [True]},
]
}
def process_config(self, config):
"""Return plugin configuration in advanced form"""
if isinstance(config, str):
config = {'path': config}
if not isinstance(config, dict):
config = {}
if not config.get('path'):
config['require_path'] = True
config.setdefault('fail_html', True)
return config
def on_task_download(self, task, config):
config = self.process_config(config)
# set temporary download path based on user's config setting or use fallback
tmp = config.get('temp', os.path.join(task.manager.config_base, 'temp'))
self.get_temp_files(
task,
require_path=config.get('require_path', False),
fail_html=config['fail_html'],
tmp_path=tmp,
)
    def get_temp_file(
        self,
        task,
        entry,
        require_path=False,
        handle_magnets=False,
        fail_html=True,
        tmp_path=tempfile.gettempdir(),
    ):
        """
        Download entry content and store in temporary folder.
        Fails entry with a reason if there was problem.

        Tries each of the entry's urls in order until one downloads
        successfully; on success entry['url'] is set to the winning url.

        :param bool require_path:
            whether or not entries without 'path' field are ignored
        :param bool handle_magnets:
            when used any of urls containing magnet link will replace url,
            otherwise warning is printed.
        :param fail_html:
            fail entries which url respond with html content
        :param tmp_path:
            path to use for temporary files while downloading
        """
        if entry.get('urls'):
            urls = entry.get('urls')
        else:
            urls = [entry['url']]
        errors = []
        for url in urls:
            if url.startswith('magnet:'):
                if handle_magnets:
                    # Set magnet link as main url, so a torrent client plugin can grab it
                    log.debug('Accepting magnet url for %s', entry['title'])
                    entry['url'] = url
                    break
                else:
                    log.warning('Can\'t download magnet url')
                    errors.append('Magnet URL')
                    continue
            if require_path and 'path' not in entry:
                # Don't fail here, there might be a magnet later in the list of urls
                log.debug('Skipping url %s because there is no path for download', url)
                continue
            error = self.process_entry(task, entry, url, tmp_path)
            # disallow html content
            html_mimes = ['html', 'text/html']
            if entry.get('mime-type') in html_mimes and fail_html:
                error = (
                    'Unexpected html content received from `%s` - maybe a login page?'
                    % entry['url']
                )
                self.cleanup_temp_file(entry)
            if not error:
                # Set the main url, so we know where this file actually came from
                log.debug('Successfully retrieved %s from %s', entry['title'], url)
                entry['url'] = url
                break
            else:
                errors.append(error)
        else:
            # for/else: no url succeeded (the loop never hit `break`).
            # check if entry must have a path (download: yes)
            if require_path and 'path' not in entry:
                log.error('%s can\'t be downloaded, no path specified for entry', entry['title'])
                entry.fail('no path specified for entry')
            else:
                entry.fail(', '.join(errors))
def save_error_page(self, entry, task, page):
received = os.path.join(task.manager.config_base, 'received', task.name)
if not os.path.isdir(received):
os.makedirs(received)
filename = os.path.join(received, pathscrub('%s.error' % entry['title'], filename=True))
log.error(
'Error retrieving %s, the error page has been saved to %s', entry['title'], filename
)
with io.open(filename, 'wb') as outfile:
outfile.write(page)
def get_temp_files(
self,
task,
require_path=False,
handle_magnets=False,
fail_html=True,
tmp_path=tempfile.gettempdir(),
):
"""Download all task content and store in temporary folder.
:param bool require_path:
whether or not entries without 'path' field are ignored
:param bool handle_magnets:
when used any of urls containing magnet link will replace url,
otherwise warning is printed.
:param fail_html:
fail entries which url respond with html content
:param tmp_path:
path to use for temporary files while downloading
"""
for entry in task.accepted:
self.get_temp_file(task, entry, require_path, handle_magnets, fail_html, tmp_path)
    # TODO: a bit silly method, should be get rid of now with simpler exceptions ?
    def process_entry(self, task, entry, url, tmp_path):
        """
        Processes `entry` by using `url`. Does not use entry['url'].
        Does not fail the `entry` if there is a network issue, instead just logs and returns a string error.

        :param task: Task
        :param entry: Entry
        :param url: Url to try download
        :param tmp_path: Path to store temporary files
        :return: String error, if failed; implicitly ``None`` on success.
        """
        try:
            if task.options.test:
                # --test run: only report what would happen, no network access
                log.info('Would download: %s', entry['title'])
            else:
                if not task.manager.unit_test:
                    log.info('Downloading: %s', entry['title'])
                self.download_entry(task, entry, url, tmp_path)
        except RequestException as e:
            log.warning('RequestException %s, while downloading %s', e, url)
            return 'Network error during request: %s' % e
        except BadStatusLine as e:
            log.warning('Failed to reach server. Reason: %s', getattr(e, 'message', 'N/A'))
            return 'BadStatusLine'
        except IOError as e:
            # urllib-style errors carry either a `reason` or an HTTP `code`
            if hasattr(e, 'reason'):
                log.warning('Failed to reach server. Reason: %s', e.reason)
            elif hasattr(e, 'code'):
                log.warning('The server couldn\'t fulfill the request. Error code: %s', e.code)
            log.debug('IOError', exc_info=True)
            return 'IOError'
        except ValueError as e:
            # Probably unknown url type
            msg = 'ValueError %s' % e
            log.warning(msg)
            log.debug(msg, exc_info=True)
            return msg
    def download_entry(self, task, entry, url, tmp_path):
        """Downloads `entry` by using `url`.

        Fetches the content, stores it into a fresh temp directory under
        ``tmp_path`` and records metadata on the entry (`file`, `mime-type`,
        `content-length`, `filename`).

        :raises: Several types of exceptions ...
        :raises: PluginWarning
        """
        log.debug('Downloading url \'%s\'', url)
        # get content
        auth = None
        if 'download_auth' in entry:
            auth = entry['download_auth']
            log.debug(
                'Custom auth enabled for %s download: %s', entry['title'], entry['download_auth']
            )
        headers = task.requests.headers
        if 'download_headers' in entry:
            # NOTE(review): this mutates the shared task.requests.headers dict,
            # so custom headers leak into later requests of the same task —
            # confirm whether that is intended.
            headers.update(entry['download_headers'])
            log.debug(
                'Custom headers enabled for %s download: %s',
                entry['title'],
                entry['download_headers'],
            )
        try:
            response = task.requests.get(url, auth=auth, raise_status=False, headers=headers)
        except UnicodeError:
            log.error('Unicode error while encoding url %s', url)
            return
        if response.status_code != 200:
            log.debug('Got %s response from server. Saving error page.', response.status_code)
            # Save the error page
            if response.content:
                self.save_error_page(entry, task, response.content)
            # Raise the error
            response.raise_for_status()
            return
        # expand ~ in temp path
        # TODO jinja?
        try:
            tmp_path = os.path.expanduser(tmp_path)
        except RenderError as e:
            entry.fail('Could not set temp path. Error during string replacement: %s' % e)
            return
        # Clean illegal characters from temp path name
        tmp_path = pathscrub(tmp_path)
        # create if missing
        if not os.path.isdir(tmp_path):
            log.debug('creating tmp_path %s' % tmp_path)
            os.mkdir(tmp_path)
        # check for write-access
        if not os.access(tmp_path, os.W_OK):
            raise plugin.PluginError('Not allowed to write to temp directory `%s`' % tmp_path)
        # download and write data into a temp file
        # each download gets its own directory; the file name is the md5 of
        # the url so it is filesystem-safe regardless of the url's characters
        tmp_dir = tempfile.mkdtemp(dir=tmp_path)
        fname = hashlib.md5(url.encode('utf-8', 'replace')).hexdigest()
        datafile = os.path.join(tmp_dir, fname)
        outfile = io.open(datafile, 'wb')
        try:
            for chunk in response.iter_content(chunk_size=150 * 1024, decode_unicode=False):
                outfile.write(chunk)
        except Exception as e:
            # don't leave futile files behind
            # outfile has to be closed before we can delete it on Windows
            outfile.close()
            log.debug('Download interrupted, removing datafile')
            os.remove(datafile)
            if isinstance(e, socket.timeout):
                log.error('Timeout while downloading file')
            else:
                raise
        else:
            outfile.close()
            # Do a sanity check on downloaded file
            if os.path.getsize(datafile) == 0:
                entry.fail('File %s is 0 bytes in size' % datafile)
                os.remove(datafile)
                return
            # store temp filename into entry so other plugins may read and modify content
            # temp file is moved into final destination at self.output
            entry['file'] = datafile
            log.debug('%s field file set to: %s', entry['title'], entry['file'])
        if 'content-type' in response.headers:
            entry['mime-type'] = str(parse_header(response.headers['content-type'])[0])
        else:
            entry['mime-type'] = "unknown/unknown"
        # content-length of a compressed transfer would describe the encoded
        # size, not the file, so skip it when the body was gzip/deflate encoded
        content_encoding = response.headers.get('content-encoding', '')
        decompress = 'gzip' in content_encoding or 'deflate' in content_encoding
        if 'content-length' in response.headers and not decompress:
            entry['content-length'] = int(response.headers['content-length'])
        # prefer content-disposition naming, note: content-disposition can be disabled completely
        # by setting entry field `content-disposition` to False
        if entry.get('content-disposition', True):
            self.filename_from_headers(entry, response)
        else:
            log.info('Content-disposition disabled for %s', entry['title'])
        self.filename_ext_from_mime(entry)
        # last resort: derive a filename from the final url path segment
        if not entry.get('filename'):
            filename = unquote(url.rsplit('/', 1)[1])
            log.debug('No filename - setting from url: %s', filename)
            entry['filename'] = filename
        log.debug('Finishing download_entry() with filename %s', entry.get('filename'))
    def filename_from_headers(self, entry, response):
        """Checks entry filename if it's found from content-disposition.

        Sets ``entry['filename']`` (overriding any previous value) when the
        response carries a usable content-disposition filename.
        """
        if not response.headers.get('content-disposition'):
            # No content disposition header, nothing we can do
            return
        filename = parse_header(response.headers['content-disposition'])[1].get('filename')
        if filename:
            # try to decode to unicode, specs allow latin1, some may do utf-8 anyway
            try:
                filename = native_str_to_text(filename, encoding='latin1')
                log.debug('filename header latin1 decoded')
            except UnicodeError:
                try:
                    filename = native_str_to_text(filename, encoding='utf-8')
                    log.debug('filename header UTF-8 decoded')
                except UnicodeError:
                    # keep the raw value if neither decoding works
                    pass
            # unescape HTML entities that some servers put in the header
            filename = decode_html(filename)
            log.debug('Found filename from headers: %s', filename)
            if 'filename' in entry:
                log.debug(
                    'Overriding filename %s with %s from content-disposition',
                    entry['filename'],
                    filename,
                )
            entry['filename'] = filename
    def filename_ext_from_mime(self, entry):
        """Tries to set filename extension from mime-type"""
        extensions = mimetypes.guess_all_extensions(entry['mime-type'], strict=False)
        if extensions:
            log.debug('Mimetype guess for %s is %s ', entry['mime-type'], extensions)
            if entry.get('filename'):
                if any(entry['filename'].endswith(extension) for extension in extensions):
                    log.debug('Filename %s extension matches to mime-type', entry['filename'])
                else:
                    # mimetypes library has no concept of a 'preferred' extension when there are multiple possibilities
                    # this causes the first to be used which is not always desirable, e.g. 'ksh' for 'text/plain'
                    # NOTE(review): guess_extension can in principle return None
                    # here, which would make the concatenation below raise —
                    # confirm the non-empty `extensions` list guarantees a hit.
                    extension = mimetypes.guess_extension(entry['mime-type'], strict=False)
                    log.debug('Adding mime-type extension %s to %s', extension, entry['filename'])
                    entry['filename'] = entry['filename'] + extension
        else:
            log.debug('Python doesn\'t know extension for mime-type: %s', entry['mime-type'])
def on_task_output(self, task, config):
"""Move downloaded content from temp folder to final destination"""
config = self.process_config(config)
for entry in task.accepted:
try:
self.output(task, entry, config)
except plugin.PluginWarning as e:
entry.fail()
log.error('Plugin error while writing: %s', e)
except Exception as e:
entry.fail()
log.exception('Exception while writing: %s', e)
    def output(self, task, entry, config):
        """Moves temp-file into final destination

        Resolves the destination directory and filename (entry field, config,
        CLI override, templates), then moves ``entry['file']`` there. The temp
        file is always cleaned up, whether the move succeeded or not.

        Raises:
            PluginError if operation fails
        """
        if 'file' not in entry and not task.options.test:
            log.debug('file missing, entry: %s', entry)
            raise plugin.PluginError(
                'Entry `%s` has no temp file associated with' % entry['title']
            )
        try:
            # use path from entry if has one, otherwise use from download definition parameter
            path = entry.get('path', config.get('path'))
            if not isinstance(path, str):
                raise plugin.PluginError('Invalid `path` in entry `%s`' % entry['title'])
            # override path from command line parameter
            if task.options.dl_path:
                path = task.options.dl_path
            # expand variables in path
            try:
                path = os.path.expanduser(entry.render(path))
            except RenderError as e:
                entry.fail('Could not set path. Error during string replacement: %s' % e)
                return
            # Clean illegal characters from path name
            path = pathscrub(path)
            # If we are in test mode, report and return
            if task.options.test:
                log.info('Would write `%s` to `%s`', entry['title'], path)
                # Set a fake location, so the exec plugin can do string replacement during --test #1015
                entry['location'] = os.path.join(path, 'TEST_MODE_NO_OUTPUT')
                return
            # make path
            if not os.path.isdir(path):
                log.debug('Creating directory %s', path)
                try:
                    os.makedirs(path)
                except:
                    raise plugin.PluginError('Cannot create path %s' % path, log)
            # check that temp file is present
            if not os.path.exists(entry['file']):
                log.debug('entry: %s', entry)
                raise plugin.PluginWarning(
                    'Downloaded temp file `%s` doesn\'t exist!?' % entry['file']
                )
            if config.get('filename'):
                try:
                    entry['filename'] = entry.render(config['filename'])
                    log.debug('set filename from config %s' % entry['filename'])
                except RenderError as e:
                    entry.fail('Could not set filename. Error during string replacement: %s' % e)
                    return
            # if we still don't have a filename, try making one from title (last resort)
            elif not entry.get('filename'):
                entry['filename'] = entry['title']
                log.debug('set filename from title %s', entry['filename'])
                if 'mime-type' not in entry:
                    log.warning(
                        'Unable to figure proper filename for %s. Using title.', entry['title']
                    )
                else:
                    guess = mimetypes.guess_extension(entry['mime-type'])
                    if not guess:
                        log.warning('Unable to guess extension with mime-type %s', guess)
                    else:
                        self.filename_ext_from_mime(entry)
            name = entry.get('filename', entry['title'])
            # Remove illegal characters from filename #325, #353
            name = pathscrub(name)
            # Remove directory separators from filename #208
            name = name.replace('/', ' ')
            if sys.platform.startswith('win'):
                name = name.replace('\\', ' ')
            # remove duplicate spaces
            name = ' '.join(name.split())
            # combine to full path + filename
            destfile = os.path.join(path, name)
            log.debug('destfile: %s', destfile)
            if os.path.exists(destfile):
                import filecmp
                if filecmp.cmp(entry['file'], destfile):
                    log.debug("Identical destination file '%s' already exists", destfile)
                elif config.get('overwrite'):
                    # NOTE(review): in this branch only a debug line is emitted —
                    # the temp file is never moved, so the existing destination
                    # content is kept despite `overwrite: yes`. Confirm whether
                    # the move should also run here.
                    log.debug("Overwriting already existing file %s", destfile)
                else:
                    log.info(
                        'File `%s` already exists and is not identical, download failed.', destfile
                    )
                    entry.fail('File `%s` already exists and is not identical.' % destfile)
                    return
            else:
                # move temp file
                log.debug('moving %s to %s', entry['file'], destfile)
                try:
                    shutil.move(entry['file'], destfile)
                except (IOError, OSError) as err:
                    # ignore permission errors, see ticket #555
                    import errno
                    if not os.path.exists(destfile):
                        raise plugin.PluginError('Unable to write %s: %s' % (destfile, err))
                    if err.errno != errno.EPERM and err.errno != errno.EACCES:
                        raise
            # store final destination as output key
            entry['location'] = destfile
        finally:
            self.cleanup_temp_file(entry)
    def on_task_learn(self, task, config):
        """Make sure all temp files are cleaned up after output phase"""
        # learn runs after output, so anything still referenced is a leftover
        self.cleanup_temp_files(task)
    def on_task_abort(self, task, config):
        """Make sure all temp files are cleaned up when task is aborted."""
        self.cleanup_temp_files(task)
def cleanup_temp_file(self, entry):
if 'file' in entry:
if os.path.exists(entry['file']):
log.debug('removing temp file %s from %s', entry['file'], entry['title'])
os.remove(entry['file'])
if os.path.exists(os.path.dirname(entry['file'])):
shutil.rmtree(os.path.dirname(entry['file']))
del (entry['file'])
def cleanup_temp_files(self, task):
"""Checks all entries for leftover temp files and deletes them."""
for entry in task.entries + task.rejected + task.failed:
self.cleanup_temp_file(entry)
@event('plugin.register')
def register_plugin():
    # Register under the plugin name 'download' using plugin API version 2.
    plugin.register(PluginDownload, 'download', api_ver=2)
@event('options.register')
def register_parser_arguments():
    # Expose --dl-path on the `execute` CLI so the destination directory can
    # be overridden per run without editing the configuration file.
    options.get_parser('execute').add_argument(
        '--dl-path',
        dest='dl_path',
        default=False,
        metavar='PATH',
        help='override path for download plugin, applies to all executed tasks',
    )
| mit |
eusoubrasileiro/fatiando | cookbook/gravmag_normal_gravity.py | 3 | 1882 | """
GravMag: Calculate the gravity disturbance and Bouguer anomaly for Hawaii
"""
from fatiando.gravmag import normal_gravity
from fatiando.vis import mpl
import numpy as np
# NOTE(review): `urllib.urlretrieve` exists only on Python 2 (this cookbook
# targets the Python 2 era of fatiando); on Python 3 it would be
# `urllib.request.urlretrieve`.
import urllib
# Download the gravity and topography data
url = 'https://raw.githubusercontent.com/leouieda/geofisica1/master/data/'
urllib.urlretrieve(url + 'eigen-6c3stat-havai.gdf',
                   filename='eigen-6c3stat-havai.gdf')
urllib.urlretrieve(url + 'etopo1-havai.gdf',
                   filename='etopo1-havai.gdf')
# Load them with numpy
lon, lat, height, gravity = np.loadtxt('eigen-6c3stat-havai.gdf', skiprows=34,
                                       unpack=True)
topo = np.loadtxt('etopo1-havai.gdf', skiprows=30, usecols=[-1], unpack=True)
shape = (151, 151)
area = (lon.min(), lon.max(), lat.min(), lat.max())
# First, lets calculate the gravity disturbance (e.g., the free-air anomaly)
# We'll do this using the closed form of the normal gravity for the WGS84
# ellipsoid
gamma = normal_gravity.gamma_closed_form(lat, height)
disturbance = gravity - gamma
# Now we can remove the effect of the Bouguer plate to obtain the Bouguer
# anomaly. We'll use the standard densities of 2.67 g.cm^-3 for crust and 1.04
# g.cm^-3 for water.
bouguer = disturbance - normal_gravity.bouguer_plate(topo)
# Plot the raw gravity, the disturbance and the Bouguer anomaly side by side
# on a Mercator-projected basemap.
mpl.figure(figsize=(14, 3.5))
bm = mpl.basemap(area, projection='merc')
mpl.subplot(131)
mpl.title('Gravity (mGal)')
mpl.contourf(lon, lat, gravity, shape, 60, cmap=mpl.cm.Reds, basemap=bm)
mpl.colorbar(pad=0)
mpl.subplot(132)
mpl.title('Gravity disturbance (mGal)')
# symmetric color limits so zero disturbance maps to the center of RdBu_r
amp = np.abs(disturbance).max()
mpl.contourf(lon, lat, disturbance, shape, 60, cmap=mpl.cm.RdBu_r, basemap=bm,
             vmin=-amp, vmax=amp)
mpl.colorbar(pad=0)
mpl.subplot(133)
mpl.title('Bouguer anomaly (mGal)')
mpl.contourf(lon, lat, bouguer, shape, 60, cmap=mpl.cm.Reds, basemap=bm)
mpl.colorbar(pad=0)
mpl.show()
| bsd-3-clause |
SUNET/SATOSA | src/satosa/backends/saml2.py | 2 | 17798 | """
A saml2 backend module for the satosa proxy
"""
import copy
import functools
import json
import logging
from base64 import urlsafe_b64encode
from urllib.parse import urlparse
from saml2.client_base import Base
from saml2.config import SPConfig
from saml2.extension.ui import NAMESPACE as UI_NAMESPACE
from saml2.metadata import create_metadata_string
from saml2.authn_context import requested_authn_context
import satosa.util as util
from satosa.base import SAMLBaseModule
from satosa.base import SAMLEIDASBaseModule
from satosa.context import Context
from satosa.internal import AuthenticationInformation
from satosa.internal import InternalData
from satosa.exception import SATOSAAuthenticationError
from satosa.logging_util import satosa_logging
from satosa.response import SeeOther, Response
from satosa.saml_util import make_saml_response
from satosa.metadata_creation.description import (
MetadataDescription, OrganizationDesc, ContactPersonDesc, UIInfoDesc
)
from satosa.backends.base import BackendModule
from satosa.deprecated import SAMLInternalResponse
logger = logging.getLogger(__name__)
class SAMLBackend(BackendModule, SAMLBaseModule):
    """
    A saml2 backend module (acting as a SP).
    """
    KEY_DISCO_SRV = 'disco_srv'
    KEY_SP_CONFIG = 'sp_config'
    VALUE_ACR_COMPARISON_DEFAULT = 'exact'

    def __init__(self, outgoing, internal_attributes, config, base_url, name):
        """
        :type outgoing:
        (satosa.context.Context, satosa.internal.InternalData) -> satosa.response.Response
        :type internal_attributes: dict[str, dict[str, list[str] | str]]
        :type config: dict[str, Any]
        :type base_url: str
        :type name: str

        :param outgoing: Callback should be called by the module after
                         the authorization in the backend is done.
        :param internal_attributes: Internal attribute map
        :param config: The module config
        :param base_url: base url of the service
        :param name: name of the plugin
        """
        super().__init__(outgoing, internal_attributes, base_url, name)
        self.config = self.init_config(config)
        sp_config = SPConfig().load(copy.deepcopy(config[self.KEY_SP_CONFIG]), False)
        self.sp = Base(sp_config)
        self.discosrv = config.get(self.KEY_DISCO_SRV)
        self.encryption_keys = []
        self.outstanding_queries = {}
        self.idp_blacklist_file = config.get('idp_blacklist_file', None)

        # Collect all SP private keys so encrypted assertions can be
        # decrypted later in _translate_response().
        sp_keypairs = sp_config.getattr('encryption_keypairs', '')
        sp_key_file = sp_config.getattr('key_file', '')
        if sp_keypairs:
            key_file_paths = [pair['key_file'] for pair in sp_keypairs]
        elif sp_key_file:
            key_file_paths = [sp_key_file]
        else:
            key_file_paths = []
        for p in key_file_paths:
            with open(p) as key_file:
                self.encryption_keys.append(key_file.read())

    def start_auth(self, context, internal_req):
        """
        See super class method satosa.backends.base.BackendModule#start_auth

        :type context: satosa.context.Context
        :type internal_req: satosa.internal.InternalData
        :rtype: satosa.response.Response
        """
        # A target IdP may already have been selected upstream (e.g. by a
        # request microservice); in that case skip discovery entirely.
        target_entity_id = context.get_decoration(Context.KEY_TARGET_ENTITYID)
        if target_entity_id:
            entity_id = target_entity_id
            return self.authn_request(context, entity_id)

        # if there is only one IdP in the metadata, bypass the discovery service
        idps = self.sp.metadata.identity_providers()
        if len(idps) == 1 and "mdq" not in self.config["sp_config"]["metadata"]:
            entity_id = idps[0]
            return self.authn_request(context, entity_id)

        return self.disco_query()

    def disco_query(self):
        """
        Makes a request to the discovery server

        :rtype: satosa.response.SeeOther
        :return: Redirect to the configured discovery server
        """
        return_url = self.sp.config.getattr("endpoints", "sp")["discovery_response"][0][0]
        loc = self.sp.create_discovery_service_request(self.discosrv, self.sp.config.entityid, **{"return": return_url})
        return SeeOther(loc)

    def construct_requested_authn_context(self, entity_id):
        """Build a RequestedAuthnContext for the given IdP from the
        configured acr_mapping, or return None when no mapping applies."""
        if not self.acr_mapping:
            return None

        acr_entry = util.get_dict_defaults(self.acr_mapping, entity_id)
        if not acr_entry:
            return None

        # A plain string is shorthand for {class_ref: <str>, comparison: default}
        if type(acr_entry) is not dict:
            acr_entry = {
                "class_ref": acr_entry,
                "comparison": self.VALUE_ACR_COMPARISON_DEFAULT,
            }

        authn_context = requested_authn_context(
            acr_entry['class_ref'], comparison=acr_entry.get(
                'comparison', self.VALUE_ACR_COMPARISON_DEFAULT))

        return authn_context

    def authn_request(self, context, entity_id):
        """
        Do an authorization request on idp with given entity id.
        This is the start of the authorization.

        :type context: satosa.context.Context
        :type entity_id: str
        :rtype: satosa.response.Response

        :param context: The current context
        :param entity_id: Target IDP entity id
        :return: response to the user agent
        """
        # If IDP blacklisting is enabled and the selected IDP is blacklisted,
        # stop here
        if self.idp_blacklist_file:
            with open(self.idp_blacklist_file) as blacklist_file:
                blacklist_array = json.load(blacklist_file)['blacklist']
                if entity_id in blacklist_array:
                    satosa_logging(logger, logging.DEBUG, "IdP with EntityID {} is blacklisted".format(entity_id), context.state, exc_info=False)
                    raise SATOSAAuthenticationError(context.state, "Selected IdP is blacklisted for this backend")

        kwargs = {}
        authn_context = self.construct_requested_authn_context(entity_id)
        if authn_context:
            kwargs['requested_authn_context'] = authn_context

        try:
            binding, destination = self.sp.pick_binding(
                "single_sign_on_service", None, "idpsso", entity_id=entity_id)
            satosa_logging(logger, logging.DEBUG, "binding: %s, destination: %s" % (binding, destination),
                           context.state)

            acs_endp, response_binding = self.sp.config.getattr("endpoints", "sp")["assertion_consumer_service"][0]
            req_id, req = self.sp.create_authn_request(
                destination, binding=response_binding, **kwargs)
            relay_state = util.rndstr()
            ht_args = self.sp.apply_binding(binding, "%s" % req, destination, relay_state=relay_state)
            satosa_logging(logger, logging.DEBUG, "ht_args: %s" % ht_args, context.state)
        except Exception as exc:
            satosa_logging(logger, logging.DEBUG, "Failed to construct the AuthnRequest for state", context.state,
                           exc_info=True)
            raise SATOSAAuthenticationError(context.state, "Failed to construct the AuthnRequest") from exc

        # When unsolicited responses are disallowed, remember the request id
        # so the response can be matched against it in authn_response().
        if self.sp.config.getattr('allow_unsolicited', 'sp') is False:
            if req_id in self.outstanding_queries:
                errmsg = "Request with duplicate id {}".format(req_id)
                satosa_logging(logger, logging.DEBUG, errmsg, context.state)
                raise SATOSAAuthenticationError(context.state, errmsg)
            self.outstanding_queries[req_id] = req

        context.state[self.name] = {"relay_state": relay_state}
        return make_saml_response(binding, ht_args)

    def authn_response(self, context, binding):
        """
        Endpoint for the idp response

        :type context: satosa.context,Context
        :type binding: str
        :rtype: satosa.response.Response

        :param context: The current context
        :param binding: The saml binding type
        :return: response
        """
        if not context.request["SAMLResponse"]:
            satosa_logging(logger, logging.DEBUG, "Missing Response for state", context.state)
            raise SATOSAAuthenticationError(context.state, "Missing Response")

        try:
            authn_response = self.sp.parse_authn_request_response(
                context.request["SAMLResponse"],
                binding, outstanding=self.outstanding_queries)
        except Exception as err:
            satosa_logging(logger, logging.DEBUG, "Failed to parse authn request for state", context.state,
                           exc_info=True)
            raise SATOSAAuthenticationError(context.state, "Failed to parse authn request") from err

        if self.sp.config.getattr('allow_unsolicited', 'sp') is False:
            req_id = authn_response.in_response_to
            if req_id not in self.outstanding_queries:
                # BUGFIX: a trailing comma previously turned errmsg into a
                # 1-tuple, so the log line and the raised error carried a
                # tuple instead of the message string.
                errmsg = "No request with id: {}".format(req_id)
                satosa_logging(logger, logging.DEBUG, errmsg, context.state)
                raise SATOSAAuthenticationError(context.state, errmsg)
            del self.outstanding_queries[req_id]

        # check if the relay_state matches the cookie state
        if context.state[self.name]["relay_state"] != context.request["RelayState"]:
            satosa_logging(logger, logging.DEBUG,
                           "State did not match relay state for state", context.state)
            raise SATOSAAuthenticationError(context.state, "State did not match relay state")

        context.decorate(Context.KEY_BACKEND_METADATA_STORE, self.sp.metadata)
        del context.state[self.name]
        return self.auth_callback_func(context, self._translate_response(authn_response, context.state))

    def disco_response(self, context):
        """
        Endpoint for the discovery server response

        :type context: satosa.context.Context
        :rtype: satosa.response.Response

        :param context: The current context
        :return: response
        """
        info = context.request
        state = context.state
        try:
            entity_id = info["entityID"]
        except KeyError as err:
            satosa_logging(logger, logging.DEBUG, "No IDP chosen for state", state, exc_info=True)
            raise SATOSAAuthenticationError(state, "No IDP chosen") from err
        return self.authn_request(context, entity_id)

    def _translate_response(self, response, state):
        """
        Translates a saml authorization response to an internal response

        :type response: saml2.response.AuthnResponse
        :rtype: satosa.internal.InternalData

        :param response: The saml authorization response
        :return: A translated internal response
        """
        # The response may have been encrypted by the IdP so if we have an
        # encryption key, try it.
        if self.encryption_keys:
            response.parse_assertion(self.encryption_keys)

        authn_info = response.authn_info()[0]
        auth_class_ref = authn_info[0]
        timestamp = response.assertion.authn_statement[0].authn_instant
        issuer = response.response.issuer.text
        auth_info = AuthenticationInformation(
            auth_class_ref, timestamp, issuer,
        )

        # The SAML response may not include a NameID.
        subject = response.get_subject()
        name_id = subject.text if subject else None
        name_id_format = subject.format if subject else None

        attributes = self.converter.to_internal(
            self.attribute_profile, response.ava,
        )

        internal_resp = InternalData(
            auth_info=auth_info,
            attributes=attributes,
            subject_type=name_id_format,
            subject_id=name_id,
        )

        satosa_logging(logger, logging.DEBUG,
                       "backend received attributes:\n%s" %
                       json.dumps(response.ava, indent=4), state)
        return internal_resp

    def _metadata_endpoint(self, context):
        """
        Endpoint for retrieving the backend metadata

        :type context: satosa.context.Context
        :rtype: satosa.response.Response

        :param context: The current context
        :return: response with metadata
        """
        satosa_logging(logger, logging.DEBUG, "Sending metadata response", context.state)
        metadata_string = create_metadata_string(None, self.sp.config, 4, None, None, None, None,
                                                 None).decode("utf-8")
        return Response(metadata_string, content="text/xml")

    def register_endpoints(self):
        """
        See super class method satosa.backends.base.BackendModule#register_endpoints

        :rtype list[(str, ((satosa.context.Context, Any) -> Any, Any))]
        """
        url_map = []
        sp_endpoints = self.sp.config.getattr("endpoints", "sp")
        for endp, binding in sp_endpoints["assertion_consumer_service"]:
            parsed_endp = urlparse(endp)
            url_map.append(("^%s$" % parsed_endp.path[1:], functools.partial(self.authn_response, binding=binding)))

        if self.discosrv:
            for endp, binding in sp_endpoints["discovery_response"]:
                parsed_endp = urlparse(endp)
                url_map.append(
                    ("^%s$" % parsed_endp.path[1:], self.disco_response))

        if self.expose_entityid_endpoint():
            parsed_entity_id = urlparse(self.sp.config.entityid)
            url_map.append(("^{0}".format(parsed_entity_id.path[1:]),
                            self._metadata_endpoint))

        return url_map

    def get_metadata_desc(self):
        """
        See super class satosa.backends.backend_base.BackendModule#get_metadata_desc

        :rtype: satosa.metadata_creation.description.MetadataDescription
        """
        entity_descriptions = []

        idp_entities = self.sp.metadata.with_descriptor("idpsso")
        for entity_id, entity in idp_entities.items():
            # entity ids are urlsafe-base64 encoded so they can be used in urls
            description = MetadataDescription(urlsafe_b64encode(entity_id.encode("utf-8")).decode("utf-8"))

            # Add organization info
            try:
                organization_info = entity["organization"]
            except KeyError:
                pass
            else:
                organization = OrganizationDesc()
                for name_info in organization_info.get("organization_name", []):
                    organization.add_name(name_info["text"], name_info["lang"])
                for display_name_info in organization_info.get("organization_display_name", []):
                    organization.add_display_name(display_name_info["text"], display_name_info["lang"])
                for url_info in organization_info.get("organization_url", []):
                    organization.add_url(url_info["text"], url_info["lang"])
                description.organization = organization

            # Add contact person info
            try:
                contact_persons = entity["contact_person"]
            except KeyError:
                pass
            else:
                for person in contact_persons:
                    person_desc = ContactPersonDesc()
                    person_desc.contact_type = person.get("contact_type")
                    for address in person.get('email_address', []):
                        person_desc.add_email_address(address["text"])
                    if "given_name" in person:
                        person_desc.given_name = person["given_name"]["text"]
                    if "sur_name" in person:
                        person_desc.sur_name = person["sur_name"]["text"]
                    description.add_contact_person(person_desc)

            # Add UI info
            ui_info = self.sp.metadata.extension(entity_id, "idpsso_descriptor", "{}&UIInfo".format(UI_NAMESPACE))
            if ui_info:
                ui_info = ui_info[0]
                ui_info_desc = UIInfoDesc()
                for desc in ui_info.get("description", []):
                    ui_info_desc.add_description(desc["text"], desc["lang"])
                for name in ui_info.get("display_name", []):
                    ui_info_desc.add_display_name(name["text"], name["lang"])
                for logo in ui_info.get("logo", []):
                    ui_info_desc.add_logo(logo["text"], logo["width"], logo["height"], logo.get("lang"))
                description.ui_info = ui_info_desc

            entity_descriptions.append(description)
        return entity_descriptions
class SAMLEIDASBackend(SAMLBackend, SAMLEIDASBaseModule):
    """
    A saml2 eidas backend module (acting as a SP).
    """
    # eIDAS defaults: require at least the 'high' level of assurance.
    VALUE_ACR_CLASS_REF_DEFAULT = 'http://eidas.europa.eu/LoA/high'
    VALUE_ACR_COMPARISON_DEFAULT = 'minimum'
    def init_config(self, config):
        """Apply eIDAS-specific SP defaults on top of the base SAML config.

        The semantics of scalar vs list values in the spec dict are defined
        by ``util.check_set_dict_defaults`` (presumably: scalars are enforced
        defaults, lists enumerate a default plus allowed alternatives —
        confirm against that helper).
        """
        config = super().init_config(config)
        spec_eidas_sp = {
            'acr_mapping': {
                "": {
                    'class_ref': self.VALUE_ACR_CLASS_REF_DEFAULT,
                    'comparison': self.VALUE_ACR_COMPARISON_DEFAULT,
                },
            },
            'sp_config.service.sp.authn_requests_signed': True,
            'sp_config.service.sp.want_response_signed': True,
            'sp_config.service.sp.allow_unsolicited': False,
            'sp_config.service.sp.force_authn': True,
            'sp_config.service.sp.hide_assertion_consumer_service': True,
            'sp_config.service.sp.sp_type': ['private', 'public'],
            'sp_config.service.sp.sp_type_in_metadata': [True, False],
        }
        return util.check_set_dict_defaults(config, spec_eidas_sp)
| apache-2.0 |
crmccreary/openerp_server | openerp/addons/project/project.py | 3 | 61446 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import time
from datetime import datetime, date
from tools.translate import _
from osv import fields, osv
from openerp.addons.resource.faces import task as Task
# I think we can remove this in v6.1 since VMT's improvements in the framework ?
#class project_project(osv.osv):
# _name = 'project.project'
#project_project()
class project_task_type(osv.osv):
    """Stage of a project task (kanban column / workflow step)."""
    _name = 'project.task.type'
    _description = 'Task Stage'
    # BUGFIX: `_order` was assigned twice with the same value; a single
    # assignment is kept. Stages are ordered by their manual sequence number.
    _order = 'sequence'
    _columns = {
        'name': fields.char('Stage Name', required=True, size=64, translate=True),
        'description': fields.text('Description'),
        'sequence': fields.integer('Sequence'),
        'project_default': fields.boolean('Common to All Projects', help="If you check this field, this stage will be proposed by default on each new project. It will not assign this stage to existing projects."),
        'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', 'Projects'),
    }
    _defaults = {
        'sequence': 1
    }

project_task_type()
class project(osv.osv):
    """A project, extending an analytic account via _inherits.

    Hour totals and the progress rate are function fields aggregated over
    the project's tasks and rolled up through the analytic-account
    parent/child hierarchy.  Also hosts the faces-based task scheduler
    (see schedule_tasks).
    """
    _name = "project.project"
    _description = "Project"
    _inherits = {'account.analytic.account': "analytic_account_id"}
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """Standard search; when the context carries 'user_preference',
        restrict results to projects managed by or shared with the user.
        The superuser (id 1) always bypasses the restriction.
        """
        if user == 1:
            return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
        if context and context.get('user_preference'):
            # NOTE(review): ids are %-interpolated into the SQL; this is
            # only safe because `user` is an integer uid — query
            # parameters would be the cleaner form.
            cr.execute("""SELECT project.id FROM project_project project
                       LEFT JOIN account_analytic_account account ON account.id = project.analytic_account_id
                       LEFT JOIN project_user_rel rel ON rel.project_id = project.analytic_account_id
                       WHERE (account.user_id = %s or rel.uid = %s)"""%(user, user))
            return [(r[0]) for r in cr.fetchall()]
        return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order,
            context=context, count=count)
    def _complete_name(self, cr, uid, ids, name, args, context=None):
        """Function-field getter: '<parent name>/<name>' hierarchical name."""
        res = {}
        for m in self.browse(cr, uid, ids, context=context):
            res[m.id] = (m.parent_id and (m.parent_id.name + '/') or '') + m.name
        return res
    def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
        """On partner change, propose its 'contact' address and, when the
        model exposes a pricelist field, the partner's sale pricelist.
        """
        partner_obj = self.pool.get('res.partner')
        if not part:
            return {'value':{'contact_id': False}}
        addr = partner_obj.address_get(cr, uid, [part], ['contact'])
        val = {'contact_id': addr['contact']}
        if 'pricelist_id' in self.fields_get(cr, uid, context=context):
            pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
            pricelist_id = pricelist.get('property_product_pricelist', False) and pricelist.get('property_product_pricelist')[0] or False
            val['pricelist_id'] = pricelist_id
        return {'value': val}
    def _get_projects_from_tasks(self, cr, uid, task_ids, context=None):
        """Store trigger: map changed tasks to their projects plus all
        project ancestors, whose computed hour fields must be refreshed."""
        tasks = self.pool.get('project.task').browse(cr, uid, task_ids, context=context)
        project_ids = [task.project_id.id for task in tasks if task.project_id]
        return self.pool.get('project.project')._get_project_and_parents(cr, uid, project_ids, context)
    def _get_project_and_parents(self, cr, uid, ids, context=None):
        """ return the project ids and all their parent projects """
        res = set(ids)
        # walk up one hierarchy level per iteration until no parent remains
        while ids:
            cr.execute("""
                SELECT DISTINCT parent.id
                FROM project_project project, project_project parent, account_analytic_account account
                WHERE project.analytic_account_id = account.id
                AND parent.analytic_account_id = account.parent_id
                AND project.id IN %s
                """, (tuple(ids),))
            ids = [t[0] for t in cr.fetchall()]
            res.update(ids)
        return list(res)
    def _get_project_and_children(self, cr, uid, ids, context=None):
        """ retrieve all children projects of project ids;
            return a dictionary mapping each project to its parent project (or None)
        """
        res = dict.fromkeys(ids, None)
        # walk down one hierarchy level per iteration
        while ids:
            cr.execute("""
                SELECT project.id, parent.id
                FROM project_project project, project_project parent, account_analytic_account account
                WHERE project.analytic_account_id = account.id
                AND parent.analytic_account_id = account.parent_id
                AND parent.id IN %s
                """, (tuple(ids),))
            dic = dict(cr.fetchall())
            res.update(dic)
            ids = dic.keys()
        return res
    def _progress_rate(self, cr, uid, ids, names, arg, context=None):
        """Function-field getter for planned/total/effective hours and
        progress_rate, aggregated over each project's own tasks plus the
        tasks of all its child projects (cancelled tasks excluded)."""
        child_parent = self._get_project_and_children(cr, uid, ids, context)
        # compute planned_hours, total_hours, effective_hours specific to each project
        cr.execute("""
            SELECT project_id, COALESCE(SUM(planned_hours), 0.0),
                COALESCE(SUM(total_hours), 0.0), COALESCE(SUM(effective_hours), 0.0)
            FROM project_task WHERE project_id IN %s AND state <> 'cancelled'
            GROUP BY project_id
            """, (tuple(child_parent.keys()),))
        # aggregate results into res
        res = dict([(id, {'planned_hours':0.0,'total_hours':0.0,'effective_hours':0.0}) for id in ids])
        for id, planned, total, effective in cr.fetchall():
            # add the values specific to id to all parent projects of id in the result
            while id:
                if id in ids:
                    res[id]['planned_hours'] += planned
                    res[id]['total_hours'] += total
                    res[id]['effective_hours'] += effective
                id = child_parent[id]
        # compute progress rates
        for id in ids:
            if res[id]['total_hours']:
                res[id]['progress_rate'] = round(100.0 * res[id]['effective_hours'] / res[id]['total_hours'], 2)
            else:
                res[id]['progress_rate'] = 0.0
        return res
    def unlink(self, cr, uid, ids, *args, **kwargs):
        """Refuse to delete a project that still has tasks attached."""
        for proj in self.browse(cr, uid, ids):
            if proj.tasks:
                raise osv.except_osv(_('Operation Not Permitted !'), _('You cannot delete a project containing tasks. I suggest you to desactivate it.'))
        return super(project, self).unlink(cr, uid, ids, *args, **kwargs)
    _columns = {
        'complete_name': fields.function(_complete_name, string="Project Name", type='char', size=250),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the project without removing it."),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of Projects."),
        'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', help="Link this project to an analytic account if you need financial management on projects. It enables you to connect projects with budgets, planning, cost and revenue analysis, timesheets on projects, etc.", ondelete="cascade", required=True),
        'priority': fields.integer('Sequence', help="Gives the sequence order when displaying the list of projects"),
        'warn_manager': fields.boolean('Warn Manager', help="If you check this field, the project manager will receive an email each time a task is completed by his team.", states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
        'members': fields.many2many('res.users', 'project_user_rel', 'project_id', 'uid', 'Project Members',
            help="Project's members are users who can have an access to the tasks related to this project.", states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
        'tasks': fields.one2many('project.task', 'project_id', "Project tasks"),
        # The four function fields below share _progress_rate and are
        # refreshed when tasks change, rolling up to parent projects.
        'planned_hours': fields.function(_progress_rate, multi="progress", string='Planned Time', help="Sum of planned hours of all tasks related to this project and its child projects.",
            store = {
                'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
                'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'state'], 20),
            }),
        'effective_hours': fields.function(_progress_rate, multi="progress", string='Time Spent', help="Sum of spent hours of all tasks related to this project and its child projects.",
            store = {
                'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
                'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'state'], 20),
            }),
        'total_hours': fields.function(_progress_rate, multi="progress", string='Total Time', help="Sum of total hours of all tasks related to this project and its child projects.",
            store = {
                'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
                'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'state'], 20),
            }),
        'progress_rate': fields.function(_progress_rate, multi="progress", string='Progress', type='float', group_operator="avg", help="Percent of tasks closed according to the total of tasks todo.",
            store = {
                'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
                'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'state'], 20),
            }),
        'resource_calendar_id': fields.many2one('resource.calendar', 'Working Time', help="Timetable working hours to adjust the gantt diagram report", states={'close':[('readonly',True)]} ),
        'warn_customer': fields.boolean('Warn Partner', help="If you check this, the user will have a popup when closing a task that propose a message to send by email to the customer.", states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
        'warn_header': fields.text('Mail Header', help="Header added at the beginning of the email for the warning message sent to the customer when a task is closed.", states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
        'warn_footer': fields.text('Mail Footer', help="Footer added at the beginning of the email for the warning message sent to the customer when a task is closed.", states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
        'type_ids': fields.many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', 'Tasks Stages', states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
    }
    def _get_type_common(self, cr, uid, context):
        """Default task stages: those flagged 'Common to All Projects'."""
        ids = self.pool.get('project.task.type').search(cr, uid, [('project_default','=',1)], context=context)
        return ids
    _order = "sequence"
    _defaults = {
        'active': True,
        'priority': 1,
        'sequence': 10,
        'type_ids': _get_type_common
    }
    # TODO: Why not using a SQL contraints ?
    def _check_dates(self, cr, uid, ids, context=None):
        """Constraint: date_start must not be after date (the end date)."""
        for leave in self.read(cr, uid, ids, ['date_start', 'date'], context=context):
            if leave['date_start'] and leave['date']:
                if leave['date_start'] > leave['date']:
                    return False
        return True
    _constraints = [
        (_check_dates, 'Error! project start-date must be lower then project end-date.', ['date_start', 'date'])
    ]
    def set_template(self, cr, uid, ids, context=None):
        """Turn the projects into templates (deactivate them and their tasks)."""
        res = self.setActive(cr, uid, ids, value=False, context=context)
        return res
    def set_done(self, cr, uid, ids, context=None):
        """Close the projects: mark their open tasks done and log a message."""
        task_obj = self.pool.get('project.task')
        task_ids = task_obj.search(cr, uid, [('project_id', 'in', ids), ('state', 'not in', ('cancelled', 'done'))])
        task_obj.write(cr, uid, task_ids, {'state': 'done', 'date_end':time.strftime('%Y-%m-%d %H:%M:%S'), 'remaining_hours': 0.0})
        self.write(cr, uid, ids, {'state':'close'}, context=context)
        for (id, name) in self.name_get(cr, uid, ids):
            message = _("The project '%s' has been closed.") % name
            self.log(cr, uid, id, message)
        return True
    def set_cancel(self, cr, uid, ids, context=None):
        """Cancel the projects and every task not already done."""
        task_obj = self.pool.get('project.task')
        task_ids = task_obj.search(cr, uid, [('project_id', 'in', ids), ('state', '!=', 'done')])
        task_obj.write(cr, uid, task_ids, {'state': 'cancelled', 'date_end':time.strftime('%Y-%m-%d %H:%M:%S'), 'remaining_hours': 0.0})
        self.write(cr, uid, ids, {'state':'cancelled'}, context=context)
        return True
    def set_pending(self, cr, uid, ids, context=None):
        """Put the projects in the 'pending' state."""
        self.write(cr, uid, ids, {'state':'pending'}, context=context)
        return True
    def set_open(self, cr, uid, ids, context=None):
        """(Re)open the projects."""
        self.write(cr, uid, ids, {'state':'open'}, context=context)
        return True
    def reset_project(self, cr, uid, ids, context=None):
        """Reactivate the projects (and their tasks) and log a message."""
        res = self.setActive(cr, uid, ids, value=True, context=context)
        for (id, name) in self.name_get(cr, uid, ids):
            message = _("The project '%s' has been opened.") % name
            self.log(cr, uid, id, message)
        return res
    def map_tasks(self, cr, uid, old_project_id, new_project_id, context=None):
        """ copy and map tasks from old to new project """
        if context is None:
            context = {}
        map_task_id = {}
        task_obj = self.pool.get('project.task')
        proj = self.browse(cr, uid, old_project_id, context=context)
        for task in proj.tasks:
            map_task_id[task.id] = task_obj.copy(cr, uid, task.id, {}, context=context)
        self.write(cr, uid, new_project_id, {'tasks':[(6,0, map_task_id.values())]})
        # rewire parent/child links between the copied tasks
        task_obj.duplicate_task(cr, uid, map_task_id, context=context)
        return True
    def copy(self, cr, uid, id, default={}, context=None):
        """Duplicate a project; its tasks are copied via map_tasks().

        NOTE(review): `default={}` is a mutable default argument; it is
        re-bound below so no state leaks, but `default=None` would be the
        safer idiom.
        """
        if context is None:
            context = {}
        default = default or {}
        context['active_test'] = False
        default['state'] = 'open'
        default['tasks'] = []
        proj = self.browse(cr, uid, id, context=context)
        if not default.get('name', False):
            default['name'] = proj.name + _(' (copy)')
        res = super(project, self).copy(cr, uid, id, default, context)
        self.map_tasks(cr,uid,id,res,context)
        return res
    def duplicate_template(self, cr, uid, ids, context=None):
        """Duplicate template projects, shifting dates so they start today
        and recursing into child projects; return an action window opened
        on the first copy."""
        if context is None:
            context = {}
        data_obj = self.pool.get('ir.model.data')
        result = []
        for proj in self.browse(cr, uid, ids, context=context):
            parent_id = context.get('parent_id', False)
            context.update({'analytic_project_copy': True})
            new_date_start = time.strftime('%Y-%m-%d')
            new_date_end = False
            if proj.date_start and proj.date:
                start_date = date(*time.strptime(proj.date_start,'%Y-%m-%d')[:3])
                end_date = date(*time.strptime(proj.date,'%Y-%m-%d')[:3])
                # keep the template's duration, starting from today
                new_date_end = (datetime(*time.strptime(new_date_start,'%Y-%m-%d')[:3])+(end_date-start_date)).strftime('%Y-%m-%d')
            context.update({'copy':True})
            new_id = self.copy(cr, uid, proj.id, default = {
                                    'name': proj.name +_(' (copy)'),
                                    'state':'open',
                                    'date_start':new_date_start,
                                    'date':new_date_end,
                                    'parent_id':parent_id}, context=context)
            result.append(new_id)
            child_ids = self.search(cr, uid, [('parent_id','=', proj.analytic_account_id.id)], context=context)
            parent_id = self.read(cr, uid, new_id, ['analytic_account_id'])['analytic_account_id'][0]
            if child_ids:
                self.duplicate_template(cr, uid, child_ids, context={'parent_id': parent_id})
        if result and len(result):
            res_id = result[0]
            form_view_id = data_obj._get_id(cr, uid, 'project', 'edit_project')
            form_view = data_obj.read(cr, uid, form_view_id, ['res_id'])
            tree_view_id = data_obj._get_id(cr, uid, 'project', 'view_project')
            tree_view = data_obj.read(cr, uid, tree_view_id, ['res_id'])
            search_view_id = data_obj._get_id(cr, uid, 'project', 'view_project_project_filter')
            search_view = data_obj.read(cr, uid, search_view_id, ['res_id'])
            return {
                'name': _('Projects'),
                'view_type': 'form',
                'view_mode': 'form,tree',
                'res_model': 'project.project',
                'view_id': False,
                'res_id': res_id,
                'views': [(form_view['res_id'],'form'),(tree_view['res_id'],'tree')],
                'type': 'ir.actions.act_window',
                'search_view_id': search_view['res_id'],
                'nodestroy': True
            }
    # set active value for a project, its sub projects and its tasks
    def setActive(self, cr, uid, ids, value=True, context=None):
        """Activate (value=True) or turn into template (value=False) the
        projects, their tasks, and recursively their child projects."""
        task_obj = self.pool.get('project.task')
        for proj in self.browse(cr, uid, ids, context=None):
            self.write(cr, uid, [proj.id], {'state': value and 'open' or 'template'}, context)
            cr.execute('select id from project_task where project_id=%s', (proj.id,))
            tasks_id = [x[0] for x in cr.fetchall()]
            if tasks_id:
                task_obj.write(cr, uid, tasks_id, {'active': value}, context=context)
            child_ids = self.search(cr, uid, [('parent_id','=', proj.analytic_account_id.id)])
            if child_ids:
                self.setActive(cr, uid, child_ids, value, context=None)
        return True
    def _schedule_header(self, cr, uid, ids, force_members=True, context=None):
        """Build the header of the scheduling script exec'd by
        schedule_tasks(): faces imports plus one Resource subclass per
        user involved in the projects."""
        context = context or {}
        if type(ids) in (long, int,):
            ids = [ids]
        projects = self.browse(cr, uid, ids, context=context)
        for project in projects:
            if (not project.members) and force_members:
                raise osv.except_osv(_('Warning !'),_("You must assign members on the project '%s' !") % (project.name,))
        resource_pool = self.pool.get('resource.resource')
        result = "from openerp.addons.resource.faces import *\n"
        result += "import datetime\n"
        for project in self.browse(cr, uid, ids, context=context):
            u_ids = [i.id for i in project.members]
            if project.user_id and (project.user_id.id not in u_ids):
                u_ids.append(project.user_id.id)
            for task in project.tasks:
                if task.state in ('done','cancelled'):
                    continue
                if task.user_id and (task.user_id.id not in u_ids):
                    u_ids.append(task.user_id.id)
            calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
            resource_objs = resource_pool.generate_resources(cr, uid, u_ids, calendar_id, context=context)
            # the indentation inside these literals is part of the
            # generated Python source — do not reformat
            for key, vals in resource_objs.items():
                result +='''
class User_%s(Resource):
    efficiency = %s
''' % (key, vals.get('efficiency', False))
        result += '''
def Project():
'''
        return result
    def _schedule_project(self, cr, uid, project, context=None):
        """Emit the faces 'def Project_<id>()' snippet (start date, working
        days, resources, vacations) for one project."""
        resource_pool = self.pool.get('resource.resource')
        calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
        working_days = resource_pool.compute_working_calendar(cr, uid, calendar_id, context=context)
        # TODO: check if we need working_..., default values are ok.
        puids = [x.id for x in project.members]
        if project.user_id:
            puids.append(project.user_id.id)
        # indentation inside the literal is part of the generated source
        result = """
  def Project_%d():
    start = \'%s\'
    working_days = %s
    resource = %s
""" % (
            project.id,
            project.date_start, working_days,
            '|'.join(['User_'+str(x) for x in puids])
        )
        vacation = calendar_id and tuple(resource_pool.compute_vacation(cr, uid, calendar_id, context=context)) or False
        if vacation:
            result+= """
    vacation = %s
""" % ( vacation, )
        return result
    #TODO: DO Resource allocation and compute availability
    def compute_allocation(self, rc, uid, ids, start_date, end_date, context=None):
        """Placeholder: resource allocation is not implemented yet."""
        if context == None:
            context = {}
        allocation = {}
        return allocation
    def schedule_tasks(self, cr, uid, ids, context=None):
        """Schedule the open tasks of the projects with the faces library:
        generate a scheduling script, exec it, then write back the computed
        start/end dates (and the booked user when the task had none).

        NOTE(review): uses `exec` on generated code; inputs come from the
        database, not from end users — keep it that way.
        """
        context = context or {}
        if type(ids) in (long, int,):
            ids = [ids]
        projects = self.browse(cr, uid, ids, context=context)
        result = self._schedule_header(cr, uid, ids, False, context=context)
        for project in projects:
            result += self._schedule_project(cr, uid, project, context=context)
            result += self.pool.get('project.task')._generate_task(cr, uid, project.tasks, ident=4, context=context)
        local_dict = {}
        exec result in local_dict
        projects_gantt = Task.BalancedProject(local_dict['Project'])
        for project in projects:
            project_gantt = getattr(projects_gantt, 'Project_%d' % (project.id,))
            for task in project.tasks:
                if task.state in ('done','cancelled'):
                    continue
                p = getattr(project_gantt, 'Task_%d' % (task.id,))
                self.pool.get('project.task').write(cr, uid, [task.id], {
                    'date_start': p.start.strftime('%Y-%m-%d %H:%M:%S'),
                    'date_end': p.end.strftime('%Y-%m-%d %H:%M:%S')
                }, context=context)
                if (not task.user_id) and (p.booked_resource):
                    # resource names look like 'User_<id>'; strip the prefix
                    self.pool.get('project.task').write(cr, uid, [task.id], {
                        'user_id': int(p.booked_resource[0].name[5:]),
                    }, context=context)
        return True
project()
class users(osv.osv):
    """Extend res.users with a project reference.

    # NOTE(review): presumably used to preselect a working project in
    # project views — confirm against the module's view definitions.
    """
    _inherit = 'res.users'
    _columns = {
        'context_project_id': fields.many2one('project.project', 'Project')
    }
users()
class task(osv.osv):
    """A project task: staged workflow, delegation via parent/child links,
    and hour/progress function fields computed from its work lines."""
    _name = "project.task"
    _description = "Task"
    _log_create = True
    # default field used by calendar views
    _date_name = "date_start"
def _resolve_project_id_from_context(self, cr, uid, context=None):
"""Return ID of project based on the value of 'project_id'
context key, or None if it cannot be resolved to a single project.
"""
if context is None: context = {}
if type(context.get('project_id')) in (int, long):
project_id = context['project_id']
return project_id
if isinstance(context.get('project_id'), basestring):
project_name = context['project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name)
if len(project_ids) == 1:
return project_ids[0][0]
    def _read_group_type_id(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
        """read_group() expansion for 'type_id': list every stage relevant
        to the project resolved from the context (or the default stages
        when none is resolved), including stages that have no task."""
        stage_obj = self.pool.get('project.task.type')
        project_id = self._resolve_project_id_from_context(cr, uid, context=context)
        order = stage_obj._order
        access_rights_uid = access_rights_uid or uid
        if read_group_order == 'type_id desc':
            # lame way to allow reverting search, should just work in the trivial case
            order = '%s desc' % order
        if project_id:
            domain = ['|', ('id','in',ids), ('project_ids','in',project_id)]
        else:
            domain = ['|', ('id','in',ids), ('project_default','=',1)]
        stage_ids = stage_obj._search(cr, uid, domain, order=order, access_rights_uid=access_rights_uid, context=context)
        result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
        # restore order of the search
        result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
        return result
    def _read_group_user_id(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
        """read_group() expansion for 'user_id': also list the members of
        the project resolved from the context, even without any task."""
        res_users = self.pool.get('res.users')
        project_id = self._resolve_project_id_from_context(cr, uid, context=context)
        access_rights_uid = access_rights_uid or uid
        if project_id:
            ids += self.pool.get('project.project').read(cr, access_rights_uid, project_id, ['members'], context=context)['members']
        order = res_users._order
        # lame way to allow reverting search, should just work in the trivial case
        if read_group_order == 'user_id desc':
            order = '%s desc' % order
        # de-duplicate and apply search order
        ids = res_users._search(cr, uid, [('id','in',ids)], order=order, access_rights_uid=access_rights_uid, context=context)
        result = res_users.name_get(cr, access_rights_uid, ids, context=context)
        # restore order of the search
        result.sort(lambda x,y: cmp(ids.index(x[0]), ids.index(y[0])))
        return result
_group_by_full = {
'type_id': _read_group_type_id,
'user_id': _read_group_user_id
}
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
obj_project = self.pool.get('project.project')
for domain in args:
if domain[0] == 'project_id' and (not isinstance(domain[2], str)):
id = isinstance(domain[2], list) and domain[2][0] or domain[2]
if id and isinstance(id, (long, int)):
if obj_project.read(cr, user, id, ['state'])['state'] == 'template':
args.append(('active', '=', False))
return super(task, self).search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
def _str_get(self, task, level=0, border='***', context=None):
return border+' '+(task.user_id and task.user_id.name.upper() or '')+(level and (': L'+str(level)) or '')+(' - %.1fh / %.1fh'%(task.effective_hours or 0.0,task.planned_hours))+' '+border+'\n'+ \
border[0]+' '+(task.name or '')+'\n'+ \
(task.description or '')+'\n\n'
    # Compute: effective_hours, total_hours, progress
    def _hours_get(self, cr, uid, ids, field_names, args, context=None):
        """Function-field getter for effective/total/delay hours and
        progress (%).  Effective hours are summed from work lines in SQL;
        progress is capped at 99.99 until the task is done or cancelled,
        in which case it is forced to 100."""
        res = {}
        cr.execute("SELECT task_id, COALESCE(SUM(hours),0) FROM project_task_work WHERE task_id IN %s GROUP BY task_id",(tuple(ids),))
        hours = dict(cr.fetchall())
        for task in self.browse(cr, uid, ids, context=context):
            res[task.id] = {'effective_hours': hours.get(task.id, 0.0), 'total_hours': (task.remaining_hours or 0.0) + hours.get(task.id, 0.0)}
            res[task.id]['delay_hours'] = res[task.id]['total_hours'] - task.planned_hours
            res[task.id]['progress'] = 0.0
            # guard against division by zero when no time is booked at all
            if (task.remaining_hours + hours.get(task.id, 0.0)):
                res[task.id]['progress'] = round(min(100.0 * hours.get(task.id, 0.0) / res[task.id]['total_hours'], 99.99),2)
            if task.state in ('done','cancelled'):
                res[task.id]['progress'] = 100.0
        return res
def onchange_remaining(self, cr, uid, ids, remaining=0.0, planned = 0.0):
if remaining and not planned:
return {'value':{'planned_hours': remaining}}
return {}
def onchange_planned(self, cr, uid, ids, planned = 0.0, effective = 0.0):
return {'value':{'remaining_hours': planned - effective}}
def onchange_project(self, cr, uid, id, project_id):
if not project_id:
return {}
data = self.pool.get('project.project').browse(cr, uid, [project_id])
partner_id=data and data[0].partner_id
if partner_id:
return {'value':{'partner_id':partner_id.id}}
return {}
    def duplicate_task(self, cr, uid, map_ids, context=None):
        """After copying a set of tasks, rewire parent/child links on the
        copies.

        ``map_ids`` maps original task id -> copied task id; for each
        copied task, any parent or child that was itself copied is
        replaced by its copy so the relations stay inside the duplicated
        set."""
        for new in map_ids.values():
            task = self.browse(cr, uid, new, context)
            child_ids = [ ch.id for ch in task.child_ids]
            if task.child_ids:
                for child in task.child_ids:
                    if child.id in map_ids.keys():
                        child_ids.remove(child.id)
                        child_ids.append(map_ids[child.id])
            parent_ids = [ ch.id for ch in task.parent_ids]
            if task.parent_ids:
                for parent in task.parent_ids:
                    if parent.id in map_ids.keys():
                        parent_ids.remove(parent.id)
                        parent_ids.append(map_ids[parent.id])
            #FIXME why there is already the copy and the old one
            self.write(cr, uid, new, {'parent_ids':[(6,0,set(parent_ids))], 'child_ids':[(6,0, set(child_ids))]})
def copy_data(self, cr, uid, id, default={}, context=None):
default = default or {}
default.update({'work_ids':[], 'date_start': False, 'date_end': False, 'date_deadline': False})
if not default.get('remaining_hours', False):
default['remaining_hours'] = float(self.read(cr, uid, id, ['planned_hours'])['planned_hours'])
default['active'] = True
default['type_id'] = False
if not default.get('name', False):
default['name'] = self.browse(cr, uid, id, context=context).name or ''
if not context.get('copy',False):
new_name = _("%s (copy)")%default.get('name','')
default.update({'name':new_name})
return super(task, self).copy_data(cr, uid, id, default, context)
def _is_template(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for task in self.browse(cr, uid, ids, context=context):
res[task.id] = True
if task.project_id:
if task.project_id.active == False or task.project_id.state == 'template':
res[task.id] = False
return res
def _get_task(self, cr, uid, ids, context=None):
result = {}
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id: result[work.task_id.id] = True
return result.keys()
_columns = {
'active': fields.function(_is_template, store=True, string='Not a Template Task', type='boolean', help="This field is computed automatically and have the same behavior than the boolean 'active' field: if the task is linked to a template or unactivated project, it will be hidden unless specifically asked."),
'name': fields.char('Task Summary', size=128, required=True, select=True),
'description': fields.text('Description'),
'priority': fields.selection([('4','Very Low'), ('3','Low'), ('2','Medium'), ('1','Important'), ('0','Very important')], 'Priority', select=True),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of tasks."),
'type_id': fields.many2one('project.task.type', 'Stage'),
'state': fields.selection([('draft', 'New'),('open', 'In Progress'),('pending', 'Pending'), ('done', 'Done'), ('cancelled', 'Cancelled')], 'State', readonly=True, required=True,
help='If the task is created the state is \'Draft\'.\n If the task is started, the state becomes \'In Progress\'.\n If review is needed the task is in \'Pending\' state.\
\n If the task is over, the states is set to \'Done\'.'),
'kanban_state': fields.selection([('normal', 'Normal'),('blocked', 'Blocked'),('done', 'Ready To Pull')], 'Kanban State',
help="A task's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this task\n"
" * Ready To Pull indicates the task is ready to be pulled to the next stage",
readonly=True, required=False),
'create_date': fields.datetime('Create Date', readonly=True,select=True),
'date_start': fields.datetime('Starting Date',select=True),
'date_end': fields.datetime('Ending Date',select=True),
'date_deadline': fields.date('Deadline',select=True),
'project_id': fields.many2one('project.project', 'Project', ondelete='set null', select="1"),
'parent_ids': fields.many2many('project.task', 'project_task_parent_rel', 'task_id', 'parent_id', 'Parent Tasks'),
'child_ids': fields.many2many('project.task', 'project_task_parent_rel', 'parent_id', 'task_id', 'Delegated Tasks'),
'notes': fields.text('Notes'),
'planned_hours': fields.float('Planned Hours', help='Estimated time to do the task, usually set by the project manager when the task is in draft state.'),
'effective_hours': fields.function(_hours_get, string='Hours Spent', multi='hours', help="Computed using the sum of the task work done.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'remaining_hours': fields.float('Remaining Hours', digits=(16,2), help="Total remaining time, can be re-estimated periodically by the assignee of the task."),
'total_hours': fields.function(_hours_get, string='Total Hours', multi='hours', help="Computed as: Time Spent + Remaining Time.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="If the task has a progress of 99.99% you should close the task if it's finished or reevaluate the time",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours','state'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'delay_hours': fields.function(_hours_get, string='Delay Hours', multi='hours', help="Computed as difference between planned hours by the project manager and the total hours of the task.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'user_id': fields.many2one('res.users', 'Assigned to'),
'delegated_user_id': fields.related('child_ids', 'user_id', type='many2one', relation='res.users', string='Delegated To'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'work_ids': fields.one2many('project.task.work', 'task_id', 'Work done'),
'manager_id': fields.related('project_id', 'analytic_account_id', 'user_id', type='many2one', relation='res.users', string='Project Manager'),
'company_id': fields.many2one('res.company', 'Company'),
'id': fields.integer('ID', readonly=True),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'user_email', type='char', string='User Email', readonly=True),
}
_defaults = {
'state': 'draft',
'kanban_state': 'normal',
'priority': '2',
'progress': 0,
'sequence': 10,
'active': True,
'user_id': lambda obj, cr, uid, context: uid,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'project.task', context=c)
}
_order = "priority, sequence, date_start, name, id"
    def set_priority(self, cr, uid, ids, priority):
        """Set task priority to *priority* (a selection key, '0'..'4').
        """
        return self.write(cr, uid, ids, {'priority' : priority})
    def set_high_priority(self, cr, uid, ids, *args):
        """Set task priority to high ('1' = Important).
        """
        return self.set_priority(cr, uid, ids, '1')
    def set_normal_priority(self, cr, uid, ids, *args):
        """Set task priority to normal ('2' = Medium).
        """
        return self.set_priority(cr, uid, ids, '2')
def _check_recursion(self, cr, uid, ids, context=None):
for id in ids:
visited_branch = set()
visited_node = set()
res = self._check_cycle(cr, uid, id, visited_branch, visited_node, context=context)
if not res:
return False
return True
    def _check_cycle(self, cr, uid, id, visited_branch, visited_node, context=None):
        """DFS helper for _check_recursion: return False when a cycle is
        reachable from ``id``.

        ``visited_branch`` holds the ids on the current DFS path (seeing
        one again means a cycle); ``visited_node`` holds every id already
        fully checked, to avoid re-exploring shared subtrees."""
        if id in visited_branch: #Cycle
            return False
        if id in visited_node: #Already tested don't work one more time for nothing
            return True
        visited_branch.add(id)
        visited_node.add(id)
        #visit child using DFS
        task = self.browse(cr, uid, id, context=context)
        for child in task.child_ids:
            res = self._check_cycle(cr, uid, child.id, visited_branch, visited_node, context=context)
            if not res:
                return False
        visited_branch.remove(id)
        return True
def _check_dates(self, cr, uid, ids, context=None):
if context == None:
context = {}
obj_task = self.browse(cr, uid, ids[0], context=context)
start = obj_task.date_start or False
end = obj_task.date_end or False
if start and end :
if start > end:
return False
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive tasks.', ['parent_ids']),
(_check_dates, 'Error ! Task end-date must be greater then task start-date', ['date_start','date_end'])
]
    #
    # Override view according to the company definition
    #
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Adapt task views when the company's project time unit is not
        hours: downgrade float_time widgets to plain floats and rename
        every label containing 'Hours' with the configured unit name."""
        users_obj = self.pool.get('res.users')
        # read uom as admin to avoid access rights issues, e.g. for portal/share users,
        # this should be safe (no context passed to avoid side-effects)
        obj_tm = users_obj.browse(cr, 1, uid, context=context).company_id.project_time_mode_id
        tm = obj_tm and obj_tm.name or 'Hours'
        res = super(task, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu=submenu)
        if tm in ['Hours','Hour']:
            return res
        eview = etree.fromstring(res['arch'])
        def _check_rec(eview):
            # recursively replace float_time widgets throughout the arch
            if eview.attrib.get('widget','') == 'float_time':
                eview.set('widget','float')
            for child in eview:
                _check_rec(child)
            return True
        _check_rec(eview)
        res['arch'] = etree.tostring(eview)
        for f in res['fields']:
            if 'Hours' in res['fields'][f]['string']:
                res['fields'][f]['string'] = res['fields'][f]['string'].replace('Hours',tm)
        return res
def _check_child_task(self, cr, uid, ids, context=None):
if context == None:
context = {}
tasks = self.browse(cr, uid, ids, context=context)
for task in tasks:
if task.child_ids:
for child in task.child_ids:
if child.state in ['draft', 'open', 'pending']:
raise osv.except_osv(_("Warning !"), _("Child task still open.\nPlease cancel or complete child task first."))
return True
    def action_close(self, cr, uid, ids, context=None):
        """Close the first task in ``ids``; when its project is configured
        to warn the manager or the customer, return a mail-compose action
        so a notification can be sent, otherwise return do_close()'s
        result."""
        # This action open wizard to send email to partner or project manager after close task.
        if context == None:
            context = {}
        task_id = len(ids) and ids[0] or False
        self._check_child_task(cr, uid, ids, context=context)
        if not task_id: return False
        task = self.browse(cr, uid, task_id, context=context)
        project = task.project_id
        res = self.do_close(cr, uid, [task_id], context=context)
        if project.warn_manager or project.warn_customer:
            return {
                'name': _('Send Email after close task'),
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'mail.compose.message',
                'type': 'ir.actions.act_window',
                'target': 'new',
                'nodestroy': True,
                'context': {'active_id': task.id,
                            'active_model': 'project.task'}
            }
        return res
def do_close(self, cr, uid, ids, context=None):
    """Close the given tasks.

    For each task:
    * notify the project manager through a ``res.request`` when the
      project has ``warn_manager`` set and the manager is not ``uid``,
    * reopen any pending/draft parent whose other children are all
      done or cancelled,
    * mark the task done, zero its remaining hours and stamp the end
      date if missing.

    Fix: the default argument used to be a shared mutable ``{}``; it is
    now ``None`` to avoid the mutable-default-argument pitfall.

    :return: True
    """
    request = self.pool.get('res.request')
    if not isinstance(ids, list):
        ids = [ids]
    for task in self.browse(cr, uid, ids, context=context):
        vals = {}
        project = task.project_id
        if project:
            # Send request to project manager
            if project.warn_manager and project.user_id and (project.user_id.id != uid):
                request.create(cr, uid, {
                    'name': _("Task '%s' closed") % task.name,
                    'state': 'waiting',
                    'act_from': uid,
                    'act_to': project.user_id.id,
                    'ref_partner_id': task.partner_id.id,
                    'ref_doc1': 'project.task,%d' % (task.id,),
                    'ref_doc2': 'project.project,%d' % (project.id,),
                }, context=context)
        # Reopen a waiting parent once this was its last unfinished child.
        for parent_id in task.parent_ids:
            if parent_id.state in ('pending', 'draft'):
                reopen = True
                for child in parent_id.child_ids:
                    if child.id != task.id and child.state not in ('done', 'cancelled'):
                        reopen = False
                if reopen:
                    self.do_reopen(cr, uid, [parent_id.id], context=context)
        vals.update({'state': 'done'})
        vals.update({'remaining_hours': 0.0})
        if not task.date_end:
            vals.update({'date_end': time.strftime('%Y-%m-%d %H:%M:%S')})
        self.write(cr, uid, [task.id], vals, context=context)
        message = _("The task '%s' is done") % (task.name,)
        self.log(cr, uid, task.id, message)
    return True
def do_reopen(self, cr, uid, ids, context=None):
    """Set the given tasks back to 'open', notifying the project manager
    through a ``res.request`` when the project has ``warn_manager`` set."""
    request = self.pool.get('res.request')
    for task in self.browse(cr, uid, ids, context=context):
        project = task.project_id
        # Only warn a manager who exists and is not the acting user.
        if project and project.warn_manager and project.user_id.id and (project.user_id.id != uid):
            request.create(cr, uid, {
                'name': _("Task '%s' set in progress") % task.name,
                'state': 'waiting',
                'act_from': uid,
                'act_to': project.user_id.id,
                'ref_partner_id': task.partner_id.id,
                'ref_doc1': 'project.task,%d' % task.id,
                'ref_doc2': 'project.project,%d' % project.id,
            }, context=context)
        self.write(cr, uid, [task.id], {'state': 'open'}, context=context)
    return True
def do_cancel(self, cr, uid, ids, context=None):
    """Cancel the given tasks (refusing while children are still active),
    notify the project manager when configured, and zero remaining hours.

    Fix: replaced the shared mutable default ``context={}`` with ``None``.
    """
    request = self.pool.get('res.request')
    tasks = self.browse(cr, uid, ids, context=context)
    self._check_child_task(cr, uid, ids, context=context)
    for task in tasks:
        project = task.project_id
        if project.warn_manager and project.user_id and (project.user_id.id != uid):
            request.create(cr, uid, {
                'name': _("Task '%s' cancelled") % task.name,
                'state': 'waiting',
                'act_from': uid,
                'act_to': project.user_id.id,
                'ref_partner_id': task.partner_id.id,
                'ref_doc1': 'project.task,%d' % task.id,
                'ref_doc2': 'project.project,%d' % project.id,
            }, context=context)
        message = _("The task '%s' is cancelled.") % (task.name,)
        self.log(cr, uid, task.id, message)
        self.write(cr, uid, [task.id], {'state': 'cancelled', 'remaining_hours': 0.0}, context=context)
    return True
def do_open(self, cr, uid, ids, context=None):
    """Set the given tasks to 'open', stamping the start date on first open.

    Fix: replaced the shared mutable default ``context={}`` with ``None``.
    """
    if not isinstance(ids, list):
        ids = [ids]
    tasks = self.browse(cr, uid, ids, context=context)
    for t in tasks:
        data = {'state': 'open'}
        if not t.date_start:
            # First time this task is opened: remember when work started.
            data['date_start'] = time.strftime('%Y-%m-%d %H:%M:%S')
        self.write(cr, uid, [t.id], data, context=context)
        message = _("The task '%s' is opened.") % (t.name,)
        self.log(cr, uid, t.id, message)
    return True
def do_draft(self, cr, uid, ids, context=None):
    """Reset the given tasks to the 'draft' state.

    Fix: replaced the shared mutable default ``context={}`` with ``None``.
    """
    self.write(cr, uid, ids, {'state': 'draft'}, context=context)
    return True
def _delegate_task_attachments(self, cr, uid, task_id, delegated_task_id, context=None):
    """Copy every attachment of ``task_id`` onto the delegated task.

    :return: list of ids of the newly created attachment copies.
    """
    attachment = self.pool.get('ir.attachment')
    source_ids = attachment.search(
        cr, uid,
        [('res_model', '=', self._name), ('res_id', '=', task_id)],
        context=context)
    return [
        attachment.copy(cr, uid, att_id,
                        default={'res_id': delegated_task_id},
                        context=context)
        for att_id in source_ids
    ]
def do_delegate(self, cr, uid, ids, delegate_data=None, context=None):
    """Delegate tasks to another user.

    For each task: create a draft copy assigned to the delegate (with the
    attachments duplicated), rewrite the original's name/remaining hours,
    and move the original to 'pending' or 'done' as requested.

    Fix: replaced the shared mutable default ``delegate_data={}`` with
    ``None`` to avoid the mutable-default-argument pitfall.

    :return: dict mapping original task id -> delegated task id.
    """
    if delegate_data is None:
        delegate_data = {}
    assert delegate_data['user_id'], _("Delegated User should be specified")
    delegated_tasks = {}
    for task in self.browse(cr, uid, ids, context=context):
        delegated_task_id = self.copy(cr, uid, task.id, {
            'name': delegate_data['name'],
            'project_id': delegate_data['project_id'] and delegate_data['project_id'][0] or False,
            'user_id': delegate_data['user_id'] and delegate_data['user_id'][0] or False,
            'planned_hours': delegate_data['planned_hours'] or 0.0,
            'parent_ids': [(6, 0, [task.id])],
            'state': 'draft',
            'description': delegate_data['new_task_description'] or '',
            'child_ids': [],
            'work_ids': []
        }, context=context)
        self._delegate_task_attachments(cr, uid, task.id, delegated_task_id, context=context)
        newname = delegate_data['prefix'] or ''
        task.write({
            'remaining_hours': delegate_data['planned_hours_me'],
            # Keep total plan = my remaining share + hours already spent.
            'planned_hours': delegate_data['planned_hours_me'] + (task.effective_hours or 0.0),
            'name': newname,
        }, context=context)
        if delegate_data['state'] == 'pending':
            self.do_pending(cr, uid, task.id, context=context)
        elif delegate_data['state'] == 'done':
            self.do_close(cr, uid, task.id, context=context)
        message = _("The task '%s' has been delegated to %s.") % (delegate_data['name'], delegate_data['user_id'][1])
        self.log(cr, uid, task.id, message)
        delegated_tasks[task.id] = delegated_task_id
    return delegated_tasks
def do_pending(self, cr, uid, ids, context=None):
    """Set the given tasks to 'pending' and log one message per task.

    Fix: replaced the shared mutable default ``context={}`` with ``None``.
    """
    self.write(cr, uid, ids, {'state': 'pending'}, context=context)
    for (id, name) in self.name_get(cr, uid, ids):
        message = _("The task '%s' is pending.") % name
        self.log(cr, uid, id, message)
    return True
def set_remaining_time(self, cr, uid, ids, remaining_time=1.0, context=None):
    """Set ``remaining_hours`` on the tasks; for drafts or tasks that were
    never planned, also seed ``planned_hours`` with the same value."""
    for task in self.browse(cr, uid, ids, context=context):
        if (task.state == 'draft') or (task.planned_hours == 0.0):
            self.write(cr, uid, [task.id], {'planned_hours': remaining_time}, context=context)
    self.write(cr, uid, ids, {'remaining_hours': remaining_time}, context=context)
    return True
def set_remaining_time_1(self, cr, uid, ids, context=None):
    """Button shortcut: remaining time = 1 hour."""
    return self.set_remaining_time(cr, uid, ids, remaining_time=1.0, context=context)

def set_remaining_time_2(self, cr, uid, ids, context=None):
    """Button shortcut: remaining time = 2 hours."""
    return self.set_remaining_time(cr, uid, ids, remaining_time=2.0, context=context)

def set_remaining_time_5(self, cr, uid, ids, context=None):
    """Button shortcut: remaining time = 5 hours."""
    return self.set_remaining_time(cr, uid, ids, remaining_time=5.0, context=context)

def set_remaining_time_10(self, cr, uid, ids, context=None):
    """Button shortcut: remaining time = 10 hours."""
    return self.set_remaining_time(cr, uid, ids, remaining_time=10.0, context=context)
def set_kanban_state_blocked(self, cr, uid, ids, context=None):
    # Kanban-board button: mark the task as blocked.
    self.write(cr, uid, ids, {'kanban_state': 'blocked'}, context=context)

def set_kanban_state_normal(self, cr, uid, ids, context=None):
    # Kanban-board button: clear any blocked/ready flag.
    self.write(cr, uid, ids, {'kanban_state': 'normal'}, context=context)

def set_kanban_state_done(self, cr, uid, ids, context=None):
    # Kanban-board button: mark the task as ready to pull.
    # NOTE(review): unlike the other state helpers these return None,
    # not True — confirm callers do not rely on the return value.
    self.write(cr, uid, ids, {'kanban_state': 'done'}, context=context)
def _change_type(self, cr, uid, ids, next, *args):
    """Move each task one stage forward or backward within its project.

    :param next: True to advance to the following stage, False to step back.

    Fix: the stage ordering used the Python-2-only ``cmp`` comparator form
    of ``sorted``; it now uses a ``key`` function (works on 2 and 3) and
    sorts the stage records directly instead of a temporary id->sequence
    dict.
    """
    for task in self.browse(cr, uid, ids):
        if task.project_id.type_ids:
            typeid = task.type_id.id
            # Ascending sequence to go forward, descending to go back.
            ordered = sorted(task.project_id.type_ids,
                             key=lambda t: t.sequence,
                             reverse=not next)
            sorted_types = [t.id for t in ordered]
            if not typeid:
                # No stage yet: start at the first one in this direction.
                self.write(cr, uid, task.id, {'type_id': sorted_types[0]})
            elif typeid in sorted_types and sorted_types.index(typeid) != len(sorted_types) - 1:
                index = sorted_types.index(typeid)
                self.write(cr, uid, task.id, {'type_id': sorted_types[index + 1]})
    return True
def next_type(self, cr, uid, ids, *args):
    # Advance the tasks one project stage forward.
    return self._change_type(cr, uid, ids, True, *args)

def prev_type(self, cr, uid, ids, *args):
    # Move the tasks one project stage back.
    return self._change_type(cr, uid, ids, False, *args)
def _store_history(self, cr, uid, ids, context=None):
    """Append a ``project.task.history`` snapshot (hours, stage, states,
    assignee) for each task — feeds the cumulative flow charts."""
    for task in self.browse(cr, uid, ids, context=context):
        self.pool.get('project.task.history').create(cr, uid, {
            'task_id': task.id,
            'remaining_hours': task.remaining_hours,
            'planned_hours': task.planned_hours,
            'kanban_state': task.kanban_state,
            'type_id': task.type_id.id,
            'state': task.state,
            'user_id': task.user_id.id
        }, context=context)
    return True
def create(self, cr, uid, vals, context=None):
    """Create the task, then record its initial history snapshot."""
    result = super(task, self).create(cr, uid, vals, context=context)
    self._store_history(cr, uid, [result], context=context)
    return result
# Overridden to reset the kanban_state to normal whenever
# the stage (type_id) of the task changes.
def write(self, cr, uid, ids, vals, context=None):
    """Write task values; reset kanban_state on stage moves and store a
    history snapshot whenever a tracked field changes."""
    if isinstance(ids, (int, long)):
        ids = [ids]
    if vals and not 'kanban_state' in vals and 'type_id' in vals:
        new_stage = vals.get('type_id')
        vals_reset_kstate = dict(vals, kanban_state='normal')
        for t in self.browse(cr, uid, ids, context=context):
            # NOTE(review): ``t.type_id`` is a browse record while
            # ``new_stage`` is an id, so this comparison looks always
            # unequal, resetting kanban_state on every such write —
            # confirm whether ``t.type_id.id`` was intended.
            write_vals = vals_reset_kstate if t.type_id != new_stage else vals
            super(task, self).write(cr, uid, [t.id], write_vals, context=context)
        result = True
    else:
        result = super(task, self).write(cr, uid, ids, vals, context=context)
    # Snapshot only when a chart-relevant field moved.
    if ('type_id' in vals) or ('remaining_hours' in vals) or ('user_id' in vals) or ('state' in vals) or ('kanban_state' in vals):
        self._store_history(cr, uid, ids, context=context)
    return result
def unlink(self, cr, uid, ids, context=None):
    """Delete tasks, refusing while any child task is still active."""
    if context is None:
        context = {}
    self._check_child_task(cr, uid, ids, context=context)
    return super(task, self).unlink(cr, uid, ids, context)
def _generate_task(self, cr, uid, tasks, ident=4, context=None):
    """Render the given tasks as a scheduler (faces/TaskJuggler-style)
    snippet: one ``Task_<id>`` block per unfinished task, with parent
    links expressed as ``start`` dependencies and the assignee as the
    ``resource``.

    :param ident: number of spaces of indentation for the emitted blocks.
    """
    context = context or {}
    result = ""
    ident = ' ' * ident
    for task in tasks:
        if task.state in ('done', 'cancelled'):
            # Finished work needs no scheduling.
            continue
        result += '''
%sdef Task_%s():
%s  todo = \"%.2fH\"
%s  effort = \"%.2fH\"''' % (ident, task.id, ident, task.remaining_hours, ident, task.total_hours)
        start = []
        for t2 in task.parent_ids:
            start.append("up.Task_%s.end" % (t2.id,))
        if start:
            # Task may only start once every parent task has ended.
            result += '''
%s  start = max(%s)
''' % (ident, ','.join(start))
        if task.user_id:
            result += '''
%s  resource = %s
''' % (ident, 'User_' + str(task.user_id.id))
        result += "\n"
    return result
task()
class project_work(osv.osv):
    """Work done on a task (timesheet-like lines).

    Every create/write/unlink keeps the parent task's ``remaining_hours``
    in sync through direct, parameterized SQL updates.
    """
    _name = "project.task.work"
    _description = "Project Task Work"
    _columns = {
        'name': fields.char('Work summary', size=128),
        'date': fields.datetime('Date', select="1"),
        'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select="1"),
        'hours': fields.float('Time Spent'),
        'user_id': fields.many2one('res.users', 'Done by', required=True, select="1"),
        'company_id': fields.related('task_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
    }
    _defaults = {
        'user_id': lambda obj, cr, uid, context: uid,
        'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S')
    }
    _order = "date desc"

    def create(self, cr, uid, vals, *args, **kwargs):
        """Create a work line, subtracting its hours from the task's
        remaining time."""
        # Normalise falsy hours (False/None) to a numeric 0.00.
        if 'hours' in vals and (not vals['hours']):
            vals['hours'] = 0.00
        if 'task_id' in vals:
            cr.execute('update project_task set remaining_hours=remaining_hours - %s where id=%s', (vals.get('hours', 0.0), vals['task_id']))
        return super(project_work, self).create(cr, uid, vals, *args, **kwargs)

    def write(self, cr, uid, ids, vals, context=None):
        """Update work lines, shifting each task's remaining time by the
        difference between the old and new hours."""
        if 'hours' in vals and (not vals['hours']):
            vals['hours'] = 0.00
        if 'hours' in vals:
            for work in self.browse(cr, uid, ids, context=context):
                # remaining -= new_hours - old_hours
                cr.execute('update project_task set remaining_hours=remaining_hours - %s + (%s) where id=%s', (vals.get('hours', 0.0), work.hours, work.task_id.id))
        return super(project_work, self).write(cr, uid, ids, vals, context)

    def unlink(self, cr, uid, ids, *args, **kwargs):
        """Delete work lines, giving their hours back to the task."""
        for work in self.browse(cr, uid, ids):
            cr.execute('update project_task set remaining_hours=remaining_hours + %s where id=%s', (work.hours, work.task_id.id))
        return super(project_work, self).unlink(cr, uid, ids, *args, **kwargs)
project_work()
class account_analytic_account(osv.osv):
    """Project-related customisation of analytic accounts."""
    _inherit = 'account.analytic.account'
    _description = 'Analytic Account'

    def create(self, cr, uid, vals, context=None):
        """Create an analytic account; when triggered by a project copy
        (``analytic_project_copy`` flag in context) drop the children so
        they are not duplicated twice."""
        if context is None:
            context = {}
        if vals.get('child_ids', False) and context.get('analytic_project_copy', False):
            vals['child_ids'] = []
        return super(account_analytic_account, self).create(cr, uid, vals, context=context)

    def unlink(self, cr, uid, ids, *args, **kwargs):
        """Refuse deletion while a project is still linked to the account."""
        project_obj = self.pool.get('project.project')
        analytic_ids = project_obj.search(cr, uid, [('analytic_account_id', 'in', ids)])
        if analytic_ids:
            raise osv.except_osv(_('Warning !'), _('Please delete the project linked with this account first.'))
        return super(account_analytic_account, self).unlink(cr, uid, ids, *args, **kwargs)
account_analytic_account()
#
# Tasks History, used for cumulative flow charts (Lean/Agile)
#
class project_task_history(osv.osv):
    """Per-write snapshot of a task's tracked fields.

    Feeds the cumulative flow charts (Lean/Agile).  ``_log_access`` is
    disabled because rows are append-only bookkeeping data.
    """
    _name = 'project.task.history'
    _description = 'History of Tasks'
    _rec_name = 'task_id'
    _log_access = False

    def _get_date(self, cr, uid, ids, name, arg, context=None):
        """Functional getter for ``end_date``: a snapshot ends when the next
        snapshot of the same task begins; done/cancelled snapshots end at
        their own date."""
        result = {}
        for history in self.browse(cr, uid, ids, context=context):
            if history.state in ('done', 'cancelled'):
                result[history.id] = history.date
                continue
            # First later snapshot of the same task, if any.
            cr.execute('''select
                    date
                from
                    project_task_history
                where
                    task_id=%s and
                    id>%s
                order by id limit 1''', (history.task_id.id, history.id))
            res = cr.fetchone()
            result[history.id] = res and res[0] or False
        return result

    def _get_related_date(self, cr, uid, ids, context=None):
        """Store trigger: when a snapshot changes, the immediately preceding
        snapshot of the same task must recompute its ``end_date``."""
        result = []
        for history in self.browse(cr, uid, ids, context=context):
            cr.execute('''select
                    id
                from
                    project_task_history
                where
                    task_id=%s and
                    id<%s
                order by id desc limit 1''', (history.task_id.id, history.id))
            res = cr.fetchone()
            if res:
                result.append(res[0])
        return result

    _columns = {
        'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select=True),
        'type_id': fields.many2one('project.task.type', 'Stage'),
        'state': fields.selection([('draft', 'New'), ('open', 'In Progress'), ('pending', 'Pending'), ('done', 'Done'), ('cancelled', 'Cancelled')], 'State'),
        'kanban_state': fields.selection([('normal', 'Normal'), ('blocked', 'Blocked'), ('done', 'Ready To Pull')], 'Kanban State', required=False),
        'date': fields.date('Date', select=True),
        # Stored function field kept up to date by _get_related_date.
        'end_date': fields.function(_get_date, string='End Date', type="date", store={
            'project.task.history': (_get_related_date, None, 20)
        }),
        'remaining_hours': fields.float('Remaining Time', digits=(16, 2)),
        'planned_hours': fields.float('Planned Time', digits=(16, 2)),
        'user_id': fields.many2one('res.users', 'Responsible'),
    }
    _defaults = {
        'date': fields.date.context_today,
    }
project_task_history()
class project_task_history_cumulative(osv.osv):
    """Read-only SQL view expanding each history row into one row per day
    of its [date, end_date) interval — the data source for cumulative
    flow charts."""
    _name = 'project.task.history.cumulative'
    _table = 'project_task_history_cumulative'
    _inherit = 'project.task.history'
    _auto = False
    _columns = {
        'end_date': fields.date('End Date'),
        'project_id': fields.related('task_id', 'project_id', string='Project', type='many2one', relation='project.project')
    }

    def init(self, cr):
        # generate_series duplicates each snapshot once per day it was
        # current; open-ended snapshots run until 'tomorrow'.
        cr.execute(""" CREATE OR REPLACE VIEW project_task_history_cumulative AS (
            SELECT
                history.date::varchar||'-'||history.history_id::varchar as id,
                history.date as end_date,
                *
            FROM (
                SELECT
                    id as history_id,
                    date+generate_series(0, CAST((coalesce(end_date,DATE 'tomorrow')::date - date)AS integer)-1) as date,
                    task_id, type_id, user_id, kanban_state, state,
                    remaining_hours, planned_hours
                FROM
                    project_task_history
            ) as history
        )
        """)
project_task_history_cumulative()
| agpl-3.0 |
audip/lunr | lunr/common/lock.py | 2 | 5070 | # Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fcntl
import os
import json
class NullResource(object):
    """No-op stand-in that implements the resource-lock interface for spawn.

    Accepts (and ignores) any constructor arguments; is never owned; all
    lock/ownership operations do nothing.
    """

    def __init__(self, *args, **kwargs):
        self.owned = False

    def remove(self):
        """Nothing to remove for a null resource."""
        pass

    def __enter__(self):
        # Context-manager protocol: "locking" is a no-op.
        return self

    def __exit__(self, exc_type, exc_value, trace):
        # Nothing to release; exceptions propagate (implicit None).
        pass

    def acquire(self, info):
        """Ignore ownership info; a null resource is never owned."""
        pass
class LockFile(object):
    """Manages locking and unlocking an open file handle;
    can also be used as a context manager.

    Accepts either an already-open descriptor or a path (the file — and
    its parent directory — are created on demand).

    Fix: ``close()`` used ``except TypeError, OSError:`` which, in
    Python 2, only catches TypeError and rebinds the exception to the
    name ``OSError``; it now catches the intended tuple.  The other
    handlers use the ``as`` form, valid on Python 2.6+ and 3.
    """

    def __init__(self, fd, lock_operation=fcntl.LOCK_EX,
                 unlock_operation=fcntl.LOCK_UN):
        self.fd = fd
        self.file_name = None
        if type(fd) != int:
            # A path was given instead of a descriptor: open (create) it.
            self.fd = self.open(fd)
            self.file_name = fd
        self.lock_operation = lock_operation
        self.unlock_operation = unlock_operation

    def __enter__(self):
        self.lock(self.lock_operation)
        return self

    def __exit__(self, exc_type, exc_value, trace):
        self.unlock(self.unlock_operation)
        return False

    def lock(self, operation=fcntl.LOCK_EX):
        """Take the flock with the given operation (exclusive by default)."""
        fcntl.flock(self.fd, operation)

    def unlock(self, operation=fcntl.LOCK_UN):
        """Release (or downgrade) the flock."""
        fcntl.flock(self.fd, operation)

    def write(self, data):
        """Truncate the file and replace its contents with ``data``."""
        os.lseek(self.fd, 0, os.SEEK_SET)
        os.ftruncate(self.fd, 0)
        os.write(self.fd, data)
        os.fsync(self.fd)

    def read(self):
        """Return the entire contents of the file."""
        size = os.lseek(self.fd, 0, os.SEEK_END)
        os.lseek(self.fd, 0, os.SEEK_SET)
        return os.read(self.fd, size)

    def close(self):
        """Close the descriptor, ignoring already-closed/invalid handles."""
        try:
            os.close(self.fd)
        except (TypeError, OSError):
            # TypeError: fd is None (never opened / closed twice);
            # OSError: descriptor already invalid.  Both are harmless here.
            pass
        self.fd = None

    def unlink(self):
        """Close the descriptor and best-effort delete the backing file."""
        self.close()
        try:
            os.unlink(self.file_name)
        except OSError:
            # File already gone; nothing to do.
            pass

    def _createdir(self, file_name):
        """Create the parent directory of ``file_name`` if missing."""
        try:
            dir = os.path.dirname(file_name)
            os.makedirs(dir)
        except OSError as e:
            # ignore if already exists
            if e.errno != errno.EEXIST:
                raise

    def open(self, file_name):
        """Open (creating if needed) ``file_name``; retry once after
        creating a missing parent directory."""
        for i in range(0, 2):
            try:
                # Attempt to create the file
                return os.open(file_name, os.O_RDWR | os.O_CREAT)
            except OSError as e:
                # No such file or directory
                if e.errno == errno.ENOENT:
                    # create the dir and try again
                    self._createdir(file_name)
                    continue
                # Unknown error
                raise
        raise RuntimeError("failed to create '%s'" % file_name)
class JsonLockFile(LockFile):
    """Manages a lock file whose contents are a JSON document.

    Fix: ``read()`` used the Python-2-only ``except ValueError, e``
    syntax with an unused binding; it is now a plain ``except ValueError``
    (valid on 2.6+ and 3).
    """

    def update(self, info):
        """Merge ``info`` into the stored JSON mapping."""
        data = self.read()
        data.update(info)
        self.write(data)

    def get(self, key, default=None):
        """Return ``data[key]``, or ``default`` when the key is absent."""
        try:
            data = self.read()
            return data[key]
        except KeyError:
            return default

    def write(self, data):
        """Serialize ``data`` as JSON and store it in the file."""
        super(JsonLockFile, self).write(json.dumps(data))

    def read(self):
        """Return the parsed JSON contents; an empty or corrupt file
        behaves as an empty mapping."""
        try:
            return json.loads(super(JsonLockFile, self).read())
        except ValueError:
            return {}
class ResourceFile(JsonLockFile):
    """Manages ownership of a resource file;
    can also be used as a context manager.

    Fix: ``alive()`` used the Python-2-only ``except OSError, e`` syntax
    with an unused binding; it is now a plain ``except OSError`` (valid
    on 2.6+ and 3).
    """

    def __init__(self, file_name):
        self.file_name = file_name
        self.owned = False
        # Opened lazily by __enter__ so the descriptor only lives inside
        # the ``with`` body.
        self.fd = None

    def __enter__(self):
        self.fd = self.open(self.file_name)
        super(ResourceFile, self).lock()
        return self

    def __exit__(self, exc_type, exc_value, trace):
        super(ResourceFile, self).unlock()
        self.close()
        return False

    def used(self):
        """Return the owner info dict if the resource is in use, else False."""
        info = self.read()
        # If pid is alive, the volume is owned by someone else
        if 'pid' in info and self.alive(info['pid']):
            return info
        return False

    def alive(self, pid):
        """True if a process with ``pid`` exists (signal-0 probe)."""
        try:
            os.kill(pid, 0)
            return True
        except OSError:
            return False

    def acquire(self, info):
        """Acquire ownership of the file by writing our pid information."""
        self.update(info)
        if 'pid' in info:
            # We own the resource
            self.owned = True

    def remove(self):
        """Delete the resource file, but only if we actually own it."""
        if self.owned:
            self.unlink()
| apache-2.0 |
imcsk8/kubernetes | hack/update_owners.py | 149 | 9054 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import csv
import json
import os
import random
import re
import subprocess
import sys
import time
import urllib2
import zlib
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
OWNERS_PATH = os.path.abspath(
os.path.join(BASE_DIR, '..', 'test', 'test_owners.csv'))
OWNERS_JSON_PATH = OWNERS_PATH.replace('.csv', '.json')
GCS_URL_BASE = 'https://storage.googleapis.com/kubernetes-test-history/'
SKIP_MAINTAINERS = {
'a-robinson', 'aronchick', 'bgrant0607-nocc', 'david-mcmahon',
'goltermann', 'sarahnovotny'}
def normalize(name):
    """Strip [bracketed]/{braced} tags from a test name and collapse all
    runs of whitespace to single spaces, trimming the ends."""
    without_tags = re.sub(r'\[.*?\]|\{.*?\}', '', name)
    # str.split() with no args discards leading/trailing whitespace too,
    # so join+split is equivalent to sub(r'\s+', ' ', ...).strip().
    return ' '.join(without_tags.split())
def get_test_history(days_ago):
    """Download and parse the test-history JSON blob for a given day.

    ``days_ago`` selects the dated GCS log file relative to now; a
    gzip-encoded response is decompressed (zlib wbits 15|16 = gzip).
    """
    url = time.strftime(GCS_URL_BASE + 'logs/%Y-%m-%d.json',
                        time.gmtime(time.time() - days_ago * 24 * 60 * 60))
    resp = urllib2.urlopen(url)
    content = resp.read()
    if resp.headers.get('content-encoding') == 'gzip':
        content = zlib.decompress(content, 15 | 16)
    return json.loads(content)
def get_test_names_from_test_history():
    """Collect the normalized test names seen over the last four days of
    uploaded test history."""
    return {
        normalize(name)
        for days_ago in range(4)
        for name in get_test_history(days_ago)['test_names']
    }
def get_test_names_from_local_files():
    """List normalized test names by running the in-repo Go test lister.

    For entries without a 'k8s.io/' package path, the display name is the
    Name joined with the per-test TestName.
    """
    tests_json = subprocess.check_output(['go', 'run', 'test/list/main.go', '-json'])
    tests = json.loads(tests_json)
    return {normalize(t['Name'] + (' ' + t['TestName'] if 'k8s.io/' not in t['Name'] else ''))
            for t in tests}
def load_owners(fname):
    """Parse a test_owners CSV into {normalized name: (owner, auto, sig)}.

    Rows with only three columns come from the pre-sig file format and
    get an empty sig.
    """
    owners = {}
    with open(fname) as handle:
        for row_number, cols in enumerate(csv.reader(handle)):
            if row_number == 0:
                continue  # header
            if len(cols) == 3:
                # migrate from previous version without sig
                name, owner, random_assignment = cols
                sig = ""
            else:
                name, owner, random_assignment, sig = cols
            owners[normalize(name)] = (owner, int(random_assignment), sig)
    return owners
def write_owners(fname, owners):
    """Write the owners mapping back to ``fname`` as a sorted CSV with the
    canonical header row."""
    with open(fname, 'w') as handle:
        writer = csv.writer(handle, lineterminator='\n')
        writer.writerow(['name', 'owner', 'auto-assigned', 'sig'])
        for name, (owner, random_assignment, sig) in sorted(owners.items()):
            writer.writerow([name, owner, int(random_assignment), sig])
def get_maintainers():
    """Return the sorted maintainer list, minus the accounts in
    SKIP_MAINTAINERS."""
    # Github doesn't seem to support team membership listing without a key with
    # org admin privileges. Instead, we do it manually:
    # Open https://github.com/orgs/kubernetes/teams/kubernetes-maintainers
    # Run this in the js console:
    # [].slice.call(document.querySelectorAll('.team-member-username a')).map(
    #     e => e.textContent.trim())
    maintainers = {
        "alex-mohr", "apelisse", "aronchick", "bgrant0607", "bgrant0607-nocc",
        "bprashanth", "brendandburns", "caesarxuchao", "childsb", "cjcullen",
        "david-mcmahon", "davidopp", "dchen1107", "deads2k", "derekwaynecarr",
        "eparis", "erictune", "fabioy", "fejta", "fgrzadkowski", "freehan",
        "gmarek", "grodrigues3", "ingvagabund", "ixdy", "janetkuo", "jbeda",
        "jessfraz", "jingxu97", "jlowdermilk", "jsafrane", "jszczepkowski",
        "justinsb", "Kashomon", "kevin-wangzefeng", "krousey",
        "lavalamp", "liggitt", "luxas", "madhusudancs", "maisem", "matchstick",
        "mbohlool", "mikedanese", "mml", "mtaufen", "mwielgus", "ncdc",
        "nikhiljindal", "piosz", "pmorie", "pwittrock", "Q-Lee", "quinton-hoole",
        "Random-Liu", "rmmh", "roberthbailey", "saad-ali", "smarterclayton",
        "soltysh", "spxtr", "sttts", "thelinuxfoundation", "thockin",
        "timothysc", "tallclair", "vishh", "wojtek-t", "xiang90", "yifan-gu",
        "yujuhong", "zmerlynn"}
    return sorted(maintainers - SKIP_MAINTAINERS)
def detect_github_username():
    """Guess the GitHub username from the git 'origin' remote URL.

    Raises ValueError when the remote does not look like a personal fork
    (i.e. the path owner is 'kubernetes' or unparseable).
    """
    origin_url = subprocess.check_output(['git', 'config', 'remote.origin.url'])
    m = re.search(r'github.com[:/](.*)/', origin_url)
    if m and m.group(1) != 'kubernetes':
        return m.group(1)
    raise ValueError('unable to determine GitHub user from '
                     '`git config remote.origin.url` output, run with --user instead')
def sig_prefixes(owners):
    """Collapse per-test SIG assignments into the shortest unambiguous
    name prefixes and return them grouped by SIG as pretty-printed JSON.

    Repeatedly drops the last word of a name; the shortened prefix is
    kept only while every test sharing it belongs to the same SIG.
    """
    # TODO(rmmh): make sig prefixes the only thing in test_owners!
    # Precise test names aren't very interesting.
    owns = []
    for test, (owner, random_assignment, sig) in owners.iteritems():
        # Go-path tests and unassigned tests don't participate.
        if 'k8s.io/' in test or not sig:
            continue
        owns.append([test, sig])
    while True:
        owns.sort()
        for name, sig in owns:
            # try removing the last word in the name, use it if all tests beginning
            # with this shorter name share the same sig.
            maybe_prefix = ' '.join(name.split()[:-1])
            matches = [other_sig == sig for other_name, other_sig in owns if other_name.startswith(maybe_prefix)]
            if matches and all(matches):
                # Replace every covered entry by the single shorter prefix
                # and restart the scan (fixpoint iteration).
                owns = [[n, s] for n, s in owns if not n.startswith(maybe_prefix)]
                owns.append([maybe_prefix, sig])
                break
        else:  # iterated completely through owns without any changes
            break
    sigs = {}
    for name, sig in owns:
        sigs.setdefault(sig, []).append(name)
    return json.dumps(sigs, sort_keys=True, indent=True)
def main():
    """Reconcile test_owners.csv with the current set of test names.

    Flow: gather test names (from history or local files), load the CSV,
    regenerate the derived SIG-prefix JSON, report and drop outdated
    entries, then assign owners to new tests (randomly among maintainers
    or to --user) and rewrite the CSV.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--history', action='store_true', help='Generate test list from result history.')
    parser.add_argument('--user', help='User to assign new tests to (or RANDOM, default: current GitHub user).')
    parser.add_argument('--addonly', action='store_true', help='Only add missing tests, do not change existing.')
    parser.add_argument('--check', action='store_true', help='Exit with a nonzero status if the test list has changed.')
    parser.add_argument('--print_sig_prefixes', action='store_true', help='Emit SIG prefixes for matching.')
    options = parser.parse_args()
    if options.history:
        test_names = get_test_names_from_test_history()
    else:
        test_names = get_test_names_from_local_files()
    test_names = sorted(test_names)
    owners = load_owners(OWNERS_PATH)
    # Keep the derived JSON file in sync with the CSV on every run.
    prefixes = sig_prefixes(owners)
    with open(OWNERS_JSON_PATH, 'w') as f:
        f.write(prefixes + '\n')
    if options.print_sig_prefixes:
        print prefixes
        return
    outdated_tests = sorted(set(owners) - set(test_names))
    new_tests = sorted(set(test_names) - set(owners))
    maintainers = get_maintainers()
    print '# OUTDATED TESTS (%d):' % len(outdated_tests)
    print '\n'.join('%s -- %s%s' %
                    (t, owners[t][0], ['', ' (random)'][owners[t][1]])
                    for t in outdated_tests)
    print '# NEW TESTS (%d):' % len(new_tests)
    print '\n'.join(new_tests)
    if options.check:
        # CI mode: fail when the committed list is stale.
        if new_tests or outdated_tests:
            print
            print 'ERROR: the test list has changed'
            sys.exit(1)
        sys.exit(0)
    if not options.user:
        options.user = detect_github_username()
    for name in outdated_tests:
        owners.pop(name)
    if not options.addonly:
        # Drop (to be reassigned below) tests whose random owner is no
        # longer a maintainer.
        print '# UNEXPECTED MAINTAINERS ',
        print '(randomly assigned, but not in kubernetes-maintainers)'
        for name, (owner, random_assignment, _) in sorted(owners.iteritems()):
            if random_assignment and owner not in maintainers:
                print '%-16s %s' % (owner, name)
                owners.pop(name)
        print
    owner_counts = collections.Counter(
        owner for name, (owner, random, sig) in owners.iteritems()
        if owner in maintainers)
    for test_name in set(test_names) - set(owners):
        random_assignment = True
        if options.user.lower() == 'random':
            # Pick among the four least-loaded maintainers.
            new_owner, _count = random.choice(owner_counts.most_common()[-4:])
        else:
            new_owner = options.user
            random_assignment = False
        owner_counts[new_owner] += 1
        owners[test_name] = (new_owner, random_assignment, "")
    if options.user.lower() == 'random':
        print '# Tests per maintainer:'
        for owner, count in owner_counts.most_common():
            print '%-20s %3d' % (owner, count)
    write_owners(OWNERS_PATH, owners)


if __name__ == '__main__':
    main()
| apache-2.0 |
kybriainfotech/iSocioCRM | addons/crm_partner_assign/__openerp__.py | 244 | 2369 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest.
# Fix: corrected user-visible typos ("Categorie" -> "Category",
# "Proeject's" -> "Project's"); keys and behavior are unchanged.
{
    'name': 'Per Project Configurable Category on Issues',
    'summary': 'Projects Issues can have an allowed category list',
    'version': '8.0.0.1.0',
    "category": "Project Management",
    'description': """\
Adds to Issues the ability to limit selectable Categories to a Project's
specific list.
""",
    'author': "Daniel Reis,Odoo Community Association (OCA)",
    'license': 'AGPL-3',
    'depends': [
        'project_issue',
        'project_categ',
    ],
    'data': [
        'project_categ_view.xml',
    ],
    'installable': True,
    'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chrisnatali/networkx | networkx/algorithms/tests/test_threshold.py | 4 | 6663 | #!/usr/bin/env python
"""Threshold Graphs
================
"""
from nose.tools import *
from nose import SkipTest
from nose.plugins.attrib import attr
import networkx as nx
import networkx.algorithms.threshold as nxt
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic
cnlti = nx.convert_node_labels_to_integers
class TestGeneratorThreshold():
def test_threshold_sequence_graph_test(self):
    """Star and complete graphs are threshold graphs; check degree-sequence
    recognition on positive and negative examples."""
    G = nx.star_graph(10)
    assert_true(nxt.is_threshold_graph(G))
    assert_true(nxt.is_threshold_sequence(list(d for n, d in G.degree())))

    G = nx.complete_graph(10)
    assert_true(nxt.is_threshold_graph(G))
    assert_true(nxt.is_threshold_sequence(list(d for n, d in G.degree())))

    # This degree sequence is not realizable by any threshold graph.
    deg = [3, 2, 2, 1, 1, 1]
    assert_false(nxt.is_threshold_sequence(deg))

    deg = [3, 2, 2, 1]
    assert_true(nxt.is_threshold_sequence(deg))

    G = nx.generators.havel_hakimi_graph(deg)
    assert_true(nxt.is_threshold_graph(G))
def test_creation_sequences(self):
    """The three creation-sequence encodings (plain, labeled, compact) must
    round-trip to isomorphic threshold graphs."""
    deg = [3, 2, 2, 1]
    G = nx.generators.havel_hakimi_graph(deg)

    cs0 = nxt.creation_sequence(deg)
    H0 = nxt.threshold_graph(cs0)
    assert_equal(''.join(cs0), 'ddid')

    cs1 = nxt.creation_sequence(deg, with_labels=True)
    H1 = nxt.threshold_graph(cs1)
    assert_equal(cs1, [(1, 'd'), (2, 'd'), (3, 'i'), (0, 'd')])

    cs2 = nxt.creation_sequence(deg, compact=True)
    H2 = nxt.threshold_graph(cs2)
    assert_equal(cs2, [2, 1, 1])
    assert_equal(''.join(nxt.uncompact(cs2)), 'ddid')

    assert_true(graph_could_be_isomorphic(H0, G))
    assert_true(graph_could_be_isomorphic(H0, H1))
    assert_true(graph_could_be_isomorphic(H0, H2))
def test_shortest_path(self):
    """Creation-sequence shortest paths must agree with generic networkx
    shortest paths on the realized graph."""
    deg = [3, 2, 2, 1]
    G = nx.generators.havel_hakimi_graph(deg)
    cs1 = nxt.creation_sequence(deg, with_labels=True)
    for n, m in [(3, 0), (0, 3), (0, 2), (0, 1), (1, 3),
                 (3, 1), (1, 2), (2, 3)]:
        assert_equal(nxt.shortest_path(cs1, n, m),
                     nx.shortest_path(G, n, m))

    # Path lengths: labeled and unlabeled sequences must agree.
    spl = nxt.shortest_path_length(cs1, 3)
    spl2 = nxt.shortest_path_length([t for v, t in cs1], 2)
    assert_equal(spl, spl2)

    # Re-key positional lengths by node label and compare with networkx.
    spld = {}
    for j, pl in enumerate(spl):
        n = cs1[j][0]
        spld[n] = pl
    assert_equal(spld, nx.single_source_shortest_path_length(G, 3))
def test_weights_thresholds(self):
    """Weights <-> creation-sequence conversions must round-trip, for all
    accepted sequence encodings."""
    wseq = [3, 4, 3, 3, 5, 6, 5, 4, 5, 6]
    cs = nxt.weights_to_creation_sequence(wseq, threshold=10)
    wseq = nxt.creation_sequence_to_weights(cs)
    cs2 = nxt.weights_to_creation_sequence(wseq)
    assert_equal(cs, cs2)

    # Compact, plain, labeled and string encodings all yield the same weights.
    wseq = nxt.creation_sequence_to_weights(nxt.uncompact([3, 1, 2, 3, 3, 2, 3]))
    assert_equal(wseq,
                 [s * 0.125 for s in [4, 4, 4, 3, 5, 5, 2, 2, 2, 6, 6, 6, 1, 1, 7, 7, 7]])

    wseq = nxt.creation_sequence_to_weights([3, 1, 2, 3, 3, 2, 3])
    assert_equal(wseq,
                 [s * 0.125 for s in [4, 4, 4, 3, 5, 5, 2, 2, 2, 6, 6, 6, 1, 1, 7, 7, 7]])

    wseq = nxt.creation_sequence_to_weights(list(enumerate('ddidiiidididi')))
    assert_equal(wseq,
                 [s * 0.1 for s in [5, 5, 4, 6, 3, 3, 3, 7, 2, 8, 1, 9, 0]])

    wseq = nxt.creation_sequence_to_weights('ddidiiidididi')
    assert_equal(wseq,
                 [s * 0.1 for s in [5, 5, 4, 6, 3, 3, 3, 7, 2, 8, 1, 9, 0]])

    # Floating-point comparison needs a tolerance here.
    wseq = nxt.creation_sequence_to_weights('ddidiiidididid')
    ws = [s / float(12) for s in [6, 6, 5, 7, 4, 4, 4, 8, 3, 9, 2, 10, 1, 11]]
    assert_true(sum([abs(c - d) for c, d in zip(wseq, ws)]) < 1e-14)
def test_finding_routines(self):
G=nx.Graph({1:[2],2:[3],3:[4],4:[5],5:[6]})
G.add_edge(2,4)
G.add_edge(2,5)
G.add_edge(2,7)
G.add_edge(3,6)
G.add_edge(4,6)
# Alternating 4 cycle
assert_equal(nxt.find_alternating_4_cycle(G), [1, 2, 3, 6])
# Threshold graph
TG=nxt.find_threshold_graph(G)
assert_true(nxt.is_threshold_graph(TG))
assert_equal(sorted(TG.nodes()), [1, 2, 3, 4, 5, 7])
cs=nxt.creation_sequence(dict(TG.degree()), with_labels=True)
assert_equal(nxt.find_creation_sequence(G), cs)
    def test_fast_versions_properties_threshold_graphs(self):
        """Sequence-based property helpers agree with the generic networkx
        algorithms run on the materialized threshold graph."""
        cs='ddiiddid'
        G=nxt.threshold_graph(cs)
        assert_equal(nxt.density('ddiiddid'), nx.density(G))
        assert_equal(sorted(nxt.degree_sequence(cs)),
                     sorted(d for n, d in G.degree()))

        ts=nxt.triangle_sequence(cs)
        assert_equal(ts, list(nx.triangles(G).values()))
        # The per-node sequence counts each triangle once per vertex,
        # hence the division by 3 to get the total triangle count.
        assert_equal(sum(ts) // 3, nxt.triangles(cs))

        c1=nxt.cluster_sequence(cs)
        c2=list(nx.clustering(G).values())
        assert_almost_equal(sum([abs(c-d) for c,d in zip(c1,c2)]), 0)

        b1=nx.betweenness_centrality(G).values()
        b2=nxt.betweenness_sequence(cs)
        assert_true(sum([abs(c-d) for c,d in zip(b1,b2)]) < 1e-14)

        assert_equal(nxt.eigenvalues(cs), [0, 1, 3, 3, 5, 7, 7, 8])

        # Degree Correlation
        assert_true(abs(nxt.degree_correlation(cs)+0.593038821954) < 1e-12)
        assert_equal(nxt.degree_correlation('diiiddi'), -0.8)
        assert_equal(nxt.degree_correlation('did'), -1.0)
        assert_equal(nxt.degree_correlation('ddd'), 1.0)
        assert_equal(nxt.eigenvalues('dddiii'), [0, 0, 0, 0, 3, 3])
        assert_equal(nxt.eigenvalues('dddiiid'), [0, 1, 1, 1, 4, 4, 7])
def test_tg_creation_routines(self):
s=nxt.left_d_threshold_sequence(5,7)
s=nxt.right_d_threshold_sequence(5,7)
s1=nxt.swap_d(s,1.0,1.0)
    @attr('numpy')
    def test_eigenvectors(self):
        """Laplacian eigenvectors computed from the creation sequence are
        normalized (unit Euclidean length)."""
        try:
            # Probe availability: the test is skipped when numpy/scipy are
            # missing.  `eigenval` and `scipy` are only needed by the
            # disabled checks below.
            import numpy as N
            eigenval=N.linalg.eigvals
            import scipy
        except ImportError:
            raise SkipTest('SciPy not available.')

        cs='ddiiddid'
        G=nxt.threshold_graph(cs)
        (tgeval,tgevec)=nxt.eigenvectors(cs)
        dot=N.dot
        # Each eigenvector should have unit norm (dot(v, v) == 1).
        assert_equal([ abs(dot(lv,lv)-1.0)<1e-9 for lv in tgevec ], [True]*8)
        lapl=nx.laplacian_matrix(G)
        # NOTE(review): `lapl` is only consumed by the disabled assertions
        # below — consider re-enabling or removing them.
#        tgev=[ dot(lv,dot(lapl,lv)) for lv in tgevec ]
#        assert_true(sum([abs(c-d) for c,d in zip(tgev,tgeval)]) < 1e-9)
#        tgev.sort()
#        lev=list(eigenval(lapl))
#        lev.sort()
#        assert_true(sum([abs(c-d) for c,d in zip(tgev,lev)]) < 1e-9)
def test_create_using(self):
cs='ddiiddid'
G=nxt.threshold_graph(cs)
assert_raises(nx.exception.NetworkXError,
nxt.threshold_graph, cs, create_using=nx.DiGraph())
MG=nxt.threshold_graph(cs,create_using=nx.MultiGraph())
assert_equal(sorted(MG.edges()), sorted(G.edges()))
| bsd-3-clause |
makinacorpus/ionyweb | ionyweb/administration/views/plugin.py | 2 | 17535 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.template import RequestContext
from django.template.loader import render_to_string
from django.contrib.contenttypes.models import ContentType
from django.http import Http404
from djangorestframework.response import Response, ErrorResponse
from djangorestframework import status
from ionyweb.utils import ContentTypeAccessor as CTA
from ionyweb.plugin.models import PluginRelation
from ionyweb.website.rendering import RenderingContext
from ionyweb.administration.views import IsAdminView
from ionyweb.administration.utils import (MESSAGES,
check_object_html_id,
check_placeholder_html_id,
is_page_placeholder_html_id)
from ionyweb.plugin.conf import PLUGINS_CATEGORIES, PLUGINS_LIST
class PluginsView(IsAdminView):
    """
    Management of the Plugins List.
    """

    def get(self, request):
        """
        Return the list of available plugin types.

        No parameter required.
        """
        context = {
            'list_plugin': PLUGINS_LIST,
            'list_categories': PLUGINS_CATEGORIES,
        }
        html = render_to_string('administration/plugin/plugin-list.html',
                                context,
                                context_instance=RequestContext(request))
        return self.render(Response(status.HTTP_200_OK, {"html": html}))
class PluginsByCategoryView(IsAdminView):
    """
    Manage category of plugin.
    """

    def get(self, request, slug):
        """
        Return the list of plugins belonging to the category ``slug``.

        Raises Http404 for an unknown category slug instead of letting the
        KeyError escape as a 500 server error.
        """
        try:
            list_plugins = PLUGINS_LIST[slug]
        except KeyError:
            raise Http404
        html = render_to_string(
            'administration/plugin/plugin-list-by-categories.html',
            {'list_plugins': list_plugins},
            context_instance=RequestContext(request))
        response = Response(status.HTTP_200_OK, {"html": html})
        return self.render(response)
class PluginsDescription(IsAdminView):
    """
    Return the description panel of a single plugin type.
    """

    def get(self, request, id):
        """
        Render the description of the plugin whose ContentType pk is ``id``.

        Raises Http404 for an unknown content-type id instead of letting
        DoesNotExist escape as a 500 server error.  The parameter keeps the
        name ``id`` (shadowing the builtin) because the URLconf passes it
        as a keyword argument.
        """
        try:
            plugin = ContentType.objects.get(id=id)
        except ContentType.DoesNotExist:
            raise Http404
        info = plugin.model_class().get_description()
        html = render_to_string('administration/plugin/plugin-description.html',
                                {'info': info,
                                 'id': id},
                                context_instance=RequestContext(request))
        response = Response(status.HTTP_200_OK, {"html": html})
        return self.render(response)
class PluginView(IsAdminView):
    """
    Management of the Plugins: render create/edit forms (GET), create
    (PUT), update (POST) and delete (DELETE) a plugin instance.
    """

    def get(self, request, relation_html_id=None):
        """
        Return plugin form to create or update a plugin.

        If `relation_html_id` is not None, we get the form of
        the existing plugin, else we get an empty form
        in order to create a new plugin.

        Parameters :
         - relation_html_id : Html ID of the plugin relation,
                              e.g. 'plugin-relation-2' where
                              '2' is the PluginRelation ID.

        GET parameters : (required if `relation_html_id` is None)
         - placeholder_id : Html ID of the placeholder,
                            e.g. 'content-placeholder-1'.
         - plugin_type : Content Type ID of the new plugin.
        """
        # ----
        # Get a form of an existing plugin
        # ----
        if relation_html_id is not None:
            # Get the ID relation
            pk = check_object_html_id(relation_html_id)[1]
            try:
                # Lookup is restricted to the current website so one
                # site's admin cannot edit another site's plugin.
                obj = PluginRelation.objects.filter(
                    pages__website__exact=request.website,
                    id__exact=pk)[0]
            except IndexError:
                raise Http404
            # Create the plugin form
            plugin = obj.content_object
            PluginFormClass = plugin.get_admin_form()
            form = PluginFormClass(instance=plugin)
            # Get the html of the form
            content = render_to_string('administration/plugin/plugin-edit.html',
                                       {'form': form,
                                        'plugin': plugin,
                                        'plugin_relation_html_id': relation_html_id},
                                       context_instance=RequestContext(request))
            response = Response(status.HTTP_200_OK, {'html': content,})
            return self.render(response)

        # ----
        # Get an empty form to create a new plugin
        # ----
        placeholder_id = request.GET.get('placeholder_id', None)
        plugin_type = request.GET.get('plugin_type', None)

        if placeholder_id and plugin_type:
            # Check if placeholder ID is valid (raises on bad format)
            check_placeholder_html_id(placeholder_id)
            try:
                # Get class of the plugin type
                plugin_ct = CTA().get_for_pk(plugin_type)
                PluginClass = plugin_ct.model_class()
                # Create an empty admin form
                PluginFormClass = PluginClass().get_admin_form()
                plugin_form = PluginFormClass()
                # Get html code of the form
                content = render_to_string('administration/plugin/plugin-create.html',
                                           {'form': plugin_form,
                                            'placeholder_id': placeholder_id,
                                            'plugin_type': plugin_type,},
                                           context_instance=RequestContext(request))
                response = Response(status.HTTP_200_OK, {'html': content,})
                return self.render(response)
            except ContentType.DoesNotExist:
                raise ErrorResponse(status.HTTP_400_BAD_REQUEST,
                                    {'msg': MESSAGES.get('default_error', "")})
        # Bad parameters => 400
        else:
            raise ErrorResponse(status.HTTP_400_BAD_REQUEST,
                                {'msg': MESSAGES.get('default_error', "")})

    def put(self, request):
        """
        Create a new plugin.

        If modifications are correct return confirmation message
        and the new render of the layout section;
        if not, return the plugin form with error messages.

        PUT parameters :
         - placeholder_id : Html ID of the placeholder, e.g. 'content-placeholder-1'.
         - plugin_type : Content type ID of the new plugin.
         - form fields
         - csrf token
        """
        # Get PUT parameters
        request.PUT = self.DATA.copy()
        placeholder_html_id = request.PUT.get('placeholder_id', None)
        plugin_type = request.PUT.get('plugin_type', None)

        if placeholder_html_id and plugin_type:
            # Check if placeholder ID is valid
            placeholder_slug_items = check_placeholder_html_id(placeholder_html_id)
            layout_section_slug = placeholder_slug_items[0]
            # Get form of the plugin type
            try:
                content_type = CTA().get_for_pk(plugin_type)
            except ContentType.DoesNotExist:
                raise ErrorResponse(status.HTTP_400_BAD_REQUEST,
                                    {'msg': MESSAGES.get('default_error', "")})
            PluginClass = content_type.model_class()
            plugin = PluginClass()
            PluginFormClass = plugin.get_admin_form()
            form = PluginFormClass(request.PUT, instance=plugin)

            if form.is_valid():
                # Creation of the new plugin
                new_plugin = form.save()
                # Creation of the relation.  A relation anchored on a
                # website (non-page) placeholder is auto-displayed on
                # newly created pages.
                display_on_new_pages = (not is_page_placeholder_html_id(placeholder_html_id))
                relation = PluginRelation.objects.create(content_object=new_plugin,
                                                         placeholder_slug= placeholder_html_id,
                                                         display_on_new_pages=display_on_new_pages)
                relation.pages.add(request.page)
                # At the moment, we displayed it on everypage
                if display_on_new_pages:
                    for page in request.website.pages.all():
                        relation.pages.add(page)
                # Set right order: orders advance in steps of 10 so items
                # can later be inserted between two existing ones.
                try:
                    last_relation = PluginRelation.objects.filter(
                        pages=request.page,
                        placeholder_slug=placeholder_html_id).order_by('-plugin_order')[0]
                    plugin_order = last_relation.plugin_order + 10
                except IndexError:
                    # First plugin in this placeholder.
                    plugin_order = 0
                relation.plugin_order = plugin_order
                # Saving modifications
                relation.save()

                rendering_context = RenderingContext(request)
                plugin_html_medias = rendering_context\
                    .get_html_medias_for_plugin_relation(relation)
                html_rendering = rendering_context.get_html_layout(layout_section_slug)
                # Sending response
                response = Response(status.HTTP_200_OK,
                                    {'msg': MESSAGES.get('item_edit_success', ''),
                                     'html': html_rendering,
                                     'layout_section_slug': layout_section_slug,
                                     'medias': plugin_html_medias})
                return self.render(response)
            # Invalid Form => 400 BAD REQUEST
            else:
                html = render_to_string('administration/plugin/plugin-create.html',
                                        {'form': form,
                                         'placeholder_id': placeholder_html_id,
                                         'plugin_type': plugin_type},
                                        context_instance=RequestContext(request))
                raise ErrorResponse(status.HTTP_400_BAD_REQUEST,
                                    {'msg': MESSAGES.get('invalid_data', ""),
                                     'html': html})
        # Bad parameters => 400 BAD REQUEST
        else:
            raise ErrorResponse(status.HTTP_400_BAD_REQUEST,
                                {'msg': MESSAGES.get('default_error', "")})

    def post(self, request, relation_html_id):
        """
        Update plugin modifications.

        If modifications are correct return confirmation message
        and the new render of the layout section;
        if not, return the plugin form with error messages.

        Parameters :
         - relation_html_id : PluginRelation Id

        POST parameters :
         - form fields
         - csrf token
        """
        pk = check_object_html_id(relation_html_id)[1]
        try:
            plugin_relation = PluginRelation.objects.filter(
                pages__website__exact=request.website,
                id__exact=pk)[0]
        except IndexError:
            raise Http404
        # Create the plugin form bound to the submitted data
        plugin = plugin_relation.content_object
        PluginFormClass = plugin.get_admin_form()
        form = PluginFormClass(request.POST, instance=plugin)
        if form.is_valid():
            plugin = form.save()
            placeholder_slug_items = check_placeholder_html_id(
                plugin_relation.placeholder_slug)
            layout_section_slug = placeholder_slug_items[0]
            rendering_context = RenderingContext(request)
            html_rendering = rendering_context.get_html_layout(layout_section_slug)
            response = Response(status.HTTP_200_OK,
                                {"msg": MESSAGES.get('item_edit_success',""),
                                 'html': html_rendering,
                                 'layout_section_slug': layout_section_slug})
            return self.render(response)
        else:
            # Invalid form => 400 BAD REQUEST
            # with forms (and errors..)
            html = render_to_string('administration/plugin/plugin-edit.html',
                                    {'form': form,
                                     'plugin': plugin,
                                     'plugin_relation_html_id': relation_html_id},
                                    context_instance = RequestContext(request))
            raise ErrorResponse(status.HTTP_400_BAD_REQUEST,
                                {'msg': MESSAGES.get('invalid_data', ""),
                                 'html': html})

    def delete(self, request, relation_html_id):
        """
        Delete a plugin.

        Parameters :
         - relation_html_id : Html ID carrying the PluginRelation.id.
        """
        pk = check_object_html_id(relation_html_id)[1]
        try:
            obj = PluginRelation.objects.filter(pages__website__exact=request.website,
                                                id__exact=pk)[0]
        except IndexError:
            raise Http404
        # NOTE(review): only the relation is deleted explicitly here;
        # whether the plugin content object is removed depends on the
        # model's delete/cascade behavior — confirm.
        obj.delete()
        response = Response(status.HTTP_200_OK,
                            {"msg": MESSAGES.get('plugin_delete_success', '')})
        return self.render(response)
class PluginRelationView(IsAdminView):
    """
    Management of the PluginRelation: moving/reordering plugins between
    placeholders (including the clipboard).
    """

    def post(self, request):
        """
        Update a PluginRelation.

        Parameters :
         - placeholder_id : HTML ID of the new placeholder, eg "content-placeholder-1"
         - plugins_order[] : Ordered list of plugins HTML IDs in the new placeholder
        """
        placeholder_html_id = request.POST.get('placeholder_id', None)
        plugins_order = request.POST.getlist('plugins_order[]')

        if placeholder_html_id and plugins_order:
            # Check placeholder HTML ID (the clipboard is an allowed
            # extra target in addition to regular placeholders).
            check_placeholder_html_id(placeholder_html_id,
                                      extras_id=[settings.HTML_ID_PLACEHOLDER_CLIPBOARD,])
            # Running order counter; advances by 10 per plugin so items
            # can later be inserted between two existing ones.
            i = 0
            for plugin_id in plugins_order:
                # Check plugin HTML ID
                plugin_type, relation_id = check_object_html_id(plugin_id,
                                                                types=[settings.SLUG_PLUGIN,
                                                                       settings.SLUG_APP])
                # Be careful, can be a Page object or a relation object
                # In case you are moving the app and not a plugin
                if plugin_type == settings.SLUG_APP:
                    # An app cannot be moved to the clipboard.
                    if placeholder_html_id == settings.HTML_ID_PLACEHOLDER_CLIPBOARD:
                        raise ErrorResponse(status.HTTP_400_BAD_REQUEST,
                                            {'msg': MESSAGES.get('default_error', "")})
                    # Get page object to manage app order
                    obj = request.page
                # Plugin object
                else:
                    try:
                        obj = PluginRelation.objects.filter(pages__website__exact=request.website,
                                                            id__exact=relation_id)[0]
                        # 1. This new placeholder_html_id is website placeholder
                        #     - Add all pages
                        #     - Activate auto creation on new pages
                        if not (is_page_placeholder_html_id(placeholder_html_id) or
                                placeholder_html_id == settings.HTML_ID_PLACEHOLDER_CLIPBOARD):
                            obj.pages.add(*request.website.pages.all())
                            if not obj.display_on_new_pages:
                                obj.display_on_new_pages = True
                                obj.save()
                        else:
                            # 2. This new placeholder_html_id is page placeholder
                            #     - Delete all pages
                            #     - Add current pages
                            #     - Deactivate auto creation in new page
                            obj.pages.clear()
                            obj.pages.add(request.page)
                            if obj.display_on_new_pages:
                                obj.display_on_new_pages = False
                                obj.save()
                    except IndexError:
                        raise ErrorResponse(status.HTTP_400_BAD_REQUEST,
                                            {'msg': MESSAGES.get('default_error', "")})

                # Update order
                obj.plugin_order = i
                if plugin_type == settings.SLUG_APP:
                    # NOTE(review): an app does not advance the counter; the
                    # -5 offset slots it between the two neighboring plugins
                    # (plugin orders are multiples of 10) — confirm intent.
                    if i > 5:
                        obj.plugin_order -= 5
                else:
                    i = i + 10

                # Update placeholder slug
                obj.placeholder_slug = placeholder_html_id
                obj.save()

            # Send a 200 Response
            response = Response(status.HTTP_200_OK,
                                {"msg": MESSAGES.get('items_move_success', '')})
            return self.render(response)
        # Bad parameters => 400 BAD REQUEST
        else:
            raise ErrorResponse(status.HTTP_400_BAD_REQUEST,
                                {'msg': MESSAGES.get('default_error', "")})
| bsd-3-clause |
whummer/moto | moto/iam/responses.py | 1 | 74425 | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from .models import iam_backend, User
class IamResponse(BaseResponse):
def attach_role_policy(self):
policy_arn = self._get_param('PolicyArn')
role_name = self._get_param('RoleName')
iam_backend.attach_role_policy(policy_arn, role_name)
template = self.response_template(ATTACH_ROLE_POLICY_TEMPLATE)
return template.render()
def detach_role_policy(self):
role_name = self._get_param('RoleName')
policy_arn = self._get_param('PolicyArn')
iam_backend.detach_role_policy(policy_arn, role_name)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name="DetachRolePolicyResponse")
def attach_group_policy(self):
policy_arn = self._get_param('PolicyArn')
group_name = self._get_param('GroupName')
iam_backend.attach_group_policy(policy_arn, group_name)
template = self.response_template(ATTACH_GROUP_POLICY_TEMPLATE)
return template.render()
def detach_group_policy(self):
policy_arn = self._get_param('PolicyArn')
group_name = self._get_param('GroupName')
iam_backend.detach_group_policy(policy_arn, group_name)
template = self.response_template(DETACH_GROUP_POLICY_TEMPLATE)
return template.render()
def attach_user_policy(self):
policy_arn = self._get_param('PolicyArn')
user_name = self._get_param('UserName')
iam_backend.attach_user_policy(policy_arn, user_name)
template = self.response_template(ATTACH_USER_POLICY_TEMPLATE)
return template.render()
def detach_user_policy(self):
policy_arn = self._get_param('PolicyArn')
user_name = self._get_param('UserName')
iam_backend.detach_user_policy(policy_arn, user_name)
template = self.response_template(DETACH_USER_POLICY_TEMPLATE)
return template.render()
def create_policy(self):
description = self._get_param('Description')
path = self._get_param('Path')
policy_document = self._get_param('PolicyDocument')
policy_name = self._get_param('PolicyName')
policy = iam_backend.create_policy(
description, path, policy_document, policy_name)
template = self.response_template(CREATE_POLICY_TEMPLATE)
return template.render(policy=policy)
def get_policy(self):
policy_arn = self._get_param('PolicyArn')
policy = iam_backend.get_policy(policy_arn)
template = self.response_template(GET_POLICY_TEMPLATE)
return template.render(policy=policy)
def list_attached_role_policies(self):
marker = self._get_param('Marker')
max_items = self._get_int_param('MaxItems', 100)
path_prefix = self._get_param('PathPrefix', '/')
role_name = self._get_param('RoleName')
policies, marker = iam_backend.list_attached_role_policies(
role_name, marker=marker, max_items=max_items, path_prefix=path_prefix)
template = self.response_template(LIST_ATTACHED_ROLE_POLICIES_TEMPLATE)
return template.render(policies=policies, marker=marker)
def list_attached_group_policies(self):
marker = self._get_param('Marker')
max_items = self._get_int_param('MaxItems', 100)
path_prefix = self._get_param('PathPrefix', '/')
group_name = self._get_param('GroupName')
policies, marker = iam_backend.list_attached_group_policies(
group_name, marker=marker, max_items=max_items,
path_prefix=path_prefix)
template = self.response_template(LIST_ATTACHED_GROUP_POLICIES_TEMPLATE)
return template.render(policies=policies, marker=marker)
def list_attached_user_policies(self):
marker = self._get_param('Marker')
max_items = self._get_int_param('MaxItems', 100)
path_prefix = self._get_param('PathPrefix', '/')
user_name = self._get_param('UserName')
policies, marker = iam_backend.list_attached_user_policies(
user_name, marker=marker, max_items=max_items,
path_prefix=path_prefix)
template = self.response_template(LIST_ATTACHED_USER_POLICIES_TEMPLATE)
return template.render(policies=policies, marker=marker)
    def list_policies(self):
        """List managed policies (IAM ListPolicies).

        Defaults mirror the AWS API: MaxItems=100, PathPrefix='/',
        OnlyAttached=False, Scope='All'.  The backend returns a page of
        policies plus a continuation marker.
        """
        marker = self._get_param('Marker')
        max_items = self._get_int_param('MaxItems', 100)
        only_attached = self._get_bool_param('OnlyAttached', False)
        path_prefix = self._get_param('PathPrefix', '/')
        scope = self._get_param('Scope', 'All')
        policies, marker = iam_backend.list_policies(
            marker, max_items, only_attached, path_prefix, scope)
        template = self.response_template(LIST_POLICIES_TEMPLATE)
        return template.render(policies=policies, marker=marker)
def list_entities_for_policy(self):
policy_arn = self._get_param('PolicyArn')
# Options 'User'|'Role'|'Group'|'LocalManagedPolicy'|'AWSManagedPolicy
entity = self._get_param('EntityFilter')
path_prefix = self._get_param('PathPrefix')
# policy_usage_filter = self._get_param('PolicyUsageFilter')
marker = self._get_param('Marker')
max_items = self._get_param('MaxItems')
entity_roles = []
entity_groups = []
entity_users = []
if entity == 'User':
users = iam_backend.list_users(path_prefix, marker, max_items)
if users:
for user in users:
for p in user.managed_policies:
if p == policy_arn:
entity_users.append(user.name)
elif entity == 'Role':
roles = iam_backend.list_roles(path_prefix, marker, max_items)
if roles:
for role in roles:
for p in role.managed_policies:
if p == policy_arn:
entity_roles.append(role.name)
elif entity == 'Group':
groups = iam_backend.list_groups()
if groups:
for group in groups:
for p in group.managed_policies:
if p == policy_arn:
entity_groups.append(group.name)
elif entity == 'LocalManagedPolicy' or entity == 'AWSManagedPolicy':
users = iam_backend.list_users(path_prefix, marker, max_items)
if users:
for user in users:
for p in user.managed_policies:
if p == policy_arn:
entity_users.append(user.name)
roles = iam_backend.list_roles(path_prefix, marker, max_items)
if roles:
for role in roles:
for p in role.managed_policies:
if p == policy_arn:
entity_roles.append(role.name)
groups = iam_backend.list_groups()
if groups:
for group in groups:
for p in group.managed_policies:
if p == policy_arn:
entity_groups.append(group.name)
template = self.response_template(LIST_ENTITIES_FOR_POLICY_TEMPLATE)
return template.render(roles=entity_roles, users=entity_users, groups=entity_groups)
def create_role(self):
role_name = self._get_param('RoleName')
path = self._get_param('Path')
assume_role_policy_document = self._get_param(
'AssumeRolePolicyDocument')
permissions_boundary = self._get_param(
'PermissionsBoundary')
role = iam_backend.create_role(
role_name, assume_role_policy_document, path, permissions_boundary)
template = self.response_template(CREATE_ROLE_TEMPLATE)
return template.render(role=role)
def get_role(self):
role_name = self._get_param('RoleName')
role = iam_backend.get_role(role_name)
template = self.response_template(GET_ROLE_TEMPLATE)
return template.render(role=role)
def delete_role(self):
role_name = self._get_param('RoleName')
iam_backend.delete_role(role_name)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name="DeleteRoleResponse")
def list_role_policies(self):
role_name = self._get_param('RoleName')
role_policies_names = iam_backend.list_role_policies(role_name)
template = self.response_template(LIST_ROLE_POLICIES)
return template.render(role_policies=role_policies_names)
def put_role_policy(self):
role_name = self._get_param('RoleName')
policy_name = self._get_param('PolicyName')
policy_document = self._get_param('PolicyDocument')
iam_backend.put_role_policy(role_name, policy_name, policy_document)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name="PutRolePolicyResponse")
def delete_role_policy(self):
role_name = self._get_param('RoleName')
policy_name = self._get_param('PolicyName')
iam_backend.delete_role_policy(role_name, policy_name)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name="DeleteRolePolicyResponse")
def get_role_policy(self):
role_name = self._get_param('RoleName')
policy_name = self._get_param('PolicyName')
policy_name, policy_document = iam_backend.get_role_policy(
role_name, policy_name)
template = self.response_template(GET_ROLE_POLICY_TEMPLATE)
return template.render(role_name=role_name,
policy_name=policy_name,
policy_document=policy_document)
    def update_assume_role_policy(self):
        """Replace a role's trust (assume-role) policy document.

        NOTE(review): mutates the role object returned by the backend
        directly instead of going through an iam_backend method like the
        other update handlers — confirm this is intentional.
        """
        role_name = self._get_param('RoleName')
        role = iam_backend.get_role(role_name)
        role.assume_role_policy_document = self._get_param('PolicyDocument')
        template = self.response_template(GENERIC_EMPTY_TEMPLATE)
        return template.render(name="UpdateAssumeRolePolicyResponse")
def update_role_description(self):
role_name = self._get_param('RoleName')
description = self._get_param('Description')
role = iam_backend.update_role_description(role_name, description)
template = self.response_template(UPDATE_ROLE_DESCRIPTION_TEMPLATE)
return template.render(role=role)
def update_role(self):
role_name = self._get_param('RoleName')
description = self._get_param('Description')
role = iam_backend.update_role(role_name, description)
template = self.response_template(UPDATE_ROLE_TEMPLATE)
return template.render(role=role)
def create_policy_version(self):
policy_arn = self._get_param('PolicyArn')
policy_document = self._get_param('PolicyDocument')
set_as_default = self._get_param('SetAsDefault')
policy_version = iam_backend.create_policy_version(policy_arn, policy_document, set_as_default)
template = self.response_template(CREATE_POLICY_VERSION_TEMPLATE)
return template.render(policy_version=policy_version)
def get_policy_version(self):
policy_arn = self._get_param('PolicyArn')
version_id = self._get_param('VersionId')
policy_version = iam_backend.get_policy_version(policy_arn, version_id)
template = self.response_template(GET_POLICY_VERSION_TEMPLATE)
return template.render(policy_version=policy_version)
def list_policy_versions(self):
policy_arn = self._get_param('PolicyArn')
policy_versions = iam_backend.list_policy_versions(policy_arn)
template = self.response_template(LIST_POLICY_VERSIONS_TEMPLATE)
return template.render(policy_versions=policy_versions)
def delete_policy_version(self):
policy_arn = self._get_param('PolicyArn')
version_id = self._get_param('VersionId')
iam_backend.delete_policy_version(policy_arn, version_id)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='DeletePolicyVersion')
def create_instance_profile(self):
profile_name = self._get_param('InstanceProfileName')
path = self._get_param('Path', '/')
profile = iam_backend.create_instance_profile(
profile_name, path, role_ids=[])
template = self.response_template(CREATE_INSTANCE_PROFILE_TEMPLATE)
return template.render(profile=profile)
def get_instance_profile(self):
profile_name = self._get_param('InstanceProfileName')
profile = iam_backend.get_instance_profile(profile_name)
template = self.response_template(GET_INSTANCE_PROFILE_TEMPLATE)
return template.render(profile=profile)
def add_role_to_instance_profile(self):
profile_name = self._get_param('InstanceProfileName')
role_name = self._get_param('RoleName')
iam_backend.add_role_to_instance_profile(profile_name, role_name)
template = self.response_template(
ADD_ROLE_TO_INSTANCE_PROFILE_TEMPLATE)
return template.render()
def remove_role_from_instance_profile(self):
profile_name = self._get_param('InstanceProfileName')
role_name = self._get_param('RoleName')
iam_backend.remove_role_from_instance_profile(profile_name, role_name)
template = self.response_template(
REMOVE_ROLE_FROM_INSTANCE_PROFILE_TEMPLATE)
return template.render()
def list_roles(self):
roles = iam_backend.get_roles()
template = self.response_template(LIST_ROLES_TEMPLATE)
return template.render(roles=roles)
def list_instance_profiles(self):
profiles = iam_backend.get_instance_profiles()
template = self.response_template(LIST_INSTANCE_PROFILES_TEMPLATE)
return template.render(instance_profiles=profiles)
def list_instance_profiles_for_role(self):
role_name = self._get_param('RoleName')
profiles = iam_backend.get_instance_profiles_for_role(
role_name=role_name)
template = self.response_template(
LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE)
return template.render(instance_profiles=profiles)
def upload_server_certificate(self):
cert_name = self._get_param('ServerCertificateName')
cert_body = self._get_param('CertificateBody')
path = self._get_param('Path')
private_key = self._get_param('PrivateKey')
cert_chain = self._get_param('CertificateName')
cert = iam_backend.upload_server_cert(
cert_name, cert_body, private_key, cert_chain=cert_chain, path=path)
template = self.response_template(UPLOAD_CERT_TEMPLATE)
return template.render(certificate=cert)
    def list_server_certificates(self, marker=None):
        """List all stored server certificates.

        NOTE(review): unlike the other handlers, ``marker`` is a plain
        keyword argument (default None) rather than being read from the
        request — confirm the dispatcher ever supplies it.
        """
        certs = iam_backend.get_all_server_certs(marker=marker)
        template = self.response_template(LIST_SERVER_CERTIFICATES_TEMPLATE)
        return template.render(server_certificates=certs)
def get_server_certificate(self):
cert_name = self._get_param('ServerCertificateName')
cert = iam_backend.get_server_certificate(cert_name)
template = self.response_template(GET_SERVER_CERTIFICATE_TEMPLATE)
return template.render(certificate=cert)
def delete_server_certificate(self):
cert_name = self._get_param('ServerCertificateName')
iam_backend.delete_server_certificate(cert_name)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name="DeleteServerCertificate")
def create_group(self):
group_name = self._get_param('GroupName')
path = self._get_param('Path', '/')
group = iam_backend.create_group(group_name, path)
template = self.response_template(CREATE_GROUP_TEMPLATE)
return template.render(group=group)
def get_group(self):
group_name = self._get_param('GroupName')
group = iam_backend.get_group(group_name)
template = self.response_template(GET_GROUP_TEMPLATE)
return template.render(group=group)
def list_groups(self):
groups = iam_backend.list_groups()
template = self.response_template(LIST_GROUPS_TEMPLATE)
return template.render(groups=groups)
def list_groups_for_user(self):
user_name = self._get_param('UserName')
groups = iam_backend.get_groups_for_user(user_name)
template = self.response_template(LIST_GROUPS_FOR_USER_TEMPLATE)
return template.render(groups=groups)
def put_group_policy(self):
group_name = self._get_param('GroupName')
policy_name = self._get_param('PolicyName')
policy_document = self._get_param('PolicyDocument')
iam_backend.put_group_policy(group_name, policy_name, policy_document)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name="PutGroupPolicyResponse")
def list_group_policies(self):
group_name = self._get_param('GroupName')
marker = self._get_param('Marker')
max_items = self._get_param('MaxItems')
policies = iam_backend.list_group_policies(group_name,
marker=marker, max_items=max_items)
template = self.response_template(LIST_GROUP_POLICIES_TEMPLATE)
return template.render(name="ListGroupPoliciesResponse",
policies=policies,
marker=marker)
def get_group_policy(self):
group_name = self._get_param('GroupName')
policy_name = self._get_param('PolicyName')
policy_result = iam_backend.get_group_policy(group_name, policy_name)
template = self.response_template(GET_GROUP_POLICY_TEMPLATE)
return template.render(name="GetGroupPolicyResponse", **policy_result)
def create_user(self):
user_name = self._get_param('UserName')
path = self._get_param('Path')
user = iam_backend.create_user(user_name, path)
template = self.response_template(USER_TEMPLATE)
return template.render(action='Create', user=user)
    def get_user(self):
        """Return a single user (IAM GetUser).

        When no UserName parameter is supplied, falls back to the
        caller's own identity (resolved from the request's access key
        id); if that lookup yields nothing, a placeholder
        "default_user" is returned instead of an error.
        """
        user_name = self._get_param('UserName')
        if not user_name:
            access_key_id = self.get_current_user()
            user = iam_backend.get_user_from_access_key_id(access_key_id)
            if user is None:
                user = User("default_user")
        else:
            user = iam_backend.get_user(user_name)
        template = self.response_template(USER_TEMPLATE)
        return template.render(action='Get', user=user)
def list_users(self):
path_prefix = self._get_param('PathPrefix')
marker = self._get_param('Marker')
max_items = self._get_param('MaxItems')
users = iam_backend.list_users(path_prefix, marker, max_items)
template = self.response_template(LIST_USERS_TEMPLATE)
return template.render(action='List', users=users)
def update_user(self):
user_name = self._get_param('UserName')
new_path = self._get_param('NewPath')
new_user_name = self._get_param('NewUserName')
iam_backend.update_user(user_name, new_path, new_user_name)
if new_user_name:
user = iam_backend.get_user(new_user_name)
else:
user = iam_backend.get_user(user_name)
template = self.response_template(USER_TEMPLATE)
return template.render(action='Update', user=user)
def create_login_profile(self):
user_name = self._get_param('UserName')
password = self._get_param('Password')
user = iam_backend.create_login_profile(user_name, password)
template = self.response_template(CREATE_LOGIN_PROFILE_TEMPLATE)
return template.render(user=user)
def get_login_profile(self):
user_name = self._get_param('UserName')
user = iam_backend.get_login_profile(user_name)
template = self.response_template(GET_LOGIN_PROFILE_TEMPLATE)
return template.render(user=user)
def update_login_profile(self):
user_name = self._get_param('UserName')
password = self._get_param('Password')
password_reset_required = self._get_param('PasswordResetRequired')
user = iam_backend.update_login_profile(user_name, password, password_reset_required)
template = self.response_template(UPDATE_LOGIN_PROFILE_TEMPLATE)
return template.render(user=user)
def add_user_to_group(self):
group_name = self._get_param('GroupName')
user_name = self._get_param('UserName')
iam_backend.add_user_to_group(group_name, user_name)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='AddUserToGroup')
def remove_user_from_group(self):
group_name = self._get_param('GroupName')
user_name = self._get_param('UserName')
iam_backend.remove_user_from_group(group_name, user_name)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='RemoveUserFromGroup')
def get_user_policy(self):
user_name = self._get_param('UserName')
policy_name = self._get_param('PolicyName')
policy_document = iam_backend.get_user_policy(user_name, policy_name)
template = self.response_template(GET_USER_POLICY_TEMPLATE)
return template.render(
user_name=user_name,
policy_name=policy_name,
policy_document=policy_document.get('policy_document')
)
def list_user_policies(self):
user_name = self._get_param('UserName')
policies = iam_backend.list_user_policies(user_name)
template = self.response_template(LIST_USER_POLICIES_TEMPLATE)
return template.render(policies=policies)
def put_user_policy(self):
user_name = self._get_param('UserName')
policy_name = self._get_param('PolicyName')
policy_document = self._get_param('PolicyDocument')
iam_backend.put_user_policy(user_name, policy_name, policy_document)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='PutUserPolicy')
def delete_user_policy(self):
user_name = self._get_param('UserName')
policy_name = self._get_param('PolicyName')
iam_backend.delete_user_policy(user_name, policy_name)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='DeleteUserPolicy')
def create_access_key(self):
user_name = self._get_param('UserName')
key = iam_backend.create_access_key(user_name)
template = self.response_template(CREATE_ACCESS_KEY_TEMPLATE)
return template.render(key=key)
def update_access_key(self):
user_name = self._get_param('UserName')
access_key_id = self._get_param('AccessKeyId')
status = self._get_param('Status')
iam_backend.update_access_key(user_name, access_key_id, status)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='UpdateAccessKey')
def get_access_key_last_used(self):
access_key_id = self._get_param('AccessKeyId')
last_used_response = iam_backend.get_access_key_last_used(access_key_id)
template = self.response_template(GET_ACCESS_KEY_LAST_USED_TEMPLATE)
return template.render(user_name=last_used_response["user_name"], last_used=last_used_response["last_used"])
def list_access_keys(self):
user_name = self._get_param('UserName')
keys = iam_backend.get_all_access_keys(user_name)
template = self.response_template(LIST_ACCESS_KEYS_TEMPLATE)
return template.render(user_name=user_name, keys=keys)
def delete_access_key(self):
user_name = self._get_param('UserName')
access_key_id = self._get_param('AccessKeyId')
iam_backend.delete_access_key(access_key_id, user_name)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='DeleteAccessKey')
def deactivate_mfa_device(self):
user_name = self._get_param('UserName')
serial_number = self._get_param('SerialNumber')
iam_backend.deactivate_mfa_device(user_name, serial_number)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='DeactivateMFADevice')
def enable_mfa_device(self):
user_name = self._get_param('UserName')
serial_number = self._get_param('SerialNumber')
authentication_code_1 = self._get_param('AuthenticationCode1')
authentication_code_2 = self._get_param('AuthenticationCode2')
iam_backend.enable_mfa_device(
user_name,
serial_number,
authentication_code_1,
authentication_code_2
)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='EnableMFADevice')
def list_mfa_devices(self):
user_name = self._get_param('UserName')
devices = iam_backend.list_mfa_devices(user_name)
template = self.response_template(LIST_MFA_DEVICES_TEMPLATE)
return template.render(user_name=user_name, devices=devices)
def delete_user(self):
user_name = self._get_param('UserName')
iam_backend.delete_user(user_name)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='DeleteUser')
def delete_login_profile(self):
user_name = self._get_param('UserName')
iam_backend.delete_login_profile(user_name)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='DeleteLoginProfile')
def generate_credential_report(self):
if iam_backend.report_generated():
template = self.response_template(CREDENTIAL_REPORT_GENERATED)
else:
template = self.response_template(CREDENTIAL_REPORT_GENERATING)
iam_backend.generate_report()
return template.render()
def get_credential_report(self):
report = iam_backend.get_credential_report()
template = self.response_template(CREDENTIAL_REPORT)
return template.render(report=report)
def list_account_aliases(self):
aliases = iam_backend.list_account_aliases()
template = self.response_template(LIST_ACCOUNT_ALIASES_TEMPLATE)
return template.render(aliases=aliases)
def create_account_alias(self):
alias = self._get_param('AccountAlias')
iam_backend.create_account_alias(alias)
template = self.response_template(CREATE_ACCOUNT_ALIAS_TEMPLATE)
return template.render()
def delete_account_alias(self):
alias = self._get_param('AccountAlias')
iam_backend.delete_account_alias(alias)
template = self.response_template(DELETE_ACCOUNT_ALIAS_TEMPLATE)
return template.render()
def get_account_authorization_details(self):
filter_param = self._get_multi_param('Filter.member')
account_details = iam_backend.get_account_authorization_details(filter_param)
template = self.response_template(GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE)
return template.render(
instance_profiles=account_details['instance_profiles'],
policies=account_details['managed_policies'],
users=account_details['users'],
groups=account_details['groups'],
roles=account_details['roles'],
get_groups_for_user=iam_backend.get_groups_for_user
)
def create_saml_provider(self):
saml_provider_name = self._get_param('Name')
saml_metadata_document = self._get_param('SAMLMetadataDocument')
saml_provider = iam_backend.create_saml_provider(saml_provider_name, saml_metadata_document)
template = self.response_template(CREATE_SAML_PROVIDER_TEMPLATE)
return template.render(saml_provider=saml_provider)
def update_saml_provider(self):
saml_provider_arn = self._get_param('SAMLProviderArn')
saml_metadata_document = self._get_param('SAMLMetadataDocument')
saml_provider = iam_backend.update_saml_provider(saml_provider_arn, saml_metadata_document)
template = self.response_template(UPDATE_SAML_PROVIDER_TEMPLATE)
return template.render(saml_provider=saml_provider)
def delete_saml_provider(self):
saml_provider_arn = self._get_param('SAMLProviderArn')
iam_backend.delete_saml_provider(saml_provider_arn)
template = self.response_template(DELETE_SAML_PROVIDER_TEMPLATE)
return template.render()
def list_saml_providers(self):
saml_providers = iam_backend.list_saml_providers()
template = self.response_template(LIST_SAML_PROVIDERS_TEMPLATE)
return template.render(saml_providers=saml_providers)
def get_saml_provider(self):
saml_provider_arn = self._get_param('SAMLProviderArn')
saml_provider = iam_backend.get_saml_provider(saml_provider_arn)
template = self.response_template(GET_SAML_PROVIDER_TEMPLATE)
return template.render(saml_provider=saml_provider)
def upload_signing_certificate(self):
user_name = self._get_param('UserName')
cert_body = self._get_param('CertificateBody')
cert = iam_backend.upload_signing_certificate(user_name, cert_body)
template = self.response_template(UPLOAD_SIGNING_CERTIFICATE_TEMPLATE)
return template.render(cert=cert)
def update_signing_certificate(self):
user_name = self._get_param('UserName')
cert_id = self._get_param('CertificateId')
status = self._get_param('Status')
iam_backend.update_signing_certificate(user_name, cert_id, status)
template = self.response_template(UPDATE_SIGNING_CERTIFICATE_TEMPLATE)
return template.render()
def delete_signing_certificate(self):
user_name = self._get_param('UserName')
cert_id = self._get_param('CertificateId')
iam_backend.delete_signing_certificate(user_name, cert_id)
template = self.response_template(DELETE_SIGNING_CERTIFICATE_TEMPLATE)
return template.render()
def list_signing_certificates(self):
user_name = self._get_param('UserName')
certs = iam_backend.list_signing_certificates(user_name)
template = self.response_template(LIST_SIGNING_CERTIFICATES_TEMPLATE)
return template.render(user_name=user_name, certificates=certs)
def list_role_tags(self):
role_name = self._get_param('RoleName')
marker = self._get_param('Marker')
max_items = self._get_param('MaxItems', 100)
tags, marker = iam_backend.list_role_tags(role_name, marker, max_items)
template = self.response_template(LIST_ROLE_TAG_TEMPLATE)
return template.render(tags=tags, marker=marker)
def tag_role(self):
role_name = self._get_param('RoleName')
tags = self._get_multi_param('Tags.member')
iam_backend.tag_role(role_name, tags)
template = self.response_template(TAG_ROLE_TEMPLATE)
return template.render()
def untag_role(self):
role_name = self._get_param('RoleName')
tag_keys = self._get_multi_param('TagKeys.member')
iam_backend.untag_role(role_name, tag_keys)
template = self.response_template(UNTAG_ROLE_TEMPLATE)
return template.render()
LIST_ENTITIES_FOR_POLICY_TEMPLATE = """<ListEntitiesForPolicyResponse>
<ListEntitiesForPolicyResult>
<PolicyRoles>
{% for role in roles %}
<member>
<RoleName>{{ role }}</RoleName>
</member>
{% endfor %}
</PolicyRoles>
<PolicyGroups>
{% for group in groups %}
<member>
<GroupName>{{ group }}</GroupName>
</member>
{% endfor %}
</PolicyGroups>
<IsTruncated>false</IsTruncated>
<PolicyUsers>
{% for user in users %}
<member>
<UserName>{{ user }}</UserName>
</member>
{% endfor %}
</PolicyUsers>
</ListEntitiesForPolicyResult>
<ResponseMetadata>
<RequestId>eb358e22-9d1f-11e4-93eb-190ecEXAMPLE</RequestId>
</ResponseMetadata>
</ListEntitiesForPolicyResponse>"""
ATTACH_ROLE_POLICY_TEMPLATE = """<AttachRolePolicyResponse>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</AttachRolePolicyResponse>"""
DETACH_ROLE_POLICY_TEMPLATE = """<DetachRolePolicyResponse>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</DetachRolePolicyResponse>"""
ATTACH_USER_POLICY_TEMPLATE = """<AttachUserPolicyResponse>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</AttachUserPolicyResponse>"""
DETACH_USER_POLICY_TEMPLATE = """<DetachUserPolicyResponse>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</DetachUserPolicyResponse>"""
ATTACH_GROUP_POLICY_TEMPLATE = """<AttachGroupPolicyResponse>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</AttachGroupPolicyResponse>"""
DETACH_GROUP_POLICY_TEMPLATE = """<DetachGroupPolicyResponse>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</DetachGroupPolicyResponse>"""
CREATE_POLICY_TEMPLATE = """<CreatePolicyResponse>
<CreatePolicyResult>
<Policy>
<Arn>{{ policy.arn }}</Arn>
<AttachmentCount>{{ policy.attachment_count }}</AttachmentCount>
<CreateDate>{{ policy.created_iso_8601 }}</CreateDate>
<DefaultVersionId>{{ policy.default_version_id }}</DefaultVersionId>
<Path>{{ policy.path }}</Path>
<PolicyId>{{ policy.id }}</PolicyId>
<PolicyName>{{ policy.name }}</PolicyName>
<UpdateDate>{{ policy.updated_iso_8601 }}</UpdateDate>
</Policy>
</CreatePolicyResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</CreatePolicyResponse>"""
GET_POLICY_TEMPLATE = """<GetPolicyResponse>
<GetPolicyResult>
<Policy>
<PolicyName>{{ policy.name }}</PolicyName>
<Description>{{ policy.description }}</Description>
<DefaultVersionId>{{ policy.default_version_id }}</DefaultVersionId>
<PolicyId>{{ policy.id }}</PolicyId>
<Path>{{ policy.path }}</Path>
<Arn>{{ policy.arn }}</Arn>
<AttachmentCount>{{ policy.attachment_count }}</AttachmentCount>
<CreateDate>{{ policy.created_iso_8601 }}</CreateDate>
<UpdateDate>{{ policy.updated_iso_8601 }}</UpdateDate>
</Policy>
</GetPolicyResult>
<ResponseMetadata>
<RequestId>684f0917-3d22-11e4-a4a0-cffb9EXAMPLE</RequestId>
</ResponseMetadata>
</GetPolicyResponse>"""
LIST_ATTACHED_ROLE_POLICIES_TEMPLATE = """<ListAttachedRolePoliciesResponse>
<ListAttachedRolePoliciesResult>
{% if marker is none %}
<IsTruncated>false</IsTruncated>
{% else %}
<IsTruncated>true</IsTruncated>
<Marker>{{ marker }}</Marker>
{% endif %}
<AttachedPolicies>
{% for policy in policies %}
<member>
<PolicyName>{{ policy.name }}</PolicyName>
<PolicyArn>{{ policy.arn }}</PolicyArn>
</member>
{% endfor %}
</AttachedPolicies>
</ListAttachedRolePoliciesResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListAttachedRolePoliciesResponse>"""
LIST_ATTACHED_GROUP_POLICIES_TEMPLATE = """<ListAttachedGroupPoliciesResponse>
<ListAttachedGroupPoliciesResult>
{% if marker is none %}
<IsTruncated>false</IsTruncated>
{% else %}
<IsTruncated>true</IsTruncated>
<Marker>{{ marker }}</Marker>
{% endif %}
<AttachedPolicies>
{% for policy in policies %}
<member>
<PolicyName>{{ policy.name }}</PolicyName>
<PolicyArn>{{ policy.arn }}</PolicyArn>
</member>
{% endfor %}
</AttachedPolicies>
</ListAttachedGroupPoliciesResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListAttachedGroupPoliciesResponse>"""
LIST_ATTACHED_USER_POLICIES_TEMPLATE = """<ListAttachedUserPoliciesResponse>
<ListAttachedUserPoliciesResult>
{% if marker is none %}
<IsTruncated>false</IsTruncated>
{% else %}
<IsTruncated>true</IsTruncated>
<Marker>{{ marker }}</Marker>
{% endif %}
<AttachedPolicies>
{% for policy in policies %}
<member>
<PolicyName>{{ policy.name }}</PolicyName>
<PolicyArn>{{ policy.arn }}</PolicyArn>
</member>
{% endfor %}
</AttachedPolicies>
</ListAttachedUserPoliciesResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListAttachedUserPoliciesResponse>"""
LIST_POLICIES_TEMPLATE = """<ListPoliciesResponse>
<ListPoliciesResult>
{% if marker is none %}
<IsTruncated>false</IsTruncated>
{% else %}
<IsTruncated>true</IsTruncated>
<Marker>{{ marker }}</Marker>
{% endif %}
<Policies>
{% for policy in policies %}
<member>
<Arn>{{ policy.arn }}</Arn>
<AttachmentCount>{{ policy.attachment_count }}</AttachmentCount>
<CreateDate>{{ policy.created_iso_8601 }}</CreateDate>
<DefaultVersionId>{{ policy.default_version_id }}</DefaultVersionId>
<Path>{{ policy.path }}</Path>
<PolicyId>{{ policy.id }}</PolicyId>
<PolicyName>{{ policy.name }}</PolicyName>
<UpdateDate>{{ policy.updated_iso_8601 }}</UpdateDate>
</member>
{% endfor %}
</Policies>
</ListPoliciesResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListPoliciesResponse>"""
GENERIC_EMPTY_TEMPLATE = """<{{ name }}Response>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</{{ name }}Response>"""
CREATE_INSTANCE_PROFILE_TEMPLATE = """<CreateInstanceProfileResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<CreateInstanceProfileResult>
<InstanceProfile>
<InstanceProfileId>{{ profile.id }}</InstanceProfileId>
<Roles/>
<InstanceProfileName>{{ profile.name }}</InstanceProfileName>
<Path>{{ profile.path }}</Path>
<Arn>{{ profile.arn }}</Arn>
<CreateDate>{{ profile.created_iso_8601 }}</CreateDate>
</InstanceProfile>
</CreateInstanceProfileResult>
<ResponseMetadata>
<RequestId>974142ee-99f1-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</CreateInstanceProfileResponse>"""
GET_INSTANCE_PROFILE_TEMPLATE = """<GetInstanceProfileResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<GetInstanceProfileResult>
<InstanceProfile>
<InstanceProfileId>{{ profile.id }}</InstanceProfileId>
<Roles>
{% for role in profile.roles %}
<member>
<Path>{{ role.path }}</Path>
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
</member>
{% endfor %}
</Roles>
<InstanceProfileName>{{ profile.name }}</InstanceProfileName>
<Path>{{ profile.path }}</Path>
<Arn>{{ profile.arn }}</Arn>
<CreateDate>{{ profile.created_iso_8601 }}</CreateDate>
</InstanceProfile>
</GetInstanceProfileResult>
<ResponseMetadata>
<RequestId>37289fda-99f2-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</GetInstanceProfileResponse>"""
CREATE_ROLE_TEMPLATE = """<CreateRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<CreateRoleResult>
<Role>
<Path>{{ role.path }}</Path>
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.permissions_boundary %}
<PermissionsBoundary>
<PermissionsBoundaryType>PermissionsBoundaryPolicy</PermissionsBoundaryType>
<PermissionsBoundaryArn>{{ role.permissions_boundary }}</PermissionsBoundaryArn>
</PermissionsBoundary>
{% endif %}
</Role>
</CreateRoleResult>
<ResponseMetadata>
<RequestId>4a93ceee-9966-11e1-b624-b1aEXAMPLE7c</RequestId>
</ResponseMetadata>
</CreateRoleResponse>"""
GET_ROLE_POLICY_TEMPLATE = """<GetRolePolicyResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<GetRolePolicyResult>
<PolicyName>{{ policy_name }}</PolicyName>
<RoleName>{{ role_name }}</RoleName>
<PolicyDocument>{{ policy_document }}</PolicyDocument>
</GetRolePolicyResult>
<ResponseMetadata>
<RequestId>7e7cd8bc-99ef-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</GetRolePolicyResponse>"""
UPDATE_ROLE_TEMPLATE = """<UpdateRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<UpdateRoleResult>
</UpdateRoleResult>
<ResponseMetadata>
<RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
</ResponseMetadata>
</UpdateRoleResponse>"""
UPDATE_ROLE_DESCRIPTION_TEMPLATE = """<UpdateRoleDescriptionResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<UpdateRoleDescriptionResult>
<Role>
<Path>{{ role.path }}</Path>
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.tags %}
<Tags>
{% for tag in role.get_tags() %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
{% endif %}
</Role>
</UpdateRoleDescriptionResult>
<ResponseMetadata>
<RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
</ResponseMetadata>
</UpdateRoleDescriptionResponse>"""
GET_ROLE_TEMPLATE = """<GetRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<GetRoleResult>
<Role>
<Path>{{ role.path }}</Path>
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.tags %}
<Tags>
{% for tag in role.get_tags() %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
{% endif %}
</Role>
</GetRoleResult>
<ResponseMetadata>
<RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
</ResponseMetadata>
</GetRoleResponse>"""
ADD_ROLE_TO_INSTANCE_PROFILE_TEMPLATE = """<AddRoleToInstanceProfileResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>12657608-99f2-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</AddRoleToInstanceProfileResponse>"""
REMOVE_ROLE_FROM_INSTANCE_PROFILE_TEMPLATE = """<RemoveRoleFromInstanceProfileResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>12657608-99f2-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</RemoveRoleFromInstanceProfileResponse>"""
LIST_ROLES_TEMPLATE = """<ListRolesResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ListRolesResult>
<IsTruncated>false</IsTruncated>
<Roles>
{% for role in roles %}
<member>
<Path>{{ role.path }}</Path>
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.permissions_boundary %}
<PermissionsBoundary>
<PermissionsBoundaryType>PermissionsBoundaryPolicy</PermissionsBoundaryType>
<PermissionsBoundaryArn>{{ role.permissions_boundary }}</PermissionsBoundaryArn>
</PermissionsBoundary>
{% endif %}
</member>
{% endfor %}
</Roles>
</ListRolesResult>
<ResponseMetadata>
<RequestId>20f7279f-99ee-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</ListRolesResponse>"""
LIST_ROLE_POLICIES = """<ListRolePoliciesResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ListRolePoliciesResult>
<PolicyNames>
{% for policy_name in role_policies %}
<member>{{ policy_name }}</member>
{% endfor %}
</PolicyNames>
<IsTruncated>false</IsTruncated>
</ListRolePoliciesResult>
<ResponseMetadata>
<RequestId>8c7e1816-99f0-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</ListRolePoliciesResponse>"""
CREATE_POLICY_VERSION_TEMPLATE = """<CreatePolicyVersionResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<CreatePolicyVersionResult>
<PolicyVersion>
<Document>{{ policy_version.document }}</Document>
<VersionId>{{ policy_version.version_id }}</VersionId>
<IsDefaultVersion>{{ policy_version.is_default | lower }}</IsDefaultVersion>
<CreateDate>{{ policy_version.created_iso_8601 }}</CreateDate>
</PolicyVersion>
</CreatePolicyVersionResult>
<ResponseMetadata>
<RequestId>20f7279f-99ee-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</CreatePolicyVersionResponse>"""
GET_POLICY_VERSION_TEMPLATE = """<GetPolicyVersionResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<GetPolicyVersionResult>
<PolicyVersion>
<Document>{{ policy_version.document }}</Document>
<VersionId>{{ policy_version.version_id }}</VersionId>
<IsDefaultVersion>{{ policy_version.is_default | lower }}</IsDefaultVersion>
<CreateDate>{{ policy_version.created_iso_8601 }}</CreateDate>
</PolicyVersion>
</GetPolicyVersionResult>
<ResponseMetadata>
<RequestId>20f7279f-99ee-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</GetPolicyVersionResponse>"""
LIST_POLICY_VERSIONS_TEMPLATE = """<ListPolicyVersionsResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ListPolicyVersionsResult>
<IsTruncated>false</IsTruncated>
<Versions>
{% for policy_version in policy_versions %}
<member>
<Document>{{ policy_version.document }}</Document>
<VersionId>{{ policy_version.version_id }}</VersionId>
<IsDefaultVersion>{{ policy_version.is_default | lower }}</IsDefaultVersion>
<CreateDate>{{ policy_version.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</Versions>
</ListPolicyVersionsResult>
<ResponseMetadata>
<RequestId>20f7279f-99ee-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</ListPolicyVersionsResponse>"""
LIST_INSTANCE_PROFILES_TEMPLATE = """<ListInstanceProfilesResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ListInstanceProfilesResult>
<IsTruncated>false</IsTruncated>
<InstanceProfiles>
{% for instance in instance_profiles %}
<member>
<InstanceProfileId>{{ instance.id }}</InstanceProfileId>
<Roles>
{% for role in instance.roles %}
<member>
<Path>{{ role.path }}</Path>
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
</member>
{% endfor %}
</Roles>
<InstanceProfileName>{{ instance.name }}</InstanceProfileName>
<Path>{{ instance.path }}</Path>
<Arn>{{ instance.arn }}</Arn>
<CreateDate>{{ instance.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</InstanceProfiles>
</ListInstanceProfilesResult>
<ResponseMetadata>
<RequestId>fd74fa8d-99f3-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</ListInstanceProfilesResponse>"""
UPLOAD_CERT_TEMPLATE = """<UploadServerCertificateResponse>
<UploadServerCertificateResult>
<ServerCertificateMetadata>
<ServerCertificateName>{{ certificate.cert_name }}</ServerCertificateName>
{% if certificate.path %}
<Path>{{ certificate.path }}</Path>
{% endif %}
<Arn>{{ certificate.arn }}</Arn>
<UploadDate>2010-05-08T01:02:03.004Z</UploadDate>
<ServerCertificateId>ASCACKCEVSQ6C2EXAMPLE</ServerCertificateId>
<Expiration>2012-05-08T01:02:03.004Z</Expiration>
</ServerCertificateMetadata>
</UploadServerCertificateResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</UploadServerCertificateResponse>"""
LIST_SERVER_CERTIFICATES_TEMPLATE = """<ListServerCertificatesResponse>
<ListServerCertificatesResult>
<IsTruncated>false</IsTruncated>
<ServerCertificateMetadataList>
{% for certificate in server_certificates %}
<member>
<ServerCertificateName>{{ certificate.cert_name }}</ServerCertificateName>
{% if certificate.path %}
<Path>{{ certificate.path }}</Path>
{% endif %}
<Arn>{{ certificate.arn }}</Arn>
<UploadDate>2010-05-08T01:02:03.004Z</UploadDate>
<ServerCertificateId>ASCACKCEVSQ6C2EXAMPLE</ServerCertificateId>
<Expiration>2012-05-08T01:02:03.004Z</Expiration>
</member>
{% endfor %}
</ServerCertificateMetadataList>
</ListServerCertificatesResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListServerCertificatesResponse>"""
GET_SERVER_CERTIFICATE_TEMPLATE = """<GetServerCertificateResponse>
<GetServerCertificateResult>
<ServerCertificate>
<ServerCertificateMetadata>
<ServerCertificateName>{{ certificate.cert_name }}</ServerCertificateName>
{% if certificate.path %}
<Path>{{ certificate.path }}</Path>
{% endif %}
<Arn>{{ certificate.arn }}</Arn>
<UploadDate>2010-05-08T01:02:03.004Z</UploadDate>
<ServerCertificateId>ASCACKCEVSQ6C2EXAMPLE</ServerCertificateId>
<Expiration>2012-05-08T01:02:03.004Z</Expiration>
</ServerCertificateMetadata>
<CertificateBody>{{ certificate.cert_body }}</CertificateBody>
</ServerCertificate>
</GetServerCertificateResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</GetServerCertificateResponse>"""
CREATE_GROUP_TEMPLATE = """<CreateGroupResponse>
<CreateGroupResult>
<Group>
<Path>{{ group.path }}</Path>
<GroupName>{{ group.name }}</GroupName>
<GroupId>{{ group.id }}</GroupId>
<Arn>{{ group.arn }}</Arn>
<CreateDate>{{ group.created_iso_8601 }}</CreateDate>
</Group>
</CreateGroupResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</CreateGroupResponse>"""
GET_GROUP_TEMPLATE = """<GetGroupResponse>
<GetGroupResult>
<Group>
<Path>{{ group.path }}</Path>
<GroupName>{{ group.name }}</GroupName>
<GroupId>{{ group.id }}</GroupId>
<Arn>{{ group.arn }}</Arn>
<CreateDate>{{ group.created_iso_8601 }}</CreateDate>
</Group>
<Users>
{% for user in group.users %}
<member>
<Path>{{ user.path }}</Path>
<UserName>{{ user.name }}</UserName>
<UserId>{{ user.id }}</UserId>
<Arn>{{ user.arn }}</Arn>
</member>
{% endfor %}
</Users>
<IsTruncated>false</IsTruncated>
</GetGroupResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</GetGroupResponse>"""
LIST_GROUPS_TEMPLATE = """<ListGroupsResponse>
<ListGroupsResult>
<Groups>
{% for group in groups %}
<member>
<Path>{{ group.path }}</Path>
<GroupName>{{ group.name }}</GroupName>
<GroupId>{{ group.id }}</GroupId>
<Arn>{{ group.arn }}</Arn>
</member>
{% endfor %}
</Groups>
<IsTruncated>false</IsTruncated>
</ListGroupsResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListGroupsResponse>"""
LIST_GROUPS_FOR_USER_TEMPLATE = """<ListGroupsForUserResponse>
<ListGroupsForUserResult>
<Groups>
{% for group in groups %}
<member>
<Path>{{ group.path }}</Path>
<GroupName>{{ group.name }}</GroupName>
<GroupId>{{ group.id }}</GroupId>
<Arn>{{ group.arn }}</Arn>
<CreateDate>{{ group.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</Groups>
<IsTruncated>false</IsTruncated>
</ListGroupsForUserResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListGroupsForUserResponse>"""
LIST_GROUP_POLICIES_TEMPLATE = """<ListGroupPoliciesResponse>
<ListGroupPoliciesResult>
{% if marker is none %}
<IsTruncated>false</IsTruncated>
{% else %}
<IsTruncated>true</IsTruncated>
<Marker>{{ marker }}</Marker>
{% endif %}
<PolicyNames>
{% for policy in policies %}
<member>{{ policy }}</member>
{% endfor %}
</PolicyNames>
</ListGroupPoliciesResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListGroupPoliciesResponse>"""
GET_GROUP_POLICY_TEMPLATE = """<GetGroupPolicyResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<GetGroupPolicyResult>
<PolicyName>{{ policy_name }}</PolicyName>
<GroupName>{{ group_name }}</GroupName>
<PolicyDocument>{{ policy_document }}</PolicyDocument>
</GetGroupPolicyResult>
<ResponseMetadata>
<RequestId>7e7cd8bc-99ef-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</GetGroupPolicyResponse>"""
USER_TEMPLATE = """<{{ action }}UserResponse>
<{{ action }}UserResult>
<User>
<Path>{{ user.path }}</Path>
<UserName>{{ user.name }}</UserName>
<UserId>{{ user.id }}</UserId>
<CreateDate>{{ user.created_iso_8601 }}</CreateDate>
<Arn>{{ user.arn }}</Arn>
</User>
</{{ action }}UserResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</{{ action }}UserResponse>"""
LIST_USERS_TEMPLATE = """<{{ action }}UsersResponse>
<{{ action }}UsersResult>
<Users>
{% for user in users %}
<member>
<UserId>{{ user.id }}</UserId>
<Path>{{ user.path }}</Path>
<UserName>{{ user.name }}</UserName>
<CreateDate>{{ user.created_iso_8601 }}</CreateDate>
<Arn>{{ user.arn }}</Arn>
</member>
{% endfor %}
</Users>
</{{ action }}UsersResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</{{ action }}UsersResponse>"""
CREATE_LOGIN_PROFILE_TEMPLATE = """<CreateLoginProfileResponse>
<CreateLoginProfileResult>
<LoginProfile>
<UserName>{{ user.name }}</UserName>
<CreateDate>{{ user.created_iso_8601 }}</CreateDate>
</LoginProfile>
</CreateLoginProfileResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</CreateLoginProfileResponse>
"""
GET_LOGIN_PROFILE_TEMPLATE = """<GetLoginProfileResponse>
<GetLoginProfileResult>
<LoginProfile>
<UserName>{{ user.name }}</UserName>
<CreateDate>{{ user.created_iso_8601 }}</CreateDate>
{% if user.password_reset_required %}
<PasswordResetRequired>true</PasswordResetRequired>
{% endif %}
</LoginProfile>
</GetLoginProfileResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</GetLoginProfileResponse>
"""
UPDATE_LOGIN_PROFILE_TEMPLATE = """<UpdateLoginProfileResponse>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</UpdateLoginProfileResponse>
"""
GET_USER_POLICY_TEMPLATE = """<GetUserPolicyResponse>
<GetUserPolicyResult>
<UserName>{{ user_name }}</UserName>
<PolicyName>{{ policy_name }}</PolicyName>
<PolicyDocument>
{{ policy_document }}
</PolicyDocument>
</GetUserPolicyResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</GetUserPolicyResponse>"""
LIST_USER_POLICIES_TEMPLATE = """<ListUserPoliciesResponse>
<ListUserPoliciesResult>
<PolicyNames>
{% for policy in policies %}
<member>{{ policy }}</member>
{% endfor %}
</PolicyNames>
<IsTruncated>false</IsTruncated>
</ListUserPoliciesResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListUserPoliciesResponse>"""
CREATE_ACCESS_KEY_TEMPLATE = """<CreateAccessKeyResponse>
<CreateAccessKeyResult>
<AccessKey>
<UserName>{{ key.user_name }}</UserName>
<AccessKeyId>{{ key.access_key_id }}</AccessKeyId>
<Status>{{ key.status }}</Status>
<SecretAccessKey>{{ key.secret_access_key }}</SecretAccessKey>
<CreateDate>{{ key.created_iso_8601 }}</CreateDate>
</AccessKey>
</CreateAccessKeyResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</CreateAccessKeyResponse>"""
LIST_ACCESS_KEYS_TEMPLATE = """<ListAccessKeysResponse>
<ListAccessKeysResult>
<UserName>{{ user_name }}</UserName>
<AccessKeyMetadata>
{% for key in keys %}
<member>
<UserName>{{ user_name }}</UserName>
<AccessKeyId>{{ key.access_key_id }}</AccessKeyId>
<Status>{{ key.status }}</Status>
<CreateDate>{{ key.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</AccessKeyMetadata>
<IsTruncated>false</IsTruncated>
</ListAccessKeysResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListAccessKeysResponse>"""
GET_ACCESS_KEY_LAST_USED_TEMPLATE = """
<GetAccessKeyLastUsedResponse>
<GetAccessKeyLastUsedResult>
<UserName>{{ user_name }}</UserName>
<AccessKeyLastUsed>
<LastUsedDate>{{ last_used }}</LastUsedDate>
</AccessKeyLastUsed>
</GetAccessKeyLastUsedResult>
</GetAccessKeyLastUsedResponse>
"""
CREDENTIAL_REPORT_GENERATING = """
<GenerateCredentialReportResponse>
<GenerateCredentialReportResult>
<State>STARTED</State>
<Description>No report exists. Starting a new report generation task</Description>
</GenerateCredentialReportResult>
<ResponseMetadata>
<RequestId>fa788a82-aa8a-11e4-a278-1786c418872b"</RequestId>
</ResponseMetadata>
</GenerateCredentialReportResponse>"""
CREDENTIAL_REPORT_GENERATED = """<GenerateCredentialReportResponse>
<GenerateCredentialReportResult>
<State>COMPLETE</State>
</GenerateCredentialReportResult>
<ResponseMetadata>
<RequestId>fa788a82-aa8a-11e4-a278-1786c418872b"</RequestId>
</ResponseMetadata>
</GenerateCredentialReportResponse>"""
CREDENTIAL_REPORT = """<GetCredentialReportResponse>
<GetCredentialReportResult>
<Content>{{ report }}</Content>
<GeneratedTime>2015-02-02T20:02:02Z</GeneratedTime>
<ReportFormat>text/csv</ReportFormat>
</GetCredentialReportResult>
<ResponseMetadata>
<RequestId>fa788a82-aa8a-11e4-a278-1786c418872b"</RequestId>
</ResponseMetadata>
</GetCredentialReportResponse>"""
LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE = """<ListInstanceProfilesForRoleResponse>
<ListInstanceProfilesForRoleResult>
<IsTruncated>false</IsTruncated>
<InstanceProfiles>
{% for profile in instance_profiles %}
<member>
<InstanceProfileId>{{ profile.id }}</InstanceProfileId>
<Roles>
{% for role in profile.roles %}
<member>
<Path>{{ role.path }}</Path>
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
</member>
{% endfor %}
</Roles>
<InstanceProfileName>{{ profile.name }}</InstanceProfileName>
<Path>{{ profile.path }}</Path>
<Arn>{{ profile.arn }}</Arn>
<CreateDate>{{ profile.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</InstanceProfiles>
</ListInstanceProfilesForRoleResult>
<ResponseMetadata>
<RequestId>6a8c3992-99f4-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</ListInstanceProfilesForRoleResponse>"""
LIST_MFA_DEVICES_TEMPLATE = """<ListMFADevicesResponse>
<ListMFADevicesResult>
<MFADevices>
{% for device in devices %}
<member>
<UserName>{{ user_name }}</UserName>
<SerialNumber>{{ device.serial_number }}</SerialNumber>
</member>
{% endfor %}
</MFADevices>
<IsTruncated>false</IsTruncated>
</ListMFADevicesResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListMFADevicesResponse>"""
LIST_ACCOUNT_ALIASES_TEMPLATE = """<ListAccountAliasesResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ListAccountAliasesResult>
<IsTruncated>false</IsTruncated>
<AccountAliases>
{% for alias in aliases %}
<member>{{ alias }}</member>
{% endfor %}
</AccountAliases>
</ListAccountAliasesResult>
<ResponseMetadata>
<RequestId>c5a076e9-f1b0-11df-8fbe-45274EXAMPLE</RequestId>
</ResponseMetadata>
</ListAccountAliasesResponse>"""
CREATE_ACCOUNT_ALIAS_TEMPLATE = """<CreateAccountAliasResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>36b5db08-f1b0-11df-8fbe-45274EXAMPLE</RequestId>
</ResponseMetadata>
</CreateAccountAliasResponse>"""
DELETE_ACCOUNT_ALIAS_TEMPLATE = """<DeleteAccountAliasResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</DeleteAccountAliasResponse>"""
LIST_GROUPS_FOR_USER_TEMPLATE = """<ListGroupsForUserResponse>
<ListGroupsForUserResult>
<Groups>
{% for group in groups %}
<member>
<Path>{{ group.path }}</Path>
<GroupName>{{ group.name }}</GroupName>
<GroupId>{{ group.id }}</GroupId>
<Arn>{{ group.arn }}</Arn>
<CreateDate>{{ group.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</Groups>
<IsTruncated>false</IsTruncated>
</ListGroupsForUserResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListGroupsForUserResponse>"""
GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<GetAccountAuthorizationDetailsResult>
<IsTruncated>false</IsTruncated>
<UserDetailList>
{% for user in users %}
<member>
<GroupList>
{% for group in get_groups_for_user(user.name) %}
<member>{{ group.name }}</member>
{% endfor %}
</GroupList>
<AttachedManagedPolicies>
{% for policy in user.managed_policies %}
<member>
<PolicyName>{{ user.managed_policies[policy].name }}</PolicyName>
<PolicyArn>{{ policy }}</PolicyArn>
</member>
{% endfor %}
</AttachedManagedPolicies>
<UserId>{{ user.id }}</UserId>
<Path>{{ user.path }}</Path>
<UserName>{{ user.name }}</UserName>
<Arn>{{ user.arn }}</Arn>
<CreateDate>{{ user.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</UserDetailList>
<GroupDetailList>
{% for group in groups %}
<member>
<GroupId>{{ group.id }}</GroupId>
<AttachedManagedPolicies>
{% for policy_arn in group.managed_policies %}
<member>
<PolicyName>{{ group.managed_policies[policy_arn].name }}</PolicyName>
<PolicyArn>{{ policy_arn }}</PolicyArn>
</member>
{% endfor %}
</AttachedManagedPolicies>
<GroupName>{{ group.name }}</GroupName>
<Path>{{ group.path }}</Path>
<Arn>{{ group.arn }}</Arn>
<CreateDate>{{ group.created_iso_8601 }}</CreateDate>
<GroupPolicyList>
{% for policy in group.policies %}
<member>
<PolicyName>{{ policy }}</PolicyName>
<PolicyDocument>{{ group.get_policy(policy) }}</PolicyDocument>
</member>
{% endfor %}
</GroupPolicyList>
</member>
{% endfor %}
</GroupDetailList>
<RoleDetailList>
{% for role in roles %}
<member>
<RolePolicyList>
{% for inline_policy in role.policies %}
<member>
<PolicyName>{{ inline_policy }}</PolicyName>
<PolicyDocument>{{ role.policies[inline_policy] }}</PolicyDocument>
</member>
{% endfor %}
</RolePolicyList>
<AttachedManagedPolicies>
{% for policy_arn in role.managed_policies %}
<member>
<PolicyName>{{ role.managed_policies[policy_arn].name }}</PolicyName>
<PolicyArn>{{ policy_arn }}</PolicyArn>
</member>
{% endfor %}
</AttachedManagedPolicies>
<Tags>
{% for tag in role.get_tags() %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
<InstanceProfileList>
{% for profile in instance_profiles %}
<member>
<InstanceProfileId>{{ profile.id }}</InstanceProfileId>
<Roles>
{% for role in profile.roles %}
<member>
<Path>{{ role.path }}</Path>
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
</member>
{% endfor %}
</Roles>
<InstanceProfileName>{{ profile.name }}</InstanceProfileName>
<Path>{{ profile.path }}</Path>
<Arn>{{ profile.arn }}</Arn>
<CreateDate>{{ profile.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</InstanceProfileList>
<Path>{{ role.path }}</Path>
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
</member>
{% endfor %}
</RoleDetailList>
<Policies>
{% for policy in policies %}
<member>
<PolicyName>{{ policy.name }}</PolicyName>
<DefaultVersionId>{{ policy.default_version_id }}</DefaultVersionId>
<PolicyId>{{ policy.id }}</PolicyId>
<Path>{{ policy.path }}</Path>
<PolicyVersionList>
{% for policy_version in policy.versions %}
<member>
<Document>{{ policy_version.document }}</Document>
<IsDefaultVersion>{{ policy_version.is_default | lower }}</IsDefaultVersion>
<VersionId>{{ policy_version.version_id }}</VersionId>
<CreateDate>{{ policy_version.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</PolicyVersionList>
<Arn>{{ policy.arn }}</Arn>
<AttachmentCount>1</AttachmentCount>
<CreateDate>{{ policy.created_iso_8601 }}</CreateDate>
<IsAttachable>true</IsAttachable>
<UpdateDate>{{ policy.updated_iso_8601 }}</UpdateDate>
</member>
{% endfor %}
</Policies>
</GetAccountAuthorizationDetailsResult>
<ResponseMetadata>
<RequestId>92e79ae7-7399-11e4-8c85-4b53eEXAMPLE</RequestId>
</ResponseMetadata>
</GetAccountAuthorizationDetailsResponse>"""
CREATE_SAML_PROVIDER_TEMPLATE = """<CreateSAMLProviderResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<CreateSAMLProviderResult>
<SAMLProviderArn>{{ saml_provider.arn }}</SAMLProviderArn>
</CreateSAMLProviderResult>
<ResponseMetadata>
<RequestId>29f47818-99f5-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</CreateSAMLProviderResponse>"""
LIST_SAML_PROVIDERS_TEMPLATE = """<ListSAMLProvidersResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ListSAMLProvidersResult>
<SAMLProviderList>
{% for saml_provider in saml_providers %}
<member>
<Arn>{{ saml_provider.arn }}</Arn>
<ValidUntil>2032-05-09T16:27:11Z</ValidUntil>
<CreateDate>2012-05-09T16:27:03Z</CreateDate>
</member>
{% endfor %}
</SAMLProviderList>
</ListSAMLProvidersResult>
<ResponseMetadata>
<RequestId>fd74fa8d-99f3-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</ListSAMLProvidersResponse>"""
GET_SAML_PROVIDER_TEMPLATE = """<GetSAMLProviderResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<GetSAMLProviderResult>
<CreateDate>2012-05-09T16:27:11Z</CreateDate>
<ValidUntil>2015-12-31T21:59:59Z</ValidUntil>
<SAMLMetadataDocument>{{ saml_provider.saml_metadata_document }}</SAMLMetadataDocument>
</GetSAMLProviderResult>
<ResponseMetadata>
<RequestId>29f47818-99f5-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</GetSAMLProviderResponse>"""
DELETE_SAML_PROVIDER_TEMPLATE = """<DeleteSAMLProviderResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>c749ee7f-99ef-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</DeleteSAMLProviderResponse>"""
UPDATE_SAML_PROVIDER_TEMPLATE = """<UpdateSAMLProviderResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<UpdateSAMLProviderResult>
<SAMLProviderArn>{{ saml_provider.arn }}</SAMLProviderArn>
</UpdateSAMLProviderResult>
<ResponseMetadata>
<RequestId>29f47818-99f5-11e1-a4c3-27EXAMPLE804</RequestId>
</ResponseMetadata>
</UpdateSAMLProviderResponse>"""
UPLOAD_SIGNING_CERTIFICATE_TEMPLATE = """<UploadSigningCertificateResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<UploadSigningCertificateResult>
<Certificate>
<UserName>{{ cert.user_name }}</UserName>
<CertificateId>{{ cert.id }}</CertificateId>
<CertificateBody>{{ cert.body }}</CertificateBody>
<Status>{{ cert.status }}</Status>
</Certificate>
</UploadSigningCertificateResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</UploadSigningCertificateResponse>"""
UPDATE_SIGNING_CERTIFICATE_TEMPLATE = """<UpdateSigningCertificateResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
</ResponseMetadata>
</UpdateSigningCertificateResponse>"""
DELETE_SIGNING_CERTIFICATE_TEMPLATE = """<DeleteSigningCertificateResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</DeleteSigningCertificateResponse>"""
LIST_SIGNING_CERTIFICATES_TEMPLATE = """<ListSigningCertificatesResponse>
<ListSigningCertificatesResult>
<UserName>{{ user_name }}</UserName>
<Certificates>
{% for cert in certificates %}
<member>
<UserName>{{ user_name }}</UserName>
<CertificateId>{{ cert.id }}</CertificateId>
<CertificateBody>{{ cert.body }}</CertificateBody>
<Status>{{ cert.status }}</Status>
</member>
{% endfor %}
</Certificates>
<IsTruncated>false</IsTruncated>
</ListSigningCertificatesResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListSigningCertificatesResponse>"""
TAG_ROLE_TEMPLATE = """<TagRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
</ResponseMetadata>
</TagRoleResponse>"""
LIST_ROLE_TAG_TEMPLATE = """<ListRoleTagsResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ListRoleTagsResult>
<IsTruncated>{{ 'true' if marker else 'false' }}</IsTruncated>
{% if marker %}
<Marker>{{ marker }}</Marker>
{% endif %}
<Tags>
{% for tag in tags %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
</ListRoleTagsResult>
<ResponseMetadata>
<RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
</ResponseMetadata>
</ListRoleTagsResponse>"""
UNTAG_ROLE_TEMPLATE = """<UntagRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
</ResponseMetadata>
</UntagRoleResponse>"""
| apache-2.0 |
wuwenbin2/onos_bgp_evpn | tools/test/scenarios/bin/create-flow.py | 22 | 1317 | #! /usr/bin/env python
import requests
from requests.auth import HTTPBasicAuth
import sys
# Python 2 STC helper script: install a simple port-forwarding flow on an
# ONOS device via the REST API and print the flow's Location URL for the
# test framework to capture.
# Usage: create-flow.py <onos-node> <name> <device-id> <in-port> <out-port>
if len(sys.argv) != 6:
    print "usage: create-flow onos-node name device in-port out-port"
    sys.exit(1)
node = sys.argv[1]
name = sys.argv[2]
device = sys.argv[3]
inPort = sys.argv[4]
outPort = sys.argv[5]
# JSON body built via str.format: '{{'/'}}' are literal-brace escapes; the
# two bare '{}' placeholders receive inPort and outPort below.
# NOTE(review): placeholders are filled in template order — output port
# first (treatment), then input port (selector); see the format() call.
flowJsonTemplate = \
    '{{' + \
    '"priority": 1,' + \
    '"isPermanent": true,' + \
    '"treatment": {{' + \
    '"instructions": [' + \
    '{{' + \
    '"type": "OUTPUT",' + \
    '"port": {}' + \
    '}}' + \
    ']' + \
    '}},' + \
    '"selector": {{' + \
    '"criteria": [' + \
    '{{' + \
    '"type": "IN_PORT",' + \
    '"port": {}' + \
    '}}' + \
    ']' + \
    '}}' + \
    '}}'
flowJson = flowJsonTemplate.format(inPort, outPort)
# POST the flow; ONOS REST API uses basic auth (default onos/rocks creds).
intentRequest = requests.post('http://' + node + ':8181/onos/v1/flows/' + device,
                              auth=HTTPBasicAuth('onos', 'rocks'),
                              data=flowJson)
# 201 Created is the only success status; anything else aborts the scenario.
if intentRequest.status_code != 201:
    print intentRequest.text
    sys.exit(1)
# Expose the created flow's URL as an STC variable, e.g. "<name>Location=...".
location = intentRequest.headers["location"]
print "@stc " + name + "Location=" + location
sys.exit(0)
| apache-2.0 |
anguoyang/SMQTK | python/smqtk/indexing/naive_bayes.py | 1 | 9147 | """
LICENCE
-------
Copyright 2015 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
from . import Indexer
import cPickle
import os.path as osp
import numpy
from sklearn.naive_bayes import MultinomialNB
import smqtk_config
from smqtk.utils import safe_create_dir, SimpleTimer
class NaiveBayesMultinomial (Indexer):
    """
    Indexer that ranks indexed elements by the positive-class probability of
    a multinomial naive Bayes classifier, trained per ``rank()`` call on the
    features of the given positive/negative exemplar UIDs.

    Model state is a UID list plus a parallel feature matrix, persisted as
    files under the configured data directory.
    """

    def __init__(self, data_dir):
        """
        :param data_dir: Directory for this indexer's model files, relative
            to the SMQTK data root (``smqtk_config.DATA_DIR``).
        :type data_dir: str
        """
        self.data_dir = osp.join(smqtk_config.DATA_DIR, data_dir)

        # Array of UIDs in the index the UID refers to in these internal
        # structures
        #: :type: list[object]
        self._uid_array = None
        # Reverse mapping of element UID to its row index in _feature_mat
        #: :type: dict of int
        self._uid2idx_map = None

        # Matrix of features; row i is the feature vector of _uid_array[i]
        #: :type: numpy.core.multiarray.ndarray
        self._feature_mat = None

        if self.has_model_files():
            self._load_model_files()

    @property
    def uid_list_filepath(self):
        """Path of the pickled UID list model file."""
        return osp.join(self.data_dir, "uid_list.pickle")

    @property
    def feature_mat_filepath(self):
        """Path of the saved numpy feature matrix model file."""
        return osp.join(self.data_dir, "feature_mat.npy")

    def has_model_files(self):
        """Return True if both persisted model files exist on disk."""
        return (osp.isfile(self.uid_list_filepath)
                and osp.isfile(self.feature_mat_filepath))

    def _load_model_files(self):
        """
        Load the UID list and feature matrix from disk and rebuild the
        UID-to-row-index mapping.
        """
        with open(self.uid_list_filepath, 'rb') as infile:
            #: :type: list[object]
            self._uid_array = cPickle.load(infile)
        #: :type: numpy.core.multiarray.ndarray
        self._feature_mat = numpy.load(self.feature_mat_filepath)

        # Mapping of element UID to array/matrix index position
        #: :type: dict of int
        self._uid2idx_map = {}
        for idx, uid in enumerate(self._uid_array):
            self._uid2idx_map[uid] = idx

    def has_model(self):
        """
        :return: True if this indexer has a valid initialized model for
            extension and ranking (or doesn't need one to perform those tasks).
        :rtype: bool
        """
        return (
            self._uid_array is not None
            and self._feature_mat is not None
            and 0 not in self._feature_mat.shape  # has dimensionality
        )

    def generate_model(self, descriptor_map, parallel=None, **kwargs):
        """
        Generate this indexers data-model using the given features,
        saving it to files in the configured data directory.

        :raises RuntimeError: Precaution error when there is an existing data
            model for this indexer. Manually delete or move the existing
            model before computing another one.

            Specific implementations may error on other things. See the specific
            implementations for more details.

        :raises ValueError: The given feature map had no content.

        :param descriptor_map: Mapping of integer IDs to feature data. All feature
            data must be of the same size!
        :type descriptor_map: dict of (int, numpy.core.multiarray.ndarray)

        :param parallel: Optionally specification of how many processors to use
            when pooling sub-tasks. If None, we attempt to use all available
            cores.
        :type parallel: int
        """
        super(NaiveBayesMultinomial, self).generate_model(descriptor_map, parallel)

        num_features = len(descriptor_map)
        ordered_uids = sorted(descriptor_map.keys())

        sample_feature = descriptor_map[ordered_uids[0]]
        feature_len = len(sample_feature)

        # Pre-allocating arrays
        self._uid_array = []
        # BUG FIX: the UID-to-index map was previously never initialized or
        # populated here (only by _load_model_files), so calling rank() or
        # extend_model() on the same instance immediately after model
        # generation failed on the still-None map.
        self._uid2idx_map = {}
        self._feature_mat = numpy.zeros(
            (num_features, feature_len), dtype=sample_feature.dtype
        )

        self.log.info("Populating feature matrix")
        for i, (uid, feat) in enumerate(descriptor_map.iteritems()):
            self._uid_array.append(uid)
            self._uid2idx_map[uid] = i
            self._feature_mat[i] = feat

        with SimpleTimer("Saving data files", self.log.info):
            safe_create_dir(self.data_dir)
            with open(self.uid_list_filepath, 'wb') as ofile:
                cPickle.dump(self._uid_array, ofile)
            numpy.save(self.feature_mat_filepath, self._feature_mat)

    def extend_model(self, uid_feature_map, parallel=None):
        """
        Extend, in memory, the current model with the given feature elements.
        Online extensions are not saved to data files.

        NOTE: For now, if there is currently no data model created for this
        indexer / descriptor combination, we will error. In the future, I
        would imagine a new model would be created.

        :raises RuntimeError: No current model.

        :param uid_feature_map: Mapping of integer IDs to features to extend this
            indexer's model with.
        :type uid_feature_map: dict of (collections.Hashable, numpy.core.multiarray.ndarray)

        :param parallel: Optionally specification of how many processors to use
            when pooling sub-tasks. If None, we attempt to use all available
            cores. Not all implementation support parallel model extension.
        :type parallel: int
        """
        super(NaiveBayesMultinomial, self).extend_model(uid_feature_map, parallel)

        # Shortcut when we're not given anything to actually process
        if not uid_feature_map:
            self.log.debug("No new features to extend")
            return

        # Check UID intersection
        with SimpleTimer("Checking UID uniqueness", self.log.debug):
            cur_uids = set(self._uid_array)
            intersection = cur_uids.intersection(uid_feature_map.keys())
            if intersection:
                raise ValueError("The following IDs are already present in the "
                                 "indexer's model: %s" % tuple(intersection))

        # Check feature consistency
        # - Assuming that there is are least one feature in our current model...
        with SimpleTimer("Checking input feature shape", self.log.debug):
            example_feat = self._feature_mat[0]
            for feat in uid_feature_map.values():
                if feat.shape[0] != example_feat.shape[0]:
                    # BUG FIX: the message previously formatted
                    # ``example_feat.shape[1]`` (a bare int, and the wrong
                    # axis for a 1-D row) against ``feat.shape`` (a tuple);
                    # report both as shapes for a coherent error.
                    raise ValueError("One or more features provided are not of "
                                     "the correct shape! Found %s when we "
                                     "require %s"
                                     % (feat.shape, example_feat.shape))
            del example_feat  # Deleting so we can resize later in the function

        # Extend data structures
        # - UID and Feature matrix can be simply resized in-place as we are
        #   strictly adding to the end of the structure in memory.
        self.log.debug("Sorting feature UIDs")
        new_uids = sorted(uid_feature_map.keys())

        self.log.debug("Calculating before and after sizes.")
        num_features_before = self._feature_mat.shape[0]
        num_features_after = num_features_before + len(uid_feature_map)

        # NOTE(review): ndarray.resize raises if other references to the
        # array exist (refcheck) -- callers must not hold on to the matrix.
        with SimpleTimer("Resizing uid/feature matrices", self.log.debug):
            self._feature_mat.resize((num_features_after,
                                      self._feature_mat.shape[1]))

        with SimpleTimer("Adding to matrices", self.log.debug):
            for i in range(num_features_before, num_features_after):
                i_uid = new_uids[i-num_features_before]
                self._uid_array.append(i_uid)
                assert len(self._uid_array) == i+1
                self._uid2idx_map[i_uid] = i
                self._feature_mat[i] = uid_feature_map[i_uid]

    def rank(self, pos_ids, neg_ids=()):
        """
        Train a MultinomialNB on the features of the given positive/negative
        exemplar UIDs and return a mapping of every indexed UID to its
        positive-class probability.
        """
        super(NaiveBayesMultinomial, self).rank(pos_ids, neg_ids)

        num_pos = len(pos_ids)
        num_neg = len(neg_ids)

        # Stack positive rows first, then negative rows, as training data.
        train = numpy.ndarray((num_pos + num_neg, self._feature_mat.shape[1]),
                              dtype=self._feature_mat.dtype)
        train[:num_pos, :] = \
            self._feature_mat[tuple(self._uid2idx_map[uid] for uid in pos_ids), :]
        train[num_pos:num_pos+num_neg, :] = \
            self._feature_mat[tuple(self._uid2idx_map[uid] for uid in neg_ids), :]

        # Positive elements are label 1, negatives are label 0
        labels = numpy.concatenate((numpy.ones(len(pos_ids)),
                                    numpy.zeros(len(neg_ids))))

        # Only really care about probability of positive, so just keeping that
        # column.
        mnb = MultinomialNB()
        probs = mnb.fit(train, labels).predict_proba(self._feature_mat)[:, 1]

        return dict(zip(self._uid_array, probs))

    def reset(self):
        """
        Reset this indexer to its original state, i.e. removing any model
        extension that may have occurred.

        :raises RuntimeError: Unable to reset due to lack of available model.
        """
        super(NaiveBayesMultinomial, self).reset()
        self._load_model_files()
# Module export of available indexer implementations — presumably consumed
# by SMQTK's plugin discovery machinery (TODO confirm against loader code).
INDEXER_CLASS = [
    NaiveBayesMultinomial
]
| bsd-3-clause |
coinecrypto/coine | contrib/pyminer/pyminer.py | 1257 | 6438 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to sleep before retrying after an RPC/transport error.
ERR_SLEEP = 15
# Initial upper bound on nonces scanned per getwork unit; each Miner tunes
# its own copy after every pass (see Miner.iterate).
MAX_NONCE = 1000000L
# Global configuration key/value store, populated from the config file in
# the __main__ block below.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
	"""Minimal JSON-RPC 1.1 client for a bitcoind node (Python 2)."""
	# Request id counter.
	# NOTE(review): ``self.OBJID += 1`` reads the class attribute but
	# rebinds it on the instance, so ids are per-instance after the first
	# call — confirm that is intended.
	OBJID = 1

	def __init__(self, host, port, username, password):
		# Pre-compute the HTTP Basic auth header for reuse on every call.
		authpair = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(authpair))
		# Persistent HTTP connection; 30-second timeout, no strict mode.
		self.conn = httplib.HTTPConnection(host, port, False, 30)
	def rpc(self, method, params=None):
		"""Issue one JSON-RPC call.

		Returns the call's 'result' on success, the server's 'error'
		object on an RPC-level error, or None on transport/decode
		failure (after printing a diagnostic)."""
		self.OBJID += 1
		obj = { 'version' : '1.1',
			'method' : method,
			'id' : self.OBJID }
		if params is None:
			obj['params'] = []
		else:
			obj['params'] = params
		self.conn.request('POST', '/', json.dumps(obj),
			{ 'Authorization' : self.authhdr,
			  'Content-type' : 'application/json' })

		resp = self.conn.getresponse()
		if resp is None:
			print "JSON-RPC: no response"
			return None

		body = resp.read()
		resp_obj = json.loads(body)
		if resp_obj is None:
			print "JSON-RPC: cannot JSON-decode body"
			return None
		# RPC-level error: hand the error object back to the caller.
		if 'error' in resp_obj and resp_obj['error'] != None:
			return resp_obj['error']
		if 'result' not in resp_obj:
			print "JSON-RPC: no result in object"
			return None

		return resp_obj['result']
	def getblockcount(self):
		"""Convenience wrapper for the 'getblockcount' RPC."""
		return self.rpc('getblockcount')
	def getwork(self, data=None):
		"""Fetch work (data=None) or submit a solution (data given)."""
		return self.rpc('getwork', data)
def uint32(x):
	"""Truncate *x* to an unsigned 32-bit value."""
	return x & 0xffffffffL
def bytereverse(x):
	"""Byte-swap a 32-bit word, e.g. 0xAABBCCDD -> 0xDDCCBBAA."""
	return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
			(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
	"""Byte-swap each 32-bit word of a buffer in place-order.

	Word positions are preserved; only the byte order inside every 4-byte
	word is reversed.  Buffer length is assumed to be a multiple of 4."""
	out_words = []
	for i in range(0, len(in_buf), 4):
		word = struct.unpack('@I', in_buf[i:i+4])[0]
		out_words.append(struct.pack('@I', bytereverse(word)))
	return ''.join(out_words)
def wordreverse(in_buf):
	"""Reverse the order of the 4-byte words in a buffer.

	Byte order inside each word is preserved; only whole-word positions
	are flipped (a trailing partial word, if any, is kept as-is)."""
	words = [in_buf[off:off + 4] for off in range(0, len(in_buf), 4)]
	return ''.join(reversed(words))
class Miner:
	"""Single-process getwork miner loop (Python 2)."""
	def __init__(self, id):
		# Worker identifier; only used in hashmeter log lines.
		self.id = id
		# Nonces to scan per work unit; re-tuned after every pass to
		# target settings['scantime'] seconds of hashing.
		self.max_nonce = MAX_NONCE

	def work(self, datastr, targetstr):
		"""Scan nonces over one getwork unit.

		Returns (hashes_done, nonce_bin): nonce_bin is the winning
		4-byte little-endian nonce, or None if the range was exhausted
		without meeting the target."""
		# decode work data hex string to binary
		static_data = datastr.decode('hex')
		static_data = bufreverse(static_data)
		# the first 76b of 80b do not change
		blk_hdr = static_data[:76]
		# decode 256-bit target value
		targetbin = targetstr.decode('hex')
		targetbin = targetbin[::-1]	# byte-swap and dword-swap
		targetbin_str = targetbin.encode('hex')
		target = long(targetbin_str, 16)
		# pre-hash first 76b of block header
		static_hash = hashlib.sha256()
		static_hash.update(blk_hdr)
		for nonce in xrange(self.max_nonce):
			# encode 32-bit nonce value
			nonce_bin = struct.pack("<I", nonce)
			# hash final 4b, the nonce value
			hash1_o = static_hash.copy()
			hash1_o.update(nonce_bin)
			hash1 = hash1_o.digest()
			# sha256 hash of sha256 hash
			hash_o = hashlib.sha256()
			hash_o.update(hash1)
			hash = hash_o.digest()
			# quick test for winning solution: high 32 bits zero?
			if hash[-4:] != '\0\0\0\0':
				continue
			# convert binary hash to 256-bit Python long
			hash = bufreverse(hash)
			hash = wordreverse(hash)
			hash_str = hash.encode('hex')
			l = long(hash_str, 16)
			# proof-of-work test:  hash < target
			if l < target:
				print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
				return (nonce + 1, nonce_bin)
			else:
				print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)
		# Range exhausted with no solution.
		return (nonce + 1, None)

	def submit_work(self, rpc, original_data, nonce_bin):
		"""Splice the winning nonce into the original work hex (chars
		152:160 are the nonce field) and submit upstream via getwork."""
		nonce_bin = bufreverse(nonce_bin)
		nonce = nonce_bin.encode('hex')
		solution = original_data[:152] + nonce + original_data[160:256]
		param_arr = [ solution ]
		result = rpc.getwork(param_arr)
		print time.asctime(), "--> Upstream RPC result:", result

	def iterate(self, rpc):
		"""Fetch one work unit, scan it, retune max_nonce, submit any
		solution.  Sleeps ERR_SLEEP on malformed/missing work."""
		work = rpc.getwork()
		if work is None:
			time.sleep(ERR_SLEEP)
			return
		if 'data' not in work or 'target' not in work:
			time.sleep(ERR_SLEEP)
			return
		time_start = time.time()
		(hashes_done, nonce_bin) = self.work(work['data'],
						     work['target'])
		time_end = time.time()
		time_diff = time_end - time_start
		# Scale next pass so it takes roughly settings['scantime']
		# seconds, clamped below the 32-bit nonce ceiling.
		self.max_nonce = long(
			(hashes_done * settings['scantime']) / time_diff)
		if self.max_nonce > 0xfffffffaL:
			self.max_nonce = 0xfffffffaL
		if settings['hashmeter']:
			print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
			      self.id, hashes_done,
			      (hashes_done / 1000.0) / time_diff)
		if nonce_bin is not None:
			self.submit_work(rpc, work['data'], nonce_bin)

	def loop(self):
		"""Main worker loop: connect once, then iterate forever."""
		rpc = BitcoinRPC(settings['host'], settings['port'],
				 settings['rpcuser'], settings['rpcpass'])
		if rpc is None:
			return
		while True:
			self.iterate(rpc)
def miner_thread(id):
	"""Worker-process entry point: run one Miner's main loop forever."""
	Miner(id).loop()
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print "Usage: pyminer.py CONFIG-FILE"
		sys.exit(1)

	# Parse the key=value config file; '#' lines are comments, anything
	# that doesn't match key=value is silently skipped.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue
		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()

	# Fill in defaults for optional settings; rpcuser/rpcpass are required.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 8332
	if 'threads' not in settings:
		settings['threads'] = 1
	if 'hashmeter' not in settings:
		settings['hashmeter'] = 0
	if 'scantime' not in settings:
		settings['scantime'] = 30L
	if 'rpcuser' not in settings or 'rpcpass' not in settings:
		print "Missing username and/or password in cfg file"
		sys.exit(1)

	# Config values arrive as strings; coerce the numeric ones.
	settings['port'] = int(settings['port'])
	settings['threads'] = int(settings['threads'])
	settings['hashmeter'] = int(settings['hashmeter'])
	settings['scantime'] = long(settings['scantime'])

	# Despite the names, workers are separate *processes*, one per
	# configured thread, started a second apart to stagger RPC load.
	thr_list = []
	for thr_id in range(settings['threads']):
		p = Process(target=miner_thread, args=(thr_id,))
		p.start()
		thr_list.append(p)
		time.sleep(1)			# stagger threads

	print settings['threads'], "mining threads started"

	print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
	try:
		for thr_proc in thr_list:
			thr_proc.join()
	except KeyboardInterrupt:
		pass
	print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.