| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
ucrawler/cp-uc | libs/html5lib/treebuilders/__init__.py | 1730 | 3405 | """A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
treebuilders._base.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing the node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
an xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
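# Example sketch (not part of the original module): typical use of
# getTreeBuilder() together with an html5lib parser, as described in the
# docstring above. The html5lib import and the sample markup are illustrative
# assumptions, not code from this file.
#
#     import html5lib
#     TreeBuilder = getTreeBuilder("etree")   # or "dom" / "lxml"
#     parser = html5lib.HTMLParser(tree=TreeBuilder)
#     document = parser.parse("<p>Hello <b>world</b></p>")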
| gpl-3.0 |
nuxeh/morph | distbuild/sm.py | 3 | 5270 | # mainloop/sm.py -- state machine abstraction
#
# Copyright (C) 2012, 2014-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
classnamepat = re.compile(r"<class '(?P<name>.*)'>")
class StateMachine(object):
'''A state machine abstraction.
The caller may specify callbacks for events coming from specific
event sources. An event source might, for example, be a socket
file descriptor, and the event might be incoming data from the
socket. The callback would then process the data, perhaps by
collecting it into a buffer and parsing out messages from it.
A callback gets the event source and event as arguments. It returns
the new state, and a list of new events to handle.
A callback may return or yield new events, which will be handled
eventually. They may or may not be handled in order.
There can only be one callback for one state, source, and event
class combination.
States are represented by unique objects, e.g., strings containing
the names of the states. When a machine wants to stop, it sets its
state to None.
'''
def __init__(self, initial_state):
self._transitions = {}
self.state = self._initial_state = initial_state
self.debug_transitions = False
def setup(self):
'''Set up machine for execution.
This is called when the machine is added to the main loop.
'''
def _key(self, state, event_source, event_class):
return (state, event_source, event_class)
def add_transition(self, state, source, event_class, new_state, callback):
'''Add a transition to the state machine.
When the state machine is in the given state, and an event of
a given type comes from a given source, move the state machine
to the new state and call the callback function.
'''
key = self._key(state, source, event_class)
assert key not in self._transitions, \
'Transition %s already registered' % str(key)
self._transitions[key] = (new_state, callback)
def add_transitions(self, specification):
'''Add many transitions.
The specification is a list of transitions.
Each transition is a tuple of the arguments given to
``add_transition``.
'''
for t in specification:
self.add_transition(*t)
def handle_event(self, event_source, event):
'''Handle a given event.
Return list of new events to handle.
'''
key = self._key(self.state, event_source, event.__class__)
if key not in self._transitions:
if self.debug_transitions: # pragma: no cover
prefix = '%s: handle_event: ' % self.__class__.__name__
logging.debug(prefix + 'not relevant for us: %s' % repr(event))
logging.debug(prefix + 'key: %s', repr(key))
logging.debug(prefix + 'state: %s', repr(self.state))
return []
new_state, callback = self._transitions[key]
if self.debug_transitions: # pragma: no cover
logging.debug(
'%s: state change %s -> %s callback=%s' %
(self.__class__.__name__, self.state, new_state,
str(callback)))
self.state = new_state
if callback is not None:
ret = callback(event_source, event)
if ret is None:
return []
else:
return list(ret)
else:
return []
def dump_dot(self, filename): # pragma: no cover
'''Write a Graphviz DOT file for the state machine.'''
with open(filename, 'w') as f:
f.write('digraph %s {\n' % self._classname(self.__class__))
first = True
for key in self._transitions:
state, src, event_class = key
if first:
f.write('"START" -> "%s" [label=""];\n' %
self._initial_state)
first = False
new_state, callback = self._transitions[key]
if new_state is None:
new_state = 'END'
f.write('"%s" -> "%s" [label="%s"];\n' %
(state, new_state, self._classname(event_class)))
f.write('}\n')
def _classname(self, klass): # pragma: no cover
s = str(klass)
m = classnamepat.match(s)
if m:
return m.group('name').split('.')[-1]
else:
return s
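# Example sketch (not part of the original module): wiring up a trivial
# machine with add_transitions(), as described in the class docstring. The
# Ping event class, the 'pinger' source name and the states below are
# illustrative assumptions only.
#
#     class Ping(object):
#         pass
#
#     def on_ping(event_source, event):
#         logging.debug('got ping from %s', event_source)
#         return []                      # no follow-up events
#
#     sm = StateMachine('idle')
#     sm.add_transitions([
#         # (state, source, event class, new state, callback)
#         ('idle', 'pinger', Ping, 'idle', on_ping),
#     ])
#     sm.setup()
#     sm.handle_event('pinger', Ping())  # calls on_ping, stays in 'idle'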
| gpl-2.0 |
JK-River/RobotAIEngine | query_analysis/lib/status.py | 1 | 1233 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2016, 小忆机器人
All rights reserved.
Summary: manages the robot's status
Author: 余菲
Created: 16/10/23
"""
import json
import redis
status_redis = redis.Redis('127.0.0.1', '6549', 0, socket_timeout=2)
class RobotStatus(object):
"""
Manages the robot's status
"""
def __init__(self):
pass
@staticmethod
def get_robot_status(robot_code):
"""
Get the current status for robot_code
:param robot_code: robot code
:return:
"""
status = status_redis.lindex('status_{}'.format(robot_code), 0)
return json.loads(status) if status else None
@staticmethod
def set_robot_status(robot_code, status_info):
"""
Set the robot's status
:param robot_code: robot code
:param status_info: status information
:return:
"""
status_redis.lpush('status_{}'.format(robot_code), json.dumps(status_info))
@staticmethod
def clear_robot_status(robot_code):
"""
Clear the robot's status
:param robot_code:
:return:
"""
status_redis.delete('status_{}'.format(robot_code))
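# Example sketch (not part of the original module): typical use of the static
# helpers above. Assumes a Redis server is listening on the address configured
# at the top of this file; the robot code and payload are placeholder values.
#
#     RobotStatus.set_robot_status('robot-001', {'state': 'idle', 'battery': 87})
#     current = RobotStatus.get_robot_status('robot-001')   # dict or None
#     RobotStatus.clear_robot_status('robot-001')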
| apache-2.0 |
redeyser/IceCash2 | install/pyusb-1.0.0rc1/build/lib.linux-x86_64-2.7/usb/backend/openusb.py | 18 | 28018 | # Copyright (C) 2009-2014 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
from ctypes import *
import ctypes.util
import usb.util
from usb._debug import methodtrace
import logging
import errno
import sys
import usb._interop as _interop
import usb._objfinalizer as _objfinalizer
import usb.util as util
import usb.libloader
from usb.core import USBError
__author__ = 'Wander Lairson Costa'
__all__ = [
'get_backend',
'OPENUSB_SUCCESS',
'OPENUSB_PLATFORM_FAILURE',
'OPENUSB_NO_RESOURCES',
'OPENUSB_NO_BANDWIDTH',
'OPENUSB_NOT_SUPPORTED',
'OPENUSB_HC_HARDWARE_ERROR',
'OPENUSB_INVALID_PERM',
'OPENUSB_BUSY',
'OPENUSB_BADARG',
'OPENUSB_NOACCESS',
'OPENUSB_PARSE_ERROR',
'OPENUSB_UNKNOWN_DEVICE',
'OPENUSB_INVALID_HANDLE',
'OPENUSB_SYS_FUNC_FAILURE',
'OPENUSB_NULL_LIST',
'OPENUSB_CB_CONTINUE',
'OPENUSB_CB_TERMINATE',
'OPENUSB_IO_STALL',
'OPENUSB_IO_CRC_ERROR',
'OPENUSB_IO_DEVICE_HUNG',
'OPENUSB_IO_REQ_TOO_BIG',
'OPENUSB_IO_BIT_STUFFING',
'OPENUSB_IO_UNEXPECTED_PID',
'OPENUSB_IO_DATA_OVERRUN',
'OPENUSB_IO_DATA_UNDERRUN',
'OPENUSB_IO_BUFFER_OVERRUN',
'OPENUSB_IO_BUFFER_UNDERRUN',
'OPENUSB_IO_PID_CHECK_FAILURE',
'OPENUSB_IO_DATA_TOGGLE_MISMATCH',
'OPENUSB_IO_TIMEOUT',
'OPENUSB_IO_CANCELED'
]
_logger = logging.getLogger('usb.backend.openusb')
OPENUSB_SUCCESS = 0
OPENUSB_PLATFORM_FAILURE = -1
OPENUSB_NO_RESOURCES = -2
OPENUSB_NO_BANDWIDTH = -3
OPENUSB_NOT_SUPPORTED = -4
OPENUSB_HC_HARDWARE_ERROR = -5
OPENUSB_INVALID_PERM = -6
OPENUSB_BUSY = -7
OPENUSB_BADARG = -8
OPENUSB_NOACCESS = -9
OPENUSB_PARSE_ERROR = -10
OPENUSB_UNKNOWN_DEVICE = -11
OPENUSB_INVALID_HANDLE = -12
OPENUSB_SYS_FUNC_FAILURE = -13
OPENUSB_NULL_LIST = -14
OPENUSB_CB_CONTINUE = -20
OPENUSB_CB_TERMINATE = -21
OPENUSB_IO_STALL = -50
OPENUSB_IO_CRC_ERROR = -51
OPENUSB_IO_DEVICE_HUNG = -52
OPENUSB_IO_REQ_TOO_BIG = -53
OPENUSB_IO_BIT_STUFFING = -54
OPENUSB_IO_UNEXPECTED_PID = -55
OPENUSB_IO_DATA_OVERRUN = -56
OPENUSB_IO_DATA_UNDERRUN = -57
OPENUSB_IO_BUFFER_OVERRUN = -58
OPENUSB_IO_BUFFER_UNDERRUN = -59
OPENUSB_IO_PID_CHECK_FAILURE = -60
OPENUSB_IO_DATA_TOGGLE_MISMATCH = -61
OPENUSB_IO_TIMEOUT = -62
OPENUSB_IO_CANCELED = -63
_openusb_errno = {
OPENUSB_SUCCESS:None,
OPENUSB_PLATFORM_FAILURE:None,
OPENUSB_NO_RESOURCES:errno.__dict__.get('ENOMEM', None),
OPENUSB_NO_BANDWIDTH:None,
OPENUSB_NOT_SUPPORTED:errno.__dict__.get('ENOSYS', None),
OPENUSB_HC_HARDWARE_ERROR:errno.__dict__.get('EIO', None),
OPENUSB_INVALID_PERM:errno.__dict__.get('EBADF', None),
OPENUSB_BUSY:errno.__dict__.get('EBUSY', None),
OPENUSB_BADARG:errno.__dict__.get('EINVAL', None),
OPENUSB_NOACCESS:errno.__dict__.get('EACCES', None),
OPENUSB_PARSE_ERROR:None,
OPENUSB_UNKNOWN_DEVICE:errno.__dict__.get('ENODEV', None),
OPENUSB_INVALID_HANDLE:errno.__dict__.get('EINVAL', None),
OPENUSB_SYS_FUNC_FAILURE:None,
OPENUSB_NULL_LIST:None,
OPENUSB_CB_CONTINUE:None,
OPENUSB_CB_TERMINATE:None,
OPENUSB_IO_STALL:errno.__dict__.get('EIO', None),
OPENUSB_IO_CRC_ERROR:errno.__dict__.get('EIO', None),
OPENUSB_IO_DEVICE_HUNG:errno.__dict__.get('EIO', None),
OPENUSB_IO_REQ_TOO_BIG:errno.__dict__.get('E2BIG', None),
OPENUSB_IO_BIT_STUFFING:None,
OPENUSB_IO_UNEXPECTED_PID:errno.__dict__.get('ESRCH', None),
OPENUSB_IO_DATA_OVERRUN:errno.__dict__.get('EOVERFLOW', None),
OPENUSB_IO_DATA_UNDERRUN:None,
OPENUSB_IO_BUFFER_OVERRUN:errno.__dict__.get('EOVERFLOW', None),
OPENUSB_IO_BUFFER_UNDERRUN:None,
OPENUSB_IO_PID_CHECK_FAILURE:None,
OPENUSB_IO_DATA_TOGGLE_MISMATCH:None,
OPENUSB_IO_TIMEOUT:errno.__dict__.get('ETIMEDOUT', None),
OPENUSB_IO_CANCELED:errno.__dict__.get('EINTR', None)
}
class _usb_endpoint_desc(Structure):
_fields_ = [('bLength', c_uint8),
('bDescriptorType', c_uint8),
('bEndpointAddress', c_uint8),
('bmAttributes', c_uint8),
('wMaxPacketSize', c_uint16),
('bInterval', c_uint8),
('bRefresh', c_uint8),
('bSynchAddress', c_uint8)]
class _usb_interface_desc(Structure):
_fields_ = [('bLength', c_uint8),
('bDescriptorType', c_uint8),
('bInterfaceNumber', c_uint8),
('bAlternateSetting', c_uint8),
('bNumEndpoints', c_uint8),
('bInterfaceClass', c_uint8),
('bInterfaceSubClass', c_uint8),
('bInterfaceProtocol', c_uint8),
('iInterface', c_uint8)]
class _usb_config_desc(Structure):
_fields_ = [('bLength', c_uint8),
('bDescriptorType', c_uint8),
('wTotalLength', c_uint16),
('bNumInterfaces', c_uint8),
('bConfigurationValue', c_uint8),
('iConfiguration', c_uint8),
('bmAttributes', c_uint8),
('bMaxPower', c_uint8)]
class _usb_device_desc(Structure):
_fields_ = [('bLength', c_uint8),
('bDescriptorType', c_uint8),
('bcdUSB', c_uint16),
('bDeviceClass', c_uint8),
('bDeviceSubClass', c_uint8),
('bDeviceProtocol', c_uint8),
('bMaxPacketSize0', c_uint8),
('idVendor', c_uint16),
('idProduct', c_uint16),
('bcdDevice', c_uint16),
('iManufacturer', c_uint8),
('iProduct', c_uint8),
('iSerialNumber', c_uint8),
('bNumConfigurations', c_uint8)]
class _openusb_request_result(Structure):
_fields_ = [('status', c_int32),
('transferred_bytes', c_uint32)]
class _openusb_ctrl_request(Structure):
def __init__(self):
super(_openusb_ctrl_request, self).__init__()
self.setup.bmRequestType = 0
self.setup.bRequest = 0
self.setup.wValue = 0
self.setup.wIndex = 0
self.payload = None
self.length = 0
self.timeout = 0
self.flags = 0
self.result.status = 0
self.result.transferred_bytes = 0
self.next = None
class _openusb_ctrl_setup(Structure):
_fields_ = [('bmRequestType', c_uint8),
('bRequest', c_uint8),
('wValue', c_uint16),
('wIndex', c_uint16)]
_fields_ = [('setup', _openusb_ctrl_setup),
('payload', POINTER(c_uint8)),
('length', c_uint32),
('timeout', c_uint32),
('flags', c_uint32),
('result', _openusb_request_result),
('next', c_void_p)]
class _openusb_intr_request(Structure):
_fields_ = [('interval', c_uint16),
('payload', POINTER(c_uint8)),
('length', c_uint32),
('timeout', c_uint32),
('flags', c_uint32),
('result', _openusb_request_result),
('next', c_void_p)]
class _openusb_bulk_request(Structure):
_fields_ = [('payload', POINTER(c_uint8)),
('length', c_uint32),
('timeout', c_uint32),
('flags', c_uint32),
('result', _openusb_request_result),
('next', c_void_p)]
class _openusb_isoc_pkts(Structure):
class _openusb_isoc_packet(Structure):
_fields_ = [('payload', POINTER(c_uint8)),
('length', c_uint32)]
_fields_ = [('num_packets', c_uint32),
('packets', POINTER(_openusb_isoc_packet))]
class _openusb_isoc_request(Structure):
_fields_ = [('start_frame', c_uint32),
('flags', c_uint32),
('pkts', _openusb_isoc_pkts),
('isoc_results', POINTER(_openusb_request_result)),
('isoc_status', c_int32),
('next', c_void_p)]
_openusb_devid = c_uint64
_openusb_busid = c_uint64
_openusb_handle = c_uint64
_openusb_dev_handle = c_uint64
_lib = None
_ctx = None
def _load_library(find_library=None):
# FIXME: cygwin name is "openusb"?
# (that's what the original _load_library() function
# would have searched for)
return usb.libloader.load_locate_library(
('openusb',), 'openusb', "OpenUSB library", find_library=find_library
)
def _setup_prototypes(lib):
# int32_t openusb_init(uint32_t flags , openusb_handle_t *handle);
lib.openusb_init.argtypes = [c_uint32, POINTER(_openusb_handle)]
lib.openusb_init.restype = c_int32
# void openusb_fini(openusb_handle_t handle );
lib.openusb_fini.argtypes = [_openusb_handle]
# uint32_t openusb_get_busid_list(openusb_handle_t handle,
# openusb_busid_t **busids,
# uint32_t *num_busids);
lib.openusb_get_busid_list.argtypes = [
_openusb_handle,
POINTER(POINTER(_openusb_busid)),
POINTER(c_uint32)
]
# void openusb_free_busid_list(openusb_busid_t * busids);
lib.openusb_free_busid_list.argtypes = [POINTER(_openusb_busid)]
# uint32_t openusb_get_devids_by_bus(openusb_handle_t handle,
# openusb_busid_t busid,
# openusb_devid_t **devids,
# uint32_t *num_devids);
lib.openusb_get_devids_by_bus.argtypes = [
_openusb_handle,
_openusb_busid,
POINTER(POINTER(_openusb_devid)),
POINTER(c_uint32)
]
lib.openusb_get_devids_by_bus.restype = c_int32
# void openusb_free_devid_list(openusb_devid_t * devids);
lib.openusb_free_devid_list.argtypes = [POINTER(_openusb_devid)]
# int32_t openusb_open_device(openusb_handle_t handle,
# openusb_devid_t devid ,
# uint32_t flags,
# openusb_dev_handle_t *dev);
lib.openusb_open_device.argtypes = [
_openusb_handle,
_openusb_devid,
c_uint32,
POINTER(_openusb_dev_handle)
]
lib.openusb_open_device.restype = c_int32
# int32_t openusb_close_device(openusb_dev_handle_t dev);
lib.openusb_close_device.argtypes = [_openusb_dev_handle]
lib.openusb_close_device.restype = c_int32
# int32_t openusb_set_configuration(openusb_dev_handle_t dev,
# uint8_t cfg);
lib.openusb_set_configuration.argtypes = [_openusb_dev_handle, c_uint8]
lib.openusb_set_configuration.restype = c_int32
# int32_t openusb_get_configuration(openusb_dev_handle_t dev,
# uint8_t *cfg);
lib.openusb_get_configuration.argtypes = [_openusb_dev_handle, POINTER(c_uint8)]
lib.openusb_get_configuration.restype = c_int32
# int32_t openusb_claim_interface(openusb_dev_handle_t dev,
# uint8_t ifc,
# openusb_init_flag_t flags);
lib.openusb_claim_interface.argtypes = [
_openusb_dev_handle,
c_uint8,
c_int
]
lib.openusb_claim_interface.restype = c_int32
# int32_t openusb_release_interface(openusb_dev_handle_t dev,
# uint8_t ifc);
lib.openusb_release_interface.argtypes = [
_openusb_dev_handle,
c_uint8
]
lib.openusb_release_interface.restype = c_int32
# int32_t openusb_set_altsetting(openusb_dev_handle_t dev,
# uint8_t ifc,
# uint8_t alt);
lib.openusb_set_altsetting.argtypes = [
_openusb_dev_handle,
c_uint8,
c_uint8
]
lib.openusb_set_altsetting.restype = c_int32
# int32_t openusb_reset(openusb_dev_handle_t dev);
lib.openusb_reset.argtypes = [_openusb_dev_handle]
lib.openusb_reset.restype = c_int32
# int32_t openusb_parse_device_desc(openusb_handle_t handle,
# openusb_devid_t devid,
# uint8_t *buffer,
# uint16_t buflen,
# usb_device_desc_t *devdesc);
lib.openusb_parse_device_desc.argtypes = [
_openusb_handle,
_openusb_devid,
POINTER(c_uint8),
c_uint16,
POINTER(_usb_device_desc)
]
lib.openusb_parse_device_desc.restype = c_int32
# int32_t openusb_parse_config_desc(openusb_handle_t handle,
# openusb_devid_t devid,
# uint8_t *buffer,
# uint16_t buflen,
# uint8_t cfgidx,
# usb_config_desc_t *cfgdesc);
lib.openusb_parse_config_desc.argtypes = [
_openusb_handle,
_openusb_devid,
POINTER(c_uint8),
c_uint16,
c_uint8,
POINTER(_usb_config_desc)
]
lib.openusb_parse_config_desc.restype = c_int32
# int32_t openusb_parse_interface_desc(openusb_handle_t handle,
# openusb_devid_t devid,
# uint8_t *buffer,
# uint16_t buflen,
# uint8_t cfgidx,
# uint8_t ifcidx,
# uint8_t alt,
# usb_interface_desc_t *ifcdesc);
lib.openusb_parse_interface_desc.argtypes = [
_openusb_handle,
_openusb_devid,
POINTER(c_uint8),
c_uint16,
c_uint8,
c_uint8,
c_uint8,
POINTER(_usb_interface_desc)
]
lib.openusb_parse_interface_desc.restype = c_int32
# int32_t openusb_parse_endpoint_desc(openusb_handle_t handle,
# openusb_devid_t devid,
# uint8_t *buffer,
# uint16_t buflen,
# uint8_t cfgidx,
# uint8_t ifcidx,
# uint8_t alt,
# uint8_t eptidx,
# usb_endpoint_desc_t *eptdesc);
lib.openusb_parse_endpoint_desc.argtypes = [
_openusb_handle,
_openusb_devid,
POINTER(c_uint8),
c_uint16,
c_uint8,
c_uint8,
c_uint8,
c_uint8,
POINTER(_usb_endpoint_desc)
]
lib.openusb_parse_interface_desc.restype = c_int32
# const char *openusb_strerror(int32_t error );
lib.openusb_strerror.argtypes = [c_int32]
lib.openusb_strerror.restype = c_char_p
# int32_t openusb_ctrl_xfer(openusb_dev_handle_t dev,
# uint8_t ifc,
# uint8_t ept,
# openusb_ctrl_request_t *ctrl);
lib.openusb_ctrl_xfer.argtypes = [
_openusb_dev_handle,
c_uint8,
c_uint8,
POINTER(_openusb_ctrl_request)
]
lib.openusb_ctrl_xfer.restype = c_int32
# int32_t openusb_intr_xfer(openusb_dev_handle_t dev,
# uint8_t ifc,
# uint8_t ept,
# openusb_intr_request_t *intr);
lib.openusb_intr_xfer.argtypes = [
_openusb_dev_handle,
c_uint8,
c_uint8,
POINTER(_openusb_intr_request)
]
lib.openusb_intr_xfer.restype = c_int32
# int32_t openusb_bulk_xfer(openusb_dev_handle_t dev,
# uint8_t ifc,
# uint8_t ept,
# openusb_bulk_request_t *bulk);
lib.openusb_bulk_xfer.argtypes = [
_openusb_dev_handle,
c_uint8,
c_uint8,
POINTER(_openusb_bulk_request)
]
lib.openusb_bulk_xfer.restype = c_int32
# int32_t openusb_isoc_xfer(openusb_dev_handle_t dev,
# uint8_t ifc,
# uint8_t ept,
# openusb_isoc_request_t *isoc);
lib.openusb_isoc_xfer.argtypes = [
_openusb_dev_handle,
c_uint8,
c_uint8,
POINTER(_openusb_isoc_request)
]
lib.openusb_isoc_xfer.restype = c_int32
def _check(ret):
if hasattr(ret, 'value'):
ret = ret.value
if ret != 0:
raise USBError(_lib.openusb_strerror(ret), ret, _openusb_errno[ret])
return ret
class _Context(_objfinalizer.AutoFinalizedObject):
def __init__(self):
self.handle = _openusb_handle()
_check(_lib.openusb_init(0, byref(self.handle)))
def _finalize_object(self):
_lib.openusb_fini(self.handle)
class _BusIterator(_objfinalizer.AutoFinalizedObject):
def __init__(self):
self.buslist = POINTER(_openusb_busid)()
num_busids = c_uint32()
_check(_lib.openusb_get_busid_list(_ctx.handle,
byref(self.buslist),
byref(num_busids)))
self.num_busids = num_busids.value
def __iter__(self):
for i in range(self.num_busids):
yield self.buslist[i]
def _finalize_object(self):
_lib.openusb_free_busid_list(self.buslist)
class _DevIterator(_objfinalizer.AutoFinalizedObject):
def __init__(self, busid):
self.devlist = POINTER(_openusb_devid)()
num_devids = c_uint32()
_check(_lib.openusb_get_devids_by_bus(_ctx.handle,
busid,
byref(self.devlist),
byref(num_devids)))
self.num_devids = num_devids.value
def __iter__(self):
for i in range(self.num_devids):
yield self.devlist[i]
def _finalize_object(self):
_lib.openusb_free_devid_list(self.devlist)
class _OpenUSB(usb.backend.IBackend):
@methodtrace(_logger)
def enumerate_devices(self):
for bus in _BusIterator():
for devid in _DevIterator(bus):
yield devid
@methodtrace(_logger)
def get_device_descriptor(self, dev):
desc = _usb_device_desc()
_check(_lib.openusb_parse_device_desc(_ctx.handle,
dev,
None,
0,
byref(desc)))
desc.bus = None
desc.address = None
desc.port_number = None
desc.port_numbers = None
desc.speed = None
return desc
@methodtrace(_logger)
def get_configuration_descriptor(self, dev, config):
desc = _usb_config_desc()
_check(_lib.openusb_parse_config_desc(_ctx.handle,
dev,
None,
0,
config,
byref(desc)))
desc.extra_descriptors = None
return desc
@methodtrace(_logger)
def get_interface_descriptor(self, dev, intf, alt, config):
desc = _usb_interface_desc()
_check(_lib.openusb_parse_interface_desc(_ctx.handle,
dev,
None,
0,
config,
intf,
alt,
byref(desc)))
desc.extra_descriptors = None
return desc
@methodtrace(_logger)
def get_endpoint_descriptor(self, dev, ep, intf, alt, config):
desc = _usb_endpoint_desc()
_check(_lib.openusb_parse_endpoint_desc(_ctx.handle,
dev,
None,
0,
config,
intf,
alt,
ep,
byref(desc)))
desc.extra_descriptors = None
return desc
@methodtrace(_logger)
def open_device(self, dev):
handle = _openusb_dev_handle()
_check(_lib.openusb_open_device(_ctx.handle, dev, 0, byref(handle)))
return handle
@methodtrace(_logger)
def close_device(self, dev_handle):
_lib.openusb_close_device(dev_handle)
@methodtrace(_logger)
def set_configuration(self, dev_handle, config_value):
_check(_lib.openusb_set_configuration(dev_handle, config_value))
@methodtrace(_logger)
def get_configuration(self, dev_handle):
config = c_uint8()
_check(_lib.openusb_get_configuration(dev_handle, byref(config)))
return config.value
@methodtrace(_logger)
def set_interface_altsetting(self, dev_handle, intf, altsetting):
_check(_lib.openusb_set_altsetting(dev_handle, intf, altsetting))
@methodtrace(_logger)
def claim_interface(self, dev_handle, intf):
_check(_lib.openusb_claim_interface(dev_handle, intf, 0))
@methodtrace(_logger)
def release_interface(self, dev_handle, intf):
_lib.openusb_release_interface(dev_handle, intf)
@methodtrace(_logger)
def bulk_write(self, dev_handle, ep, intf, data, timeout):
request = _openusb_bulk_request()
memset(byref(request), 0, sizeof(request))
payload, request.length = data.buffer_info()
request.payload = cast(payload, POINTER(c_uint8))
request.timeout = timeout
_check(_lib.openusb_bulk_xfer(dev_handle, intf, ep, byref(request)))
return request.result.transferred_bytes
@methodtrace(_logger)
def bulk_read(self, dev_handle, ep, intf, buff, timeout):
request = _openusb_bulk_request()
memset(byref(request), 0, sizeof(request))
payload, request.length = buff.buffer_info()
request.payload = cast(payload, POINTER(c_uint8))
request.timeout = timeout
_check(_lib.openusb_bulk_xfer(dev_handle, intf, ep, byref(request)))
return request.result.transferred_bytes
@methodtrace(_logger)
def intr_write(self, dev_handle, ep, intf, data, timeout):
request = _openusb_intr_request()
memset(byref(request), 0, sizeof(request))
payload, request.length = data.buffer_info()
request.payload = cast(payload, POINTER(c_uint8))
request.timeout = timeout
_check(_lib.openusb_intr_xfer(dev_handle, intf, ep, byref(request)))
return request.result.transferred_bytes
@methodtrace(_logger)
def intr_read(self, dev_handle, ep, intf, buff, timeout):
request = _openusb_intr_request()
memset(byref(request), 0, sizeof(request))
payload, request.length = buff.buffer_info()
request.payload = cast(payload, POINTER(c_uint8))
request.timeout = timeout
_check(_lib.openusb_intr_xfer(dev_handle, intf, ep, byref(request)))
return request.result.transferred_bytes
# TODO: implement isochronous
# @methodtrace(_logger)
# def iso_write(self, dev_handle, ep, intf, data, timeout):
# pass
# @methodtrace(_logger)
# def iso_read(self, dev_handle, ep, intf, size, timeout):
# pass
@methodtrace(_logger)
def ctrl_transfer(self,
dev_handle,
bmRequestType,
bRequest,
wValue,
wIndex,
data,
timeout):
request = _openusb_ctrl_request()
request.setup.bmRequestType = bmRequestType
request.setup.bRequest = bRequest
request.setup.wValue = wValue
request.setup.wIndex = wIndex
request.timeout = timeout
direction = usb.util.ctrl_direction(bmRequestType)
payload, request.length = data.buffer_info()
request.length *= data.itemsize
request.payload = cast(payload, POINTER(c_uint8))
_check(_lib.openusb_ctrl_xfer(dev_handle, 0, 0, byref(request)))
return request.result.transferred_bytes
@methodtrace(_logger)
def reset_device(self, dev_handle):
_check(_lib.openusb_reset(dev_handle))
@methodtrace(_logger)
def clear_halt(self, dev_handle, ep):
bmRequestType = util.build_request_type(
util.CTRL_OUT,
util.CTRL_TYPE_STANDARD,
util.CTRL_RECIPIENT_ENDPOINT)
self.ctrl_transfer(
dev_handle,
bmRequestType,
0x03,
0,
ep,
_interop.as_array(),
1000)
def get_backend(find_library=None):
try:
global _lib, _ctx
if _lib is None:
_lib = _load_library(find_library)
_setup_prototypes(_lib)
_ctx = _Context()
return _OpenUSB()
except usb.libloader.LibraryException:
# exception already logged (if any)
_logger.error('Error loading OpenUSB backend', exc_info=False)
return None
except Exception:
_logger.error('Error loading OpenUSB backend', exc_info=True)
return None
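# Example sketch (not part of the original module): selecting this backend
# explicitly when enumerating devices with PyUSB. Requires the OpenUSB library
# to be installed; the vendor/product ids are placeholders.
#
#     import usb.core
#     import usb.backend.openusb as openusb
#
#     backend = openusb.get_backend()
#     dev = usb.core.find(idVendor=0x1234, idProduct=0x5678, backend=backend)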
| gpl-3.0 |
weolar/miniblink49 | third_party/skia/tools/embed_resources.py | 57 | 2573 | #!/usr/bin/python
'''
Copyright 2015 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
import argparse
def bytes_from_file(f, chunksize=8192):
while True:
chunk = f.read(chunksize)
if chunk:
for b in chunk:
yield ord(b)
else:
break
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Convert resource files to embedded read only data.',
epilog='''The output (when compiled and linked) can be used as:
struct SkEmbeddedResource {const uint8_t* data; const size_t size;};
struct SkEmbeddedHeader {const SkEmbeddedResource* entries; const int count;};
extern "C" SkEmbeddedHeader const NAME;''')
parser.add_argument('--align', default=1, type=int,
help='minimum alignment (in bytes) of resource data')
parser.add_argument('--name', default='_resource', type=str,
help='the name of the c identifier to export')
parser.add_argument('--input', required=True, type=argparse.FileType('rb'),
nargs='+', help='list of resource files to embed')
parser.add_argument('--output', required=True, type=argparse.FileType('w'),
help='the name of the cpp file to output')
args = parser.parse_args()
out = args.output.write
out('#include "SkTypes.h"\n')
# Write the resources.
index = 0
for f in args.input:
out('static const uint8_t resource{0:d}[] SK_STRUCT_ALIGN({1:d}) = {{\n'
.format(index, args.align))
bytes_written = 0
bytes_on_line = 0
for b in bytes_from_file(f):
out(hex(b) + ',')
bytes_written += 1
bytes_on_line += 1
if bytes_on_line >= 32:
out('\n')
bytes_on_line = 0
out('};\n')
out('static const size_t resource{0:d}_size = {1:d};\n'
.format(index, bytes_written))
index += 1
# Write the resource entries.
out('struct SkEmbeddedResource { const uint8_t* d; const size_t s; };\n')
out('static const SkEmbeddedResource header[] = {\n')
index = 0
for f in args.input:
out(' {{ resource{0:d}, resource{0:d}_size }},\n'.format(index))
index += 1
out('};\n')
out('static const int header_count = {0:d};\n'.format(index))
# Export the resource header.
out('struct SkEmbeddedHeader {const SkEmbeddedResource* e; const int c;};\n')
out('extern "C" const SkEmbeddedHeader {0:s} = {{ header, header_count }};\n'
.format(args.name))
if __name__ == "__main__":
main()
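# Example sketch (not part of the original script): a typical invocation and
# the symbol the generated file exports. The file names and the identifier
# below are placeholders.
#
#     python embed_resources.py --name=embedded_fonts --align=4 \
#         --input font_a.ttf font_b.ttf --output embedded_fonts.cpp
#
# The generated embedded_fonts.cpp is then compiled and linked, and the data
# is reached from C++ through the header symbol (struct layout as in the
# argparse epilog above):
#
#     extern "C" const SkEmbeddedHeader embedded_fonts;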
| apache-2.0 |
markoshorro/gem5 | ext/ply/test/yacc_inf.py | 174 | 1278 | # -----------------------------------------------------------------------------
# yacc_inf.py
#
# Infinite recursion
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
| bsd-3-clause |
jdahlin/pygobject | demos/gtk-demo/demos/colorselector.py | 2 | 3554 | #!/usr/bin/env python
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2010 Red Hat, Inc., John (J5) Palmieri <johnp@redhat.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
title = "Color Selector"
description = """
GtkColorSelection lets the user choose a color. GtkColorSelectionDialog is
a prebuilt dialog containing a GtkColorSelection.
"""
from gi.repository import Gtk, Gdk
class ColorSelectorApp:
def __init__(self):
# FIXME: we should allow Gdk.Color to be allocated without parameters
# Also color doesn't seem to work
self.color = Gdk.RGBA()
self.color.red = 0
self.color.blue = 1
self.color.green = 0
self.color.alpha = 1
self.window = Gtk.Window()
self.window.set_title('Color Selection')
self.window.set_border_width(8)
self.window.connect('destroy', lambda w: Gtk.main_quit())
vbox = Gtk.VBox(homogeneous=False,
spacing=8)
vbox.set_border_width(8)
self.window.add(vbox)
# create color swatch area
frame = Gtk.Frame()
frame.set_shadow_type(Gtk.ShadowType.IN)
vbox.pack_start(frame, True, True, 0)
self.da = Gtk.DrawingArea()
self.da.connect('draw', self.draw_cb)
# set a minimum size
self.da.set_size_request(200, 200)
# set the color
self.da.override_background_color(0, self.color)
frame.add(self.da)
alignment = Gtk.Alignment(xalign=1.0,
yalign=0.5,
xscale=0.0,
yscale=0.0)
button = Gtk.Button(label='_Change the above color',
use_underline=True)
alignment.add(button)
vbox.pack_start(alignment, False, False, 0)
button.connect('clicked', self.change_color_cb)
self.window.show_all()
def draw_cb(self, widget, cairo_ctx):
style = widget.get_style_context()
bg_color = style.get_background_color(0)
Gdk.cairo_set_source_rgba(cairo_ctx, bg_color)
cairo_ctx.paint()
return True
def change_color_cb(self, button):
dialog = Gtk.ColorSelectionDialog(title='Changing color')
dialog.set_transient_for(self.window)
colorsel = dialog.get_color_selection()
colorsel.set_previous_rgba(self.color)
colorsel.set_current_rgba(self.color)
colorsel.set_has_palette(True)
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.color = colorsel.get_current_rgba()
self.da.override_background_color(0, self.color)
dialog.destroy()
def main(demoapp=None):
ColorSelectorApp()
Gtk.main()
if __name__ == '__main__':
main()
| lgpl-2.1 |
mvaled/OpenUpgrade | addons/hw_scanner/__init__.py | 1894 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
linkmyth/advance_medical | node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py | 1824 | 3474 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'CONFIGURATION_NAME',
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'LIB_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
'SHARED_LIB_DIR',
'SHARED_LIB_PREFIX',
'SHARED_LIB_SUFFIX',
'STATIC_LIB_PREFIX',
'STATIC_LIB_SUFFIX',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
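# Example sketch (not part of the original module): gypd is selected like any
# other gyp generator. The .gyp file name and the OS value below are
# placeholders; as noted above, gypd defines no default for OS, so it has to
# be supplied with -D.
#
#     GYP_GENERATORS=gypd gyp -D OS=linux foo.gyp
#     # writes foo.gypd next to foo.gyp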
| mit |
darkoc/clowdflows | workflows/migrations/0031_auto__del_data.py | 6 | 15892 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Data'
db.delete_table('workflows_data')
def backwards(self, orm):
# Adding model 'Data'
db.create_table('workflows_data', (
('value', self.gf('django.db.models.fields.TextField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('key', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal('workflows', ['Data'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'workflows.abstractinput': {
'Meta': {'ordering': "('order',)", 'object_name': 'AbstractInput'},
'default': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multi': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'parameter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inputs'", 'to': "orm['workflows.AbstractWidget']"})
},
'workflows.abstractoption': {
'Meta': {'ordering': "['name']", 'object_name': 'AbstractOption'},
'abstract_input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['workflows.AbstractInput']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'workflows.abstractoutput': {
'Meta': {'ordering': "('order',)", 'object_name': 'AbstractOutput'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outputs'", 'to': "orm['workflows.AbstractWidget']"})
},
'workflows.abstractwidget': {
'Meta': {'ordering': "('order', 'name')", 'object_name': 'AbstractWidget'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'widgets'", 'to': "orm['workflows.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'has_progress_bar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('workflows.thumbs.ThumbnailField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'interaction_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'interactive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'post_interact_action': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'treeview_image': ('workflows.thumbs.ThumbnailField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'widgets'", 'null': 'True', 'to': "orm['auth.User']"}),
'visualization_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'wsdl': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'wsdl_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'workflows.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['workflows.Category']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'categories'", 'null': 'True', 'to': "orm['auth.User']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'categories'", 'null': 'True', 'to': "orm['workflows.Workflow']"})
},
'workflows.connection': {
'Meta': {'object_name': 'Connection'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Input']"}),
'output': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Output']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Workflow']"})
},
'workflows.input': {
'Meta': {'object_name': 'Input'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inner_output': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'outer_input_rel'", 'null': 'True', 'to': "orm['workflows.Output']"}),
'multi_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'outer_output': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inner_input_rel'", 'null': 'True', 'to': "orm['workflows.Output']"}),
'parameter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'value': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inputs'", 'to': "orm['workflows.Widget']"})
},
'workflows.option': {
'Meta': {'ordering': "['name']", 'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['workflows.Input']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'workflows.output': {
'Meta': {'object_name': 'Output'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inner_input': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'outer_output_rel'", 'null': 'True', 'to': "orm['workflows.Input']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'outer_input': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inner_output_rel'", 'null': 'True', 'to': "orm['workflows.Input']"}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'value': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outputs'", 'to': "orm['workflows.Widget']"})
},
'workflows.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'active_workflow': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['workflows.Workflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'workflows.widget': {
'Meta': {'object_name': 'Widget'},
'abstract_widget': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'instances'", 'null': 'True', 'to': "orm['workflows.AbstractWidget']"}),
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interaction_waiting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'progress': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'regular'", 'max_length': '50'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'widgets'", 'to': "orm['workflows.Workflow']"}),
'x': ('django.db.models.fields.IntegerField', [], {}),
'y': ('django.db.models.fields.IntegerField', [], {})
},
'workflows.workflow': {
'Meta': {'ordering': "['name']", 'object_name': 'Workflow'},
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Untitled workflow'", 'max_length': '200'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'template_parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['workflows.Workflow']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflows'", 'to': "orm['auth.User']"}),
'widget': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'workflow_link'", 'unique': 'True', 'null': 'True', 'to': "orm['workflows.Widget']"})
}
}
complete_apps = ['workflows']
| gpl-3.0 |
mlperf/inference_results_v0.7 | closed/Lenovo/code/rnnt/tensorrt/rnn-t_builder.py | 12 | 51939 | #!/usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorrt as trt
import numpy as np
import torch
import argparse
import ctypes
import os, sys
sys.path.insert(0, os.getcwd())
# The plugin .so file has to be loaded at global scope and before `import torch` to avoid cuda version mismatch.
RNNT_OPT_PLUGIN_LIBRARY="build/plugins/RNNTOptPlugin/librnntoptplugin.so"
if not os.path.isfile(RNNT_OPT_PLUGIN_LIBRARY):
raise IOError("{}\n{}\n".format(
"Failed to load library ({}).".format(RNNT_OPT_PLUGIN_LIBRARY),
"Please build the RNN-T Opt plugin."
))
ctypes.CDLL(RNNT_OPT_PLUGIN_LIBRARY)
from code.common import logging, dict_get, run_command, BENCHMARKS
from code.common.builder import BenchmarkBuilder, MultiBuilder
from code.rnnt.dali.pipeline import DALIInferencePipeline
import code.common.arguments as common_args
from importlib import import_module
RNNTCalibrator = import_module("code.rnnt.tensorrt.calibrator").RNNTCalibrator
## Support methods
##
def set_tensor_dtype(tensor, t_dtype, t_format) :
# handle datatype
if t_dtype == "int8":
tensor.dtype = trt.int8
tensor.dynamic_range = (-128, 127)
elif t_dtype == "int32":
tensor.dtype = trt.int32
elif t_dtype == "fp16":
tensor.dtype = trt.float16
elif t_dtype == "fp32":
tensor.dtype = trt.float32
else:
assert(False)
# handle format
if t_format == "linear":
tensor.allowed_formats = 1 << int(trt.TensorFormat.LINEAR)
elif t_format == "chw4":
tensor.allowed_formats = 1 << int(trt.TensorFormat.CHW4)
elif t_format == "hwc8":
tensor.allowed_formats = 1 << int(trt.TensorFormat.HWC8)
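# Example sketch (not part of the original file): applying set_tensor_dtype()
# to a network input before building an engine. The `network` object and the
# chosen dtype/format are illustrative assumptions.
#
#     input_tensor = network.get_input(0)
#     set_tensor_dtype(input_tensor, "fp16", "linear")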
## Common parameters
##
class RNNHyperParam:
# alphabet
labels_size = 29 # alphabet
# encoder
encoder_input_size = 240
encoder_hidden_size = 1024
enc_pre_rnn_layers = 2
enc_post_rnn_layers = 3
# decoder
decoder_input_size = 320
decoder_hidden_size = 320
joint_hidden_size = 512
dec_rnn_layers = 2
## Parent class
##
class RNNTBaseBuilder(BenchmarkBuilder):
model_path = "build/models/rnn-t/DistributedDataParallel_1576581068.9962234-epoch-100.pt"
state_dict = None
@classmethod
def _load_model(cls):
if not cls.state_dict:
logging.info("Loading RNN-T PyTorch model")
checkpoint = torch.load(cls.model_path, map_location="cpu")
cls.state_dict = checkpoint["state_dict"]
def __init__(self, args):
workspace_size = dict_get(args, "workspace_size", default=(4<<30))
logging.info("Use workspace_size: {:}".format(workspace_size))
super().__init__(args, name=BENCHMARKS.RNNT, workspace_size=workspace_size)
self.num_profiles = 1
self.max_seq_length = dict_get(args, "max_seq_length", default=128)
self.opt = dict_get(args, "opt", default="greedy")
def initialize(self):
RNNTBaseBuilder._load_model()
self.initialized = True
## Encoder class
##
class EncoderBuilder(RNNTBaseBuilder):
def __init__(self, args):
super().__init__(args)
# Encoder has a special enc_batch_size argument that can override batch size
self.batch_size = dict_get(args, "enc_batch_size", default=self.batch_size)
self.name = "encoder.plan"
self.expose_state = not dict_get(args, "seq_splitting_off", default=False)
self.unroll = dict_get(args, "calibrate_encoder", default=False)
if self.unroll:
calib_batch_size = dict_get(self.args, "calib_batch_size", default=10)
calib_max_batches = dict_get(self.args, "calib_max_batches", default=500)
force_calibration = dict_get(self.args, "force_calibration", default=False)
cache_file = dict_get(self.args, "cache_file", default="code/rnnt/tensorrt/calibrator.cache")
calib_data_map = dict_get(self.args, "calib_data_map", default="build/preprocessed_data/rnnt_train_clean_512_fp32/val_map_512.txt")
preprocessed_data_dir = dict_get(self.args, "preprocessed_data_dir", default="build/preprocessed_data")
calib_data_dir = os.path.join(preprocessed_data_dir, "rnnt_train_clean_512_fp32/fp32")
calib_data_dir = dict_get(self.args, "calib_data_dir", default=calib_data_dir)
# We can't run with expose_state because we don't have stimulus/calibrationData for hidden state.
if self.expose_state:
raise NotImplementedError("Can't use --calibrate_encoder without --seq_splitting_off")
if self.input_dtype != 'fp32':
print(f"Warning: Not using --input_type=fp32 may result in accuracy degredation and poor calibration performance given fp32 data")
# If FP32/FP16 was set, unflip that flag in builder_config and set the Int8 flag
if self.precision != "int8":
flag_to_flip = trt.BuilderFlag.FP16 if self.precision == "fp16" else trt.BuilderFlag.FP32
self.builder_config.flags = (self.builder_config.flags) & ~(1 << int(flag_to_flip))
self.builder_config.flags = (self.builder_config.flags) | (1 << int(trt.BuilderFlag.INT8))
if calib_batch_size < self.batch_size:
raise RuntimeError(f"Can't run with calibration batch size less than than network batch size: {calib_batch_size} vs. {self.batch_size}!\nThis is tracked by MLPINF-437")
self.calibrator = RNNTCalibrator(calib_batch_size, calib_max_batches,
force_calibration, cache_file,
calib_data_map, calib_data_dir, self.input_dtype)
self.builder_config.int8_calibrator = self.calibrator
self.cache_file = cache_file
self.need_calibration = force_calibration or not os.path.exists(cache_file)
def add_unrolled_rnns(self, num_layers, max_seq_length, input_tensor, length_tensor, input_size, hidden_size, hidden_state_tensor, cell_state_tensor, name):
past_layer = None
for i in range(num_layers):
if past_layer is None:
# For the first layer, set-up inputs
rnn_layer = self.network.add_rnn_v2(input_tensor, 1, hidden_size, max_seq_length, trt.RNNOperation.LSTM)
rnn_layer.seq_lengths = length_tensor
# Note that we don't hook-up argument-state-tensors because
# calib_unroll can only be called with --seq_splitting_off
else:
# Hook-up the past layer
rnn_layer = self.network.add_rnn_v2(past_layer.get_output(0), 1, hidden_size, max_seq_length, trt.RNNOperation.LSTM)
rnn_layer.seq_lengths = length_tensor
rnn_layer.get_output(0).name = f"{name}{i}_output"
rnn_layer.get_output(1).name = f"{name}{i}_hidden"
rnn_layer.get_output(2).name = f"{name}{i}_cell"
# Set the name as expected for weight finding
rnn_layer.name = name
self._init_weights_per_layer(rnn_layer, i, True)
# Now rename the layer for readability
rnn_layer.name= f"{name}{i}"
# Move on to the next layer
past_layer = rnn_layer
return rnn_layer
def _init_weights_per_layer(self, layer, idx, is_unrolled=False):
name = layer.name
# initialization of the gate weights
weight_ih = RNNTBaseBuilder.state_dict[name + '.weight_ih_l' + str(idx)]
weight_ih = weight_ih.chunk(4,0)
weight_hh = RNNTBaseBuilder.state_dict[name + '.weight_hh_l' + str(idx)]
weight_hh = weight_hh.chunk(4,0)
bias_ih = RNNTBaseBuilder.state_dict[name + '.bias_ih_l' + str(idx)]
bias_ih = bias_ih.chunk(4,0)
bias_hh = RNNTBaseBuilder.state_dict[name + '.bias_hh_l' + str(idx)]
bias_hh = bias_hh.chunk(4,0)
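        # PyTorch packs LSTM gate parameters in (input, forget, cell, output) order,
        # so chunks 0..3 above line up with the matching TensorRT RNNGateType below.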
for gate_type in [trt.RNNGateType.INPUT, trt.RNNGateType.CELL, trt.RNNGateType.FORGET, trt.RNNGateType.OUTPUT]:
for is_w in [True, False]:
if is_w:
if (gate_type == trt.RNNGateType.INPUT):
weights = trt.Weights(weight_ih[0].numpy().astype(np.float32))
bias = trt.Weights(bias_ih[0].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.FORGET):
weights = trt.Weights(weight_ih[1].numpy().astype(np.float32))
bias = trt.Weights(bias_ih[1].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.CELL):
weights = trt.Weights(weight_ih[2].numpy().astype(np.float32))
bias = trt.Weights(bias_ih[2].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.OUTPUT):
weights = trt.Weights(weight_ih[3].numpy().astype(np.float32))
bias = trt.Weights(bias_ih[3].numpy().astype(np.float32))
else:
if (gate_type == trt.RNNGateType.INPUT):
weights = trt.Weights(weight_hh[0].numpy().astype(np.float32))
bias = trt.Weights(bias_hh[0].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.FORGET):
weights = trt.Weights(weight_hh[1].numpy().astype(np.float32))
bias = trt.Weights(bias_hh[1].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.CELL):
weights = trt.Weights(weight_hh[2].numpy().astype(np.float32))
bias = trt.Weights(bias_hh[2].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.OUTPUT):
weights = trt.Weights(weight_hh[3].numpy().astype(np.float32))
bias = trt.Weights(bias_hh[3].numpy().astype(np.float32))
layer_idx = idx if not is_unrolled else 0
layer.set_weights_for_gate(layer_idx, gate_type, is_w, weights)
layer.set_bias_for_gate(layer_idx, gate_type, is_w, bias)
def add_rnns(self, num_layers, max_seq_length, input_tensor, length_tensor, input_size, hidden_size, hidden_state_tensor, cell_state_tensor, name):
rnn_layer = self.network.add_rnn_v2(input_tensor, num_layers, hidden_size, max_seq_length, trt.RNNOperation.LSTM)
rnn_layer.seq_lengths = length_tensor
rnn_layer.name = name
# connect the initial hidden/cell state tensors (if they exist)
if hidden_state_tensor : rnn_layer.hidden_state = hidden_state_tensor
if cell_state_tensor : rnn_layer.cell_state = cell_state_tensor
for i in range(rnn_layer.num_layers):
self._init_weights_per_layer(rnn_layer, idx=i)
return rnn_layer
def initialize(self):
super().initialize()
# Create network.
self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
input_tensor = self.network.add_input("input", trt.DataType.FLOAT, (-1, self.max_seq_length, RNNHyperParam.encoder_input_size))
set_tensor_dtype(input_tensor, self.input_dtype, self.input_format)
length_tensor = self.network.add_input("length", trt.DataType.INT32, (-1,))
# compute (seq_length + 1) // 2
one_constant = self.network.add_constant((1,), np.array([1]).astype(np.int32))
one_constant.get_output(0).name = "one_constant"
length_add_one = self.network.add_elementwise(length_tensor, one_constant.get_output(0), trt.ElementWiseOperation.SUM)
length_add_one.get_output(0).name = "length_add_one"
two_constant = self.network.add_constant((1,), np.array([2]).astype(np.int32))
two_constant.get_output(0).name = "two_constant"
length_half = self.network.add_elementwise(length_add_one.get_output(0), two_constant.get_output(0), trt.ElementWiseOperation.FLOOR_DIV)
length_half.get_output(0).name = "length_half"
# state handling
enc_tensor_dict = { 'lower' : dict(), 'upper' : dict() }
for tensor_name in [ 'hidden', 'cell' ] :
if self.expose_state :
enc_tensor_dict['lower'][tensor_name] = self.network.add_input("lower_"+tensor_name, trt.DataType.FLOAT, (-1, RNNHyperParam.enc_pre_rnn_layers, RNNHyperParam.encoder_hidden_size))
enc_tensor_dict['upper'][tensor_name] = self.network.add_input("upper_"+tensor_name, trt.DataType.FLOAT, (-1, RNNHyperParam.enc_post_rnn_layers, RNNHyperParam.encoder_hidden_size))
set_tensor_dtype(enc_tensor_dict['lower'][tensor_name], self.input_dtype, self.input_format)
set_tensor_dtype(enc_tensor_dict['upper'][tensor_name], self.input_dtype, self.input_format)
else :
enc_tensor_dict['lower'][tensor_name] = None
enc_tensor_dict['upper'][tensor_name] = None
# instantiate layers
#
# pre_rnn
encoder_add_rnn_dispatch = self.add_unrolled_rnns if self.unroll else self.add_rnns
encoder_lower = encoder_add_rnn_dispatch(RNNHyperParam.enc_pre_rnn_layers,
self.max_seq_length,
input_tensor,
length_tensor,
RNNHyperParam.encoder_input_size,
RNNHyperParam.encoder_hidden_size,
enc_tensor_dict['lower']['hidden'],
enc_tensor_dict['lower']['cell'],
'encoder.pre_rnn.lstm')
# reshape (stack time x 2)
reshape_layer = self.network.add_shuffle(encoder_lower.get_output(0))
reshape_layer.reshape_dims = trt.Dims((0, self.max_seq_length // 2, RNNHyperParam.encoder_hidden_size * 2))
reshape_layer.name = 'encoder_reshape'
reshape_layer.get_output(0).name = 'encoder_reshape'
        # post_rnn
encoder_upper = encoder_add_rnn_dispatch(RNNHyperParam.enc_post_rnn_layers,
self.max_seq_length // 2,
reshape_layer.get_output(0),
length_half.get_output(0),
RNNHyperParam.encoder_hidden_size * 2,
RNNHyperParam.encoder_hidden_size,
enc_tensor_dict['upper']['hidden'],
enc_tensor_dict['upper']['cell'],
'encoder.post_rnn.lstm')
# Add expected names for "regular" LSTM layers.
if not self.unroll:
encoder_lower.name = 'encoder_pre_rnn'
encoder_lower.get_output(0).name = "encoder_pre_rnn_output"
encoder_lower.get_output(1).name = "encoder_pre_rnn_hidden"
encoder_lower.get_output(2).name = "encoder_pre_rnn_cell"
encoder_upper.name = 'encoder_post_rnn'
encoder_upper.get_output(0).name = "encoder_post_rnn_output"
encoder_upper.get_output(1).name = "encoder_post_rnn_hidden"
encoder_upper.get_output(2).name = "encoder_post_rnn_cell"
# mark outputs
self.network.mark_output(encoder_upper.get_output(0))
set_tensor_dtype(encoder_upper.get_output(0), self.input_dtype, self.input_format)
if self.expose_state :
# lower_hidden
self.network.mark_output(encoder_lower.get_output(1))
set_tensor_dtype(encoder_lower.get_output(1), self.input_dtype, self.input_format)
# upper_hidden
self.network.mark_output(encoder_upper.get_output(1))
set_tensor_dtype(encoder_upper.get_output(1), self.input_dtype, self.input_format)
# lower_cell
self.network.mark_output(encoder_lower.get_output(2))
set_tensor_dtype(encoder_lower.get_output(2), self.input_dtype, self.input_format)
# upper_cell
self.network.mark_output(encoder_upper.get_output(2))
set_tensor_dtype(encoder_upper.get_output(2), self.input_dtype, self.input_format)
## Decoder class
##
class DecoderBuilder(RNNTBaseBuilder):
def __init__(self, args):
super().__init__(args)
self.args = args
self.name = "decoder.plan"
def add_decoder_rnns(self, num_layers, input_tensor, hidden_size, hidden_state_tensor, cell_state_tensor, name):
max_seq_length = 1 # processed single step
if not dict_get(self.args, "decoderPlugin", default=True):
rnn_layer = self.network.add_rnn_v2(input_tensor, num_layers, hidden_size, max_seq_length, trt.RNNOperation.LSTM)
# connect the initial hidden/cell state tensors
rnn_layer.hidden_state = hidden_state_tensor
rnn_layer.cell_state = cell_state_tensor
# initialization of the gate weights
for i in range(num_layers):
weight_ih = RNNTBaseBuilder.state_dict[name + '.weight_ih_l' + str(i)]
weight_ih = weight_ih.chunk(4,0)
weight_hh = RNNTBaseBuilder.state_dict[name + '.weight_hh_l' + str(i)]
weight_hh = weight_hh.chunk(4,0)
bias_ih = RNNTBaseBuilder.state_dict[name + '.bias_ih_l' + str(i)]
bias_ih = bias_ih.chunk(4,0)
bias_hh = RNNTBaseBuilder.state_dict[name + '.bias_hh_l' + str(i)]
bias_hh = bias_hh.chunk(4,0)
for gate_type in [trt.RNNGateType.INPUT, trt.RNNGateType.CELL, trt.RNNGateType.FORGET, trt.RNNGateType.OUTPUT]:
for is_w in [True, False]:
if is_w:
if (gate_type == trt.RNNGateType.INPUT):
weights = trt.Weights(weight_ih[0].numpy().astype(np.float32))
bias = trt.Weights(bias_ih[0].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.FORGET):
weights = trt.Weights(weight_ih[1].numpy().astype(np.float32))
bias = trt.Weights(bias_ih[1].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.CELL):
weights = trt.Weights(weight_ih[2].numpy().astype(np.float32))
bias = trt.Weights(bias_ih[2].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.OUTPUT):
weights = trt.Weights(weight_ih[3].numpy().astype(np.float32))
bias = trt.Weights(bias_ih[3].numpy().astype(np.float32))
else:
if (gate_type == trt.RNNGateType.INPUT):
weights = trt.Weights(weight_hh[0].numpy().astype(np.float32))
bias = trt.Weights(bias_hh[0].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.FORGET):
weights = trt.Weights(weight_hh[1].numpy().astype(np.float32))
bias = trt.Weights(bias_hh[1].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.CELL):
weights = trt.Weights(weight_hh[2].numpy().astype(np.float32))
bias = trt.Weights(bias_hh[2].numpy().astype(np.float32))
elif (gate_type == trt.RNNGateType.OUTPUT):
weights = trt.Weights(weight_hh[3].numpy().astype(np.float32))
bias = trt.Weights(bias_hh[3].numpy().astype(np.float32))
rnn_layer.set_weights_for_gate(i, gate_type, is_w, weights)
rnn_layer.set_bias_for_gate(i, gate_type, is_w, bias)
return rnn_layer
else:
layer = None
plugin = None
plugin_name = "RNNTDecoderPlugin"
# logging.info(trt.get_plugin_registry().plugin_creator_list)
for plugin_creator in trt.get_plugin_registry().plugin_creator_list:
if plugin_creator.name == plugin_name:
logging.info("Decoder Plugin found")
fields = [];
fields.append(trt.PluginField("numLayers", np.array([num_layers], dtype=np.int32), trt.PluginFieldType.INT32))
fields.append(trt.PluginField("hiddenSize", np.array([hidden_size], dtype=np.int32), trt.PluginFieldType.INT32))
fields.append(trt.PluginField("inputSize", np.array([hidden_size], dtype=np.int32), trt.PluginFieldType.INT32))
fields.append(trt.PluginField("dataType", np.array([trt.DataType.HALF], dtype=np.int32), trt.PluginFieldType.INT32))
for layer in range(num_layers):
weights = torch.cat((RNNTBaseBuilder.state_dict[name + '.weight_ih_l' + str(layer)], RNNTBaseBuilder.state_dict[name + '.weight_hh_l' + str(layer)]), 0)
assert(weights.numpy().astype(np.float16).size == 8 * hidden_size * hidden_size)
fields.append(trt.PluginField("weights", weights.numpy().astype(np.float16), trt.PluginFieldType.FLOAT16))
for layer in range(num_layers):
biases = torch.cat((RNNTBaseBuilder.state_dict[name + '.bias_ih_l' + str(layer)], RNNTBaseBuilder.state_dict[name + '.bias_hh_l' + str(layer)]), 0)
fields.append(trt.PluginField("bias", biases.numpy().astype(np.float16), trt.PluginFieldType.FLOAT16))
field_collection = trt.PluginFieldCollection(fields)
plugin = plugin_creator.create_plugin(name=plugin_name, field_collection=field_collection)
inputs = [];
inputs.append(input_tensor)
inputs.append(hidden_state_tensor)
inputs.append(cell_state_tensor)
layer = self.network.add_plugin_v2(inputs, plugin)
break
if not plugin:
logging.error("Plugin not found")
return layer
def initialize(self):
super().initialize()
# Create network.
self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
# Decoder
# Embedding layer : (29 => 320)
# Rnn : LSTM layers=2, h=320
# Embedding layer
# gather layer with LUT of RNNHyperParam.labels_size=29 entries with RNNHyperParam.decoder_input_size=320 size per entry
# blank token does not need to be looked up, whereas the SOS (start-of-sequence) requires all zeros for embed vector
dec_embedding_input = self.network.add_input("dec_embedding_input", trt.DataType.INT32, (-1, 1))
dec_embedding_orig = RNNTBaseBuilder.state_dict["prediction.embed.weight"].numpy().astype(np.float32)
dec_embedding_sos = np.zeros((1, RNNHyperParam.decoder_input_size), dtype=np.float32)
dec_embedding_weights = trt.Weights(np.concatenate((dec_embedding_orig, dec_embedding_sos), axis=0))
dec_embedding_lut = self.network.add_constant( (RNNHyperParam.labels_size, RNNHyperParam.decoder_input_size), dec_embedding_weights)
self.dec_embedding = self.network.add_gather(dec_embedding_lut.get_output(0), dec_embedding_input, axis=0)
self.dec_embedding.name = 'decoder_embedding'
# Rnn layer
dec_rnn_layers = RNNHyperParam.dec_rnn_layers
# Create tensors [ batch, seq=1, input ]
dec_tensor_dict = dict()
# dec_tensor_dict['input'] = self.network.add_input("dec_input", trt.DataType.FLOAT, (-1, 1, RNNHyperParam.decoder_input_size))
dec_tensor_dict['input'] = self.dec_embedding.get_output(0)
dec_tensor_dict['hidden'] = self.network.add_input("hidden", trt.DataType.FLOAT, (-1, dec_rnn_layers, RNNHyperParam.decoder_hidden_size))
dec_tensor_dict['cell'] = self.network.add_input("cell", trt.DataType.FLOAT, (-1, dec_rnn_layers, RNNHyperParam.decoder_hidden_size))
for dec_tensor_name, dec_tensor_val in dec_tensor_dict.items():
# RNN input is an internal layer whose type we should let TRT determine
if dec_tensor_name != 'input':
set_tensor_dtype(dec_tensor_val, self.input_dtype, self.input_format)
# Instantiate RNN
# logging.info("dec_input_size = {:}".format(dec_input_size))
logging.info("dec_embed_lut OUT tensor shape = {:}".format(dec_embedding_lut.get_output(0).shape))
logging.info("dec_embedding OUT tensor shape = {:}".format(self.dec_embedding.get_output(0).shape))
self.decoder = self.add_decoder_rnns(dec_rnn_layers,
dec_tensor_dict['input'],
RNNHyperParam.decoder_hidden_size,
dec_tensor_dict['hidden'],
dec_tensor_dict['cell'],
'prediction.dec_rnn.lstm')
self.decoder.name = 'decoder_rnn'
# Determine outputs (and override size)
# output
# hidden
# cell
for output_idx in range(3) :
output_tensor = self.decoder.get_output(output_idx)
self.network.mark_output(output_tensor)
set_tensor_dtype(output_tensor, self.input_dtype, self.input_format)
## Joint class
##
# Family of network components for Joint
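# The joint FC1 weight (512 x 1344) is split column-wise: the first 1024 input
# columns act on the encoder output and the remaining 320 on the decoder output,
# so each half can be computed separately and the partial products summed later
# (see JointBackendBuilder / JointBuilder below).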
class JointNetComponents() :
def create_split_fc1_layer(layer_name,
network,
input_tensor,
input_size,
output_size,
weight_offset,
joint_fc1_weight_ckpt,
joint_fc1_bias_ckpt,
add_bias = False) :
# detach weight (using weight_offset)
joint_fc1_kernel_np = np.zeros((output_size, input_size))
for i in range(output_size):
for j in range(input_size):
joint_fc1_kernel_np[i][j] = joint_fc1_weight_ckpt.numpy()[i][j + weight_offset]
joint_fc1_kernel = joint_fc1_kernel_np.astype(np.float32)
# detach bias (if available)
joint_fc1_bias_np = np.zeros((output_size))
if add_bias :
for i in range(output_size):
joint_fc1_bias_np[i] = joint_fc1_bias_ckpt.numpy()[i]
joint_fc1_bias = joint_fc1_bias_np.astype(np.float32)
# instantiate FC layer
if add_bias :
joint_fc1 = network.add_fully_connected(
input_tensor,
output_size,
joint_fc1_kernel,
joint_fc1_bias)
else :
joint_fc1 = network.add_fully_connected(
input_tensor,
output_size,
joint_fc1_kernel)
# epilogue
joint_fc1.name = layer_name
return joint_fc1
# Detached FC1_a and FC1_b builder
class JointFc1Builder(RNNTBaseBuilder):
def __init__(self, name, port, args):
super().__init__(args)
        self.name = name + ".plan"
if (port != 'encoder' and port != 'decoder'):
logging.info("JointFc1Builder: unrecognized port")
self.port = port
def initialize(self):
super().initialize()
# Create network.
self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
# Create tensors [ batch, seq=1, input ]
joint_tensor_dict = dict()
if self.port == 'encoder' :
joint_tensor_dict['enc_input'] = self.network.add_input("enc_input", trt.DataType.FLOAT, (-1, RNNHyperParam.encoder_hidden_size, 1, 1))
if self.port == 'decoder' :
joint_tensor_dict['dec_input'] = self.network.add_input("dec_input", trt.DataType.FLOAT, (-1, RNNHyperParam.decoder_hidden_size, 1, 1))
for joint_tensor in joint_tensor_dict.values() :
set_tensor_dtype(joint_tensor, self.input_dtype, "hwc8") # hwc8 to avoid reformatting
### # FC1 + bias :
joint_fc1_output_size = RNNHyperParam.joint_hidden_size
joint_fc1_weight_ckpt = RNNTBaseBuilder.state_dict['joint_net.0.weight']
joint_fc1_bias_ckpt = RNNTBaseBuilder.state_dict['joint_net.0.bias']
# Instantiate two split FC1's : one for the encoder and one for the decoder
if self.port == 'encoder' :
joint_fc1_a = JointNetComponents.create_split_fc1_layer('joint_fc1_a',
self.network,
joint_tensor_dict['enc_input'],
RNNHyperParam.encoder_hidden_size,
joint_fc1_output_size,
0,
joint_fc1_weight_ckpt,
joint_fc1_bias_ckpt,
True)
final_output = joint_fc1_a.get_output(0)
if self.port == 'decoder' :
joint_fc1_b = JointNetComponents.create_split_fc1_layer('joint_fc1_b',
self.network,
joint_tensor_dict['dec_input'],
RNNHyperParam.decoder_hidden_size,
joint_fc1_output_size,
RNNHyperParam.encoder_hidden_size,
joint_fc1_weight_ckpt,
joint_fc1_bias_ckpt)
final_output = joint_fc1_b.get_output(0)
# set output properties
self.network.mark_output(final_output)
set_tensor_dtype(final_output, self.input_dtype, "hwc8") # hwc8 to avoid reformatting
# fc1_a and fc1_b classes for encoder / decoder
JointFc1_A_Builder = lambda args: JointFc1Builder("fc1_a", "encoder", args)
JointFc1_B_Builder = lambda args: JointFc1Builder("fc1_b", "decoder", args)
# Detached Joint backed builder (FC1_SUM + FC1_RELU + FC2 + topK)
class JointBackendBuilder(RNNTBaseBuilder):
def __init__(self, args):
super().__init__(args)
self.name = "joint_backend.plan"
self.dump_joint_fc2_weights = not dict_get(args, "no_dump_joint_fc2_weights", default=False)
def initialize(self):
super().initialize()
# Create network.
self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
# Create tensors [ batch, seq=1, input ]
joint_fc1_output_size = RNNHyperParam.joint_hidden_size
joint_tensor_dict = dict()
joint_tensor_dict['joint_fc1_a_output'] = self.network.add_input("joint_fc1_a_output", trt.DataType.FLOAT, (-1, 1, joint_fc1_output_size))
joint_tensor_dict['joint_fc1_b_output'] = self.network.add_input("joint_fc1_b_output", trt.DataType.FLOAT, (-1, 1, joint_fc1_output_size))
for joint_tensor in joint_tensor_dict.values() :
set_tensor_dtype(joint_tensor, self.input_dtype, self.input_format)
### # element_wise SUM
joint_fc1_sum = self.network.add_elementwise(joint_tensor_dict['joint_fc1_a_output'], joint_tensor_dict['joint_fc1_b_output'], trt.ElementWiseOperation.SUM);
joint_fc1_sum.name = 'joint_fc1_sum'
### # reLU
joint_relu = self.network.add_activation(joint_fc1_sum.get_output(0), trt.ActivationType.RELU)
joint_relu.name = 'joint_relu'
### # FC2 + bias :
joint_fc2_input_size = joint_fc1_output_size
joint_fc2_output_size = RNNHyperParam.labels_size
joint_fc2_weight_ckpt = RNNTBaseBuilder.state_dict['joint_net.3.weight']
joint_fc2_bias_ckpt = RNNTBaseBuilder.state_dict['joint_net.3.bias']
joint_fc2_kernel = trt.Weights(joint_fc2_weight_ckpt.numpy().astype(np.float32))
joint_fc2_bias = trt.Weights(joint_fc2_bias_ckpt.numpy().astype(np.float32))
joint_fc2_shuffle = self.network.add_shuffle(joint_relu.get_output(0)) # Add an extra dimension for FC processing
joint_fc2_shuffle.reshape_dims = ( -1, joint_fc2_input_size, 1, 1 )
joint_fc2_shuffle.name = 'joint_fc2_shuffle'
joint_fc2 = self.network.add_fully_connected(
joint_fc2_shuffle.get_output(0),
joint_fc2_output_size,
joint_fc2_kernel,
joint_fc2_bias)
joint_fc2.name = 'joint_fc2'
# opt = GREEDY
# -------------
# - Do not use softmax layer
# - Use TopK (K=1) GPU sorting
# TopK (k=1)
red_dim = 1 << 1
joint_top1 = self.network.add_topk(joint_fc2.get_output(0), trt.TopKOperation.MAX, 1, red_dim)
joint_top1.name = 'joint_top1'
# Final output
# final_output = joint_fc2.get_output(0)
final_output = joint_top1.get_output(1)
self.network.mark_output(final_output)
# epilogue: dump fc2 weights and bias if required
if self.dump_joint_fc2_weights:
joint_fc2_weight_ckpt.numpy().astype(np.float16).tofile(self.engine_dir+'/joint_fc2_weight_ckpt.fp16.dat')
joint_fc2_bias_ckpt.numpy().astype(np.float16).tofile(self.engine_dir+'/joint_fc2_bias_ckpt.fp16.dat')
joint_fc2_weight_ckpt.numpy().astype(np.float32).tofile(self.engine_dir+'/joint_fc2_weight_ckpt.fp32.dat')
joint_fc2_bias_ckpt.numpy().astype(np.float32).tofile(self.engine_dir+'/joint_fc2_bias_ckpt.fp32.dat')
# Full Joint builder: FC1 + FC2 + softmax/topK
class JointBuilder(RNNTBaseBuilder):
def __init__(self, args):
super().__init__(args)
self.name = "joint.plan"
def initialize(self):
super().initialize()
# Create network.
self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
# Joint: [ BS, seq=1, channel ]
# Concat : (enc:1024 + pred:320 )
# FC1 : FC 1344 x 512
# bias1 :
# reLU : reLU
# FC2 : FC 512 x 29
# bias2 :
# Create tensors [ batch, seq=1, input ]
joint_tensor_dict = dict()
joint_tensor_dict['enc_input'] = self.network.add_input("enc_input", trt.DataType.FLOAT, (-1, 1, RNNHyperParam.encoder_hidden_size))
joint_tensor_dict['dec_input'] = self.network.add_input("dec_input", trt.DataType.FLOAT, (-1, 1, RNNHyperParam.decoder_hidden_size))
for joint_tensor in joint_tensor_dict.values() :
set_tensor_dtype(joint_tensor, self.input_dtype, self.input_format)
### # FC1 + bias :
joint_fc1_output_size = RNNHyperParam.joint_hidden_size
joint_fc1_weight_ckpt = RNNTBaseBuilder.state_dict['joint_net.0.weight']
joint_fc1_bias_ckpt = RNNTBaseBuilder.state_dict['joint_net.0.bias']
# Instantiate two split FC1's : one for the encoder and one for the decoder
joint_fc1_a = JointNetComponents.create_split_fc1_layer('joint_fc1_a',
self.network,
joint_tensor_dict['enc_input'],
RNNHyperParam.encoder_hidden_size,
joint_fc1_output_size,
0,
joint_fc1_weight_ckpt,
joint_fc1_bias_ckpt,
True)
joint_fc1_b = JointNetComponents.create_split_fc1_layer('joint_fc1_b',
self.network,
joint_tensor_dict['dec_input'],
RNNHyperParam.decoder_hidden_size,
joint_fc1_output_size,
RNNHyperParam.encoder_hidden_size,
joint_fc1_weight_ckpt,
joint_fc1_bias_ckpt)
### # element_wise SUM
joint_fc1_sum = self.network.add_elementwise(joint_fc1_a.get_output(0), joint_fc1_b.get_output(0), trt.ElementWiseOperation.SUM);
joint_fc1_sum.name = 'joint_fc1_sum'
### # reLU
joint_relu = self.network.add_activation(joint_fc1_sum.get_output(0), trt.ActivationType.RELU)
joint_relu.name = 'joint_relu'
### # FC2 + bias :
joint_fc2_input_size = joint_fc1_output_size
joint_fc2_output_size = RNNHyperParam.labels_size
joint_fc2_weight_ckpt = RNNTBaseBuilder.state_dict['joint_net.3.weight']
joint_fc2_bias_ckpt = RNNTBaseBuilder.state_dict['joint_net.3.bias']
joint_fc2_kernel = trt.Weights(joint_fc2_weight_ckpt.numpy().astype(np.float32))
joint_fc2_bias = trt.Weights(joint_fc2_bias_ckpt.numpy().astype(np.float32))
joint_fc2_shuffle = self.network.add_shuffle(joint_relu.get_output(0)) # Add an extra dimension for FC processing
joint_fc2_shuffle.reshape_dims = ( -1, joint_fc2_input_size, 1, 1 )
joint_fc2_shuffle.name = 'joint_fc2_shuffle'
joint_fc2 = self.network.add_fully_connected(
joint_fc2_shuffle.get_output(0),
joint_fc2_output_size,
joint_fc2_kernel,
joint_fc2_bias)
joint_fc2.name = 'joint_fc2'
# opt = DEFAULT
# -------------
# - Use softmax layer
# - No GPU sorting
if self.opt == 'default' :
# Softmax
softmax_layer = self.network.add_softmax(joint_fc2.get_output(0))
softmax_layer.name = 'joint_softmax'
# Final output
final_output = softmax_layer.get_output(0)
self.network.mark_output(final_output)
set_tensor_dtype(final_output, self.input_dtype, self.input_format)
# opt = GREEDY
# -------------
# - Do not use softmax layer
# - Use TopK (K=1) GPU sorting
elif self.opt == 'greedy' :
# TopK (k=1)
red_dim = 1 << 1
joint_top1 = self.network.add_topk(joint_fc2.get_output(0), trt.TopKOperation.MAX, 1, red_dim)
joint_top1.name = 'joint_top1'
# Final output
# final_output = joint_fc2.get_output(0)
final_output = joint_top1.get_output(1)
self.network.mark_output(final_output)
## Isel class
##
class IselBuilder(RNNTBaseBuilder):
def __init__(self, args):
super().__init__(args)
self.name = "isel.plan"
def initialize(self):
super().initialize()
# Create network.
self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
# Isel:
# output_hidden: [ BS, layers=2, decoder_hidden_size=320 ]
# output_cell : [ BS, layers=2, decoder_hidden_size=320 ]
#
# input_select : [ BS, 1, 1 ]
# input0_hidden: [ BS, layers=2, decoder_hidden_size=320 ]
# input0_cell : [ BS, layers=2, decoder_hidden_size=320 ]
# input1_hidden: [ BS, layers=2, decoder_hidden_size=320 ]
# input1_cell : [ BS, layers=2, decoder_hidden_size=320 ]
# Declare input tensors: port 0
input0_hidden = self.network.add_input("input0_hidden", trt.DataType.FLOAT, (-1, RNNHyperParam.dec_rnn_layers, RNNHyperParam.decoder_hidden_size))
input0_cell = self.network.add_input("input0_cell", trt.DataType.FLOAT, (-1, RNNHyperParam.dec_rnn_layers, RNNHyperParam.decoder_hidden_size))
if self.opt == "greedy" :
input0_winner = self.network.add_input("input0_winner", trt.DataType.INT32, (-1, 1, 1))
# Declare input tensors: port 1
input1_hidden = self.network.add_input("input1_hidden", trt.DataType.FLOAT, (-1, RNNHyperParam.dec_rnn_layers, RNNHyperParam.decoder_hidden_size))
input1_cell = self.network.add_input("input1_cell", trt.DataType.FLOAT, (-1, RNNHyperParam.dec_rnn_layers, RNNHyperParam.decoder_hidden_size))
if self.opt == "greedy" :
input1_winner = self.network.add_input("input1_winner", trt.DataType.INT32, (-1, 1, 1))
# Reformat tensors
for input_tensor in ( input0_hidden, input0_cell, input1_hidden, input1_cell) :
set_tensor_dtype(input_tensor, self.input_dtype, self.input_format)
# One Iselect layer per component
if self.input_dtype != "fp16" or self.opt != "greedy":
# logging.info("Not using select plugin due to input datatype not being fp16 or opt not being greedy")
# assert(False);
# Select tensor
input_select = self.network.add_input("input_select", trt.DataType.BOOL, (-1, 1, 1))
isel_hidden = self.network.add_select(input_select, input0_hidden, input1_hidden)
isel_cell = self.network.add_select(input_select, input0_cell, input1_cell)
isel_hidden.name = 'Iselect Dec hidden'
isel_cell.name = 'Iselect Dec cell'
if self.opt == "greedy" :
isel_winner = self.network.add_select(input_select, input0_winner, input1_winner)
isel_winner.name = 'Iselect Dec winner'
# Declare outputs
output_hidden = isel_hidden.get_output(0)
output_cell = isel_cell.get_output(0)
self.network.mark_output(output_hidden)
self.network.mark_output(output_cell)
set_tensor_dtype(output_hidden, self.input_dtype, self.input_format)
set_tensor_dtype(output_cell, self.input_dtype, self.input_format)
if self.opt == "greedy" :
output_winner = isel_winner.get_output(0)
self.network.mark_output(output_winner)
else:
sel3Layer = None
plugin = None
plugin_name = "RNNTSelectPlugin"
# Select tensor
input_select = self.network.add_input("input_select", trt.DataType.INT32, (-1, 1, 1))
for plugin_creator in trt.get_plugin_registry().plugin_creator_list:
if plugin_creator.name == plugin_name:
logging.info("Select Plugin found")
fields = [];
field_collection = trt.PluginFieldCollection(fields)
plugin = plugin_creator.create_plugin(name=plugin_name, field_collection=field_collection)
inputs = [];
inputs.append(input_select)
inputs.append(input0_hidden)
inputs.append(input1_hidden)
inputs.append(input0_cell)
inputs.append(input1_cell)
inputs.append(input0_winner)
inputs.append(input1_winner)
sel3Layer = self.network.add_plugin_v2(inputs, plugin)
sel3Layer.name = 'Select3'
break
if not plugin:
logging.error("Select plugin not found")
# Declare outputs
output_hidden = sel3Layer.get_output(0)
output_cell = sel3Layer.get_output(1)
self.network.mark_output(output_hidden)
self.network.mark_output(output_cell)
set_tensor_dtype(output_hidden, self.input_dtype, self.input_format)
set_tensor_dtype(output_cell, self.input_dtype, self.input_format)
output_winner = sel3Layer.get_output(2)
self.network.mark_output(output_winner)
set_tensor_dtype(output_winner, "int32", self.input_format)
## Igather class
##
class IgatherBuilder(RNNTBaseBuilder):
def __init__(self, args):
super().__init__(args)
self.name = "igather.plan"
def initialize(self):
super().initialize()
# Create network.
self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
# Igather:
# encoder_input : [ BS, SEQ//2=1152//2, encoder_hidden_size=1024 ] native
# t_coordinate : [ BS, 1, ] int32
#
# igather_output : [ BS, 1, encoder_hidden_size=1024 ] native
# Declare input tensors
encoder_input = self.network.add_input("encoder_input", trt.DataType.FLOAT, (-1, self.max_seq_length // 2, RNNHyperParam.encoder_hidden_size))
# t_coordinate = self.network.add_input("t_coordinate", trt.DataType.INT32, (-1, 1))
# t_coordinate = self.network.add_input("t_coordinate", trt.DataType.INT32, trt.Dims([-1]))
t_coordinate = self.network.add_input("t_coordinate", trt.DataType.INT32, trt.Dims([-1]))
set_tensor_dtype(encoder_input, self.input_dtype, self.input_format)
# igather_layer = self.network.add_gather(encoder_input, t_coordinate, axis=0)
igather_layer = self.network.add_gather(encoder_input, t_coordinate, axis=1)
igather_layer.name = "Igather joint cell"
igather_layer.num_elementwise_dims = 1
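        # num_elementwise_dims=1 pairs the leading (batch) dimension of the indices with
        # the data, so each sample gathers its own time step from its own encoder output.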
# Declare outputs
igather_output = igather_layer.get_output(0)
self.network.mark_output(igather_output)
set_tensor_dtype(igather_output, self.input_dtype, self.input_format)
## Main methods
##
class DisaggregatedJointBuilder(MultiBuilder):
builders = {
"joint_fc1_a": JointFc1_A_Builder,
"joint_fc2_b": JointFc1_B_Builder,
"joint_backend": JointBackendBuilder,
}
def __init__(self, args):
super().__init__(DisaggregatedJointBuilder.builders.values(), args)
class RNNTBuilder(MultiBuilder):
builders = {
"encoder": EncoderBuilder,
"decoder": DecoderBuilder,
"isel": IselBuilder,
"igather": IgatherBuilder,
}
def __init__(self, args):
super().__init__(RNNTBuilder.builders.values(), args)
audio_fp16_input = dict_get(args, "audio_fp16_input", default=True)
# These flags are only exposed if this file is run directly, not through the Makefile pipeline
topology = dict_get(args, "topology", default="build_all")
disagg_joint = dict_get(args, "disaggregated_joint", default=True)
if disagg_joint:
self.builders.append(DisaggregatedJointBuilder)
else:
self.builders.append(JointBuilder)
# topology overrides which builders we want to build
if topology in RNNTBuilder.builders:
self.builders = [ RNNTBuilder.builders[topology] ]
elif topology == "joint":
self.builders = [ JointBuilder ]
elif topology == "build_all":
# This case is here to explicitly say that it is the default case.
            pass
else:
raise(Exception("Unknown topology: {}".format(topology)))
if not os.path.exists("build/bin/dali"):
os.makedirs("build/bin/dali")
filename = "build/bin/dali/dali_pipeline_gpu_{:}.pth".format("fp16" if audio_fp16_input else "fp32")
dali_pipeline = DALIInferencePipeline.from_config(
device="gpu",
config=dict(), # Default case
device_id=0,
batch_size=16,
total_samples=16, # Unused, can be set arbitrarily
num_threads=2,
audio_fp16_input=audio_fp16_input
)
dali_pipeline.serialize(filename=filename)
def calibrate(self):
enc_calib_args = dict(self.args) # Make copy so we don't overwrite anything
# These flags are required to run encoder in calibration mode
enc_calib_args.update({
"seq_splitting_off": True,
"calibrate_encoder": True,
"input_dtype": "fp32",
"max_seq_length": 512,
"force_calibration": True,
"calib_max_batches": 30,
"batch_size": 100,
"enc_batch_size": 100,
"calib_batch_size": 100,
"calib_data_map": dict_get(self.args, "calib_data_map", default="data_maps/rnnt_train_clean_512/val_map.txt"),
"preprocessed_data_dir": dict_get(os.environ, "PREPROCESSED_DATA_DIR", default="build/preprocessed_data")
})
RNNTBuilder.builders["encoder"](enc_calib_args).calibrate()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=2048)
parser.add_argument("--enc_batch_size", type=int, default=None)
parser.add_argument("--max_seq_length", type=int, default=128)
parser.add_argument("--engine_dir", default="build/engines/rnnt")
parser.add_argument("--config_ver", default="default")
parser.add_argument("--verbose_nvtx", action="store_true")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("--precision", choices=["fp32", "fp16", "int8"], default="fp16")
parser.add_argument("--input_dtype", choices=["fp32", "fp16", "int8"], default="fp16")
parser.add_argument("--audio_fp16_input", type=bool, default=True)
parser.add_argument("--input_format", choices=["linear", "hwc8", "chw4", "chw32"], default="linear")
parser.add_argument("--topology", default="build_all", help="Options: encoder/decoder/joint/isel/build_all")
parser.add_argument("--opt", choices=["default", "greedy"], default="greedy")
parser.add_argument("--decoderPlugin", default=True, help="Options: True/False")
parser.add_argument("--seq_splitting_off", action="store_true")
parser.add_argument("--disaggregated_joint", default=True, help="Options: True/False")
parser.add_argument("--no_dump_joint_fc2_weights", action="store_true")
parser.add_argument("--system_id", default="TitanRTX")
parser.add_argument("--scenario", default="Offline")
parser.add_argument("--calibrate_encoder", action="store_true", help="Overrides precision settings for encoder to int8. Must be used with --seq_splitting_off and --input_dtype=fp32. Ensure that max_seq_length is high enough for calibration data. Uses --calib_* parameters for configuration. Changes network description by expanding LSTMs in encoder")
parser.add_argument("--calib_max_batches", type=int, default=100)
parser.add_argument("--calib_batch_size", type=int, default=100)
parser.add_argument("--force_calibration", action="store_true")
parser.add_argument("--cache_file", type=str, default="code/rnnt/tensorrt/calibrator.cache")
parser.add_argument("--calib_data_map", type=str, default="build/preprocessed_data/rnnt_train_clean_512_fp32/val_map_512.txt")
parser.add_argument("--calib_data_dir", type=str, default="build/preprocessed_data/rnnt_train_clean_512_fp32/fp32")
args = vars(parser.parse_known_args()[0])
builder = RNNTBuilder(args)
builder.build_engines()
if __name__ == "__main__":
main()
| apache-2.0 |
olavo-digiorgi/mongo-web-shell | tests/__init__.py | 7 | 1316 | # Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import defaultTestLoader, TestCase
from mongows import create_app
app = create_app()
app.testing = True
app.config['QUOTA_NUM_COLLECTIONS'] = None
class MongoWSTestCase(TestCase):
"""A generic test case for the mongows package."""
def setUp(self):
self.real_app = app
self.app = app.test_client()
def tearDown(self):
pass
def load_tests(loader, tests, pattern):
"""Returns the test modules for the mongows package.
The expected output of this function is defined by the unittest module's
    load_tests protocol. unittest.main() will run tests on the modules
returned by this function.
"""
return defaultTestLoader.discover(__name__)
| apache-2.0 |
rynge/cvmfs-singularity-sync | cleanup.py | 1 | 5322 | #!/usr/bin/env python
"""
Cleanup for Singularity container
Scan the images in the singularity CVMFS. If an image directory has not been "linked" to for 2 days,
remove the image directory.
Maintains state in a file in the root singularity directory named .missing_links.json
"""
import glob
import os
import json
import shutil
import argparse
import time
from datetime import datetime, timedelta
# JSON structure:
# {
# "missing_links": {
# "/cvmfs/singularity.opensciencegrid.org/.images/7d/ba009871baa50e01d655a80f79728800401bbd0f5e7e18b5055839e713c09f": "<timestamp_last_linked>"
# ...
# }
# }
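# For example, an image directory whose symlink disappeared at Unix time 1672531200
# would be recorded as {"missing_links": {"<image_dir>": 1672531200}} and removed by
# cleanup() once it has stayed unlinked for more than `delay` days.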
def remove_unlisted_images(current_images, singularity_base, test=False):
"""
Remove the images that are not in the current list
"""
# Get all the image paths
named_image_dirs = set()
for subdir, dirs, files in os.walk(singularity_base):
try:
images_index = dirs.index(".images")
del dirs[images_index]
except ValueError as ve:
pass
for directory in dirs:
path = os.path.join(subdir, directory)
if os.path.islink(path):
named_image_dirs.add(path)
# Compare the list of current images with the list of images from the FS
for image in current_images:
# Always has the registry as the first entry, remove it
image_dir = image.split('/', 1)[-1]
full_image_dir = os.path.join(singularity_base, image_dir)
if full_image_dir in named_image_dirs:
named_image_dirs.remove(full_image_dir)
# named_image_dirs should now only contain containers that are
# not in the images
for image_dir in named_image_dirs:
print("Removing deleted image: %s" % image_dir)
if not test:
try:
os.unlink(image_dir)
except OSError as e:
print("Failed to remove deleted image: %s" % e)
def cleanup(delay=2, test=False,
singularity_base='/cvmfs/singularity.opensciencegrid.org',
max_per_cycle=50):
'''Clean up unlinked singularity images'''
json_location = os.path.join(singularity_base, '.missing_links.json')
# Read in the old json, if it exists
json_missing_links = {}
try:
with open(json_location) as json_file:
json_missing_links = json.load(json_file)['missing_links']
except (IOError, ValueError):
# File is missing, unreadable, or damaged
pass
# Get all the images in the repo
# Walk the directory /cvmfs/singularity.opensciencegrid.org/.images/*
image_dirs = glob.glob(os.path.join(singularity_base, '.images/*/*'))
# Walk the named image dirs
named_image_dirs = []
for subdir, dirs, files in os.walk(singularity_base):
try:
images_index = dirs.index(".images")
del dirs[images_index]
except ValueError as ve:
pass
for directory in dirs:
path = os.path.join(subdir, directory)
if os.path.islink(path):
named_image_dirs.append(path)
# For named image dir, look at the what the symlink points at
for named_image in named_image_dirs:
link_target = os.readlink(named_image)
while link_target in image_dirs:
image_dirs.remove(link_target)
# Remove linked image from json (in case link is restored)
json_missing_links.pop(link_target, None)
# Now, for each image, see if it's in the json
for image_dir in image_dirs:
if image_dir not in json_missing_links:
# Add it to the json
print("Newly found missing link: %s" % (image_dir))
json_missing_links[image_dir] = int(time.time())
# Loop through the json missing links, removing directories if over the `delay` days
expiry = datetime.now() - timedelta(days=delay)
images_removed = 0
for image_dir, last_linked in list(json_missing_links.items()):
date_last_linked = datetime.fromtimestamp(last_linked)
if date_last_linked < expiry:
# Confirm that we're inside the managed directory
if not image_dir.startswith(singularity_base):
continue
# Remove the directory
print("Removing missing link: %s" % image_dir)
if not test:
try:
shutil.rmtree(image_dir)
del json_missing_links[image_dir]
except OSError as e:
print("Failed to remove missing link: %s" % e)
images_removed += 1
if images_removed >= max_per_cycle:
print("Reached limit of cleaning %d images. Stopping cleanup cycle." % images_removed)
break
# Write out the end json
with open(json_location, 'w') as json_file:
json.dump({"missing_links": json_missing_links}, json_file)
def main():
'''Main function'''
args = parse_args()
cleanup(test=args.test)
def parse_args():
'''Parse CLI options'''
parser = argparse.ArgumentParser()
parser.add_argument('--test', action='store_true',
help="Don't remove files, but go through the motions of removing them.")
return parser.parse_args()
if __name__ == "__main__":
main()
| apache-2.0 |
cheekiatng/titanium_mobile | support/iphone/provisioner.py | 34 | 3613 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Install a provisioning profile
#
import os, sys, subprocess, re, time, poorjson, types
from xml.dom.minidom import parseString
import codecs
from OpenSSL import crypto
def dequote(s):
if s[0:1] == '"':
return s[1:-1]
return s
def getText(nodelist):
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
def make_map(dict):
props = {}
curkey = None
for i in dict.childNodes:
if i.nodeType == 1:
if i.nodeName == 'key':
curkey = str(getText(i.childNodes)).strip()
elif i.nodeName == 'dict':
props[curkey] = make_map(i)
curkey = None
elif i.nodeName == 'array':
s = i.getElementsByTagName('string')
if len(s):
txt = ''
for t in s:
txt+=getText(t.childNodes)
props[curkey]=txt
else:
props[curkey]=None
curkey = None
else:
props[curkey] = getText(i.childNodes)
curkey = None
return props
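# For example, <dict><key>Name</key><string>My Profile</string></dict> becomes
# {'Name': 'My Profile'}; nested <dict> values recurse, and an <array> of <string>
# elements is flattened into one concatenated string.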
def find_dict_element(dict,name):
found = False
for i in dict.childNodes:
if i.nodeType == 1:
if i.nodeName == 'key':
if str(getText(i.childNodes)).strip() == name:
found = True
elif found:
return i
return None
def get_cert(dict):
certs_array = find_dict_element(dict, 'DeveloperCertificates')
if certs_array:
certs_array = certs_array.getElementsByTagName('data')
if not certs_array or not len(certs_array):
return None
cert_text = str(getText(certs_array[0].childNodes)).strip()
cert_text = "-----BEGIN CERTIFICATE-----\n" + cert_text + "\n-----END CERTIFICATE-----\n"
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_text)
return cert
def main(args):
if len(args)!=2:
print "%s <provisioning_file>" % os.path.basename(args[0])
sys.exit(1)
try:
xml = os.path.abspath(os.path.expanduser(dequote(args[1].decode("utf-8"))))
f = open(xml,'rb').read()
b = f.index('<?xml')
e = f.index('</plist>')
xml_content = f[b:e+8]
dom = parseString(xml_content)
dict = dom.getElementsByTagName('dict')[0]
props = make_map(dict)
profile_type = 'unknown'
if len(re.findall('ProvisionedDevices',xml_content)) > 0:
profile_type = 'development'
try:
cert = get_cert(dict)
if cert and re.search('Distribution:', cert.get_subject().commonName):
profile_type = 'adhoc'
except Exception, e:
sys.stderr.write('ERROR: %s\n' % str(e))
else:
profile_type = 'distribution'
name = props['Name']
name = name.decode('string_escape').decode('utf-8')
entitlements = props['Entitlements']
appid = entitlements['application-identifier']
appid_prefix = props['ApplicationIdentifierPrefix']
uuid = props['UUID']
bundle_id = appid.replace(appid_prefix+'.','')
# check to see if xcode is already running
output = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE).communicate()[0]
is_xcode = re.findall(r'Xcode.app',output)
xcode = len(is_xcode) > 0
# now we need to install the cert
# we essentially open xcode causing the cert to be installed
# automagically (but -g tells it to stay in the background)
cmd = "open -g \"%s\"" % xml
os.system(cmd)
# only kill Xcode if it wasn't already running
if xcode == False:
# give it a sec to install before killing it
time.sleep(1.5)
cmd = "killall Xcode"
os.system(cmd)
print poorjson.PoorJSON().dump({'type':profile_type,'appid':bundle_id, 'prefix':appid_prefix, 'name':name, 'uuid': uuid})
sys.exit(0)
except Exception, e:
print e
sys.exit(10)
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 |
arokem/nipype | nipype/interfaces/fsl/tests/test_auto_WarpUtils.py | 9 | 1534 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.utils import WarpUtils
def test_WarpUtils_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='--in=%s',
mandatory=True,
),
knot_space=dict(argstr='--knotspace=%d,%d,%d',
),
out_file=dict(argstr='--out=%s',
name_source=['in_file'],
output_name='out_file',
position=-1,
),
out_format=dict(argstr='--outformat=%s',
),
out_jacobian=dict(argstr='--jac=%s',
),
output_type=dict(),
reference=dict(argstr='--ref=%s',
mandatory=True,
),
terminal_output=dict(nohash=True,
),
warp_resolution=dict(argstr='--warpres=%0.4f,%0.4f,%0.4f',
),
with_affine=dict(argstr='--withaff',
),
write_jacobian=dict(mandatory=True,
usedefault=True,
),
)
inputs = WarpUtils.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_WarpUtils_outputs():
output_map = dict(out_file=dict(),
out_jacobian=dict(),
)
outputs = WarpUtils.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
masters3d/coursebuilder-masters3d | modules/announcements/samples.py | 32 | 3214 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample announcements."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
SAMPLE_ANNOUNCEMENT_1 = {
'edit_url': None,
'title': 'Example Announcement',
'date': datetime.date(2012, 10, 6),
'is_draft': False,
'html': """
<br>Certificates will be e-mailed to qualifying participants by
Friday, October 12.
<br>
<br>Do you want to check your assessment scores? Visit the
<a href="student/home">"My profile"</a> page!</p>
"""}
SAMPLE_ANNOUNCEMENT_2 = {
'edit_url': None,
'title': 'Welcome to Class 6 and the Post-class Assessment',
'date': datetime.date(2012, 10, 5),
'is_draft': True,
'html': """
<br>Welcome to the final class! <a href="class?class=6"> Class 6</a>
focuses on combining the skills you have learned throughout the class
to maximize the effectiveness of your searches.
<br>
<br><b>Customize Your Experience</b>
<br>You can customize your experience in several ways:
<ul>
<li>You can watch the videos multiple times for a deeper understanding
of each lesson. </li>
<li>You can read the text version for each lesson. Click the button
above the video to access it.</li>
<li>Lesson activities are designed for multiple levels of experience.
The first question checks your recall of the material in the video;
the second question lets you verify your mastery of the lesson; the
third question is an opportunity to apply your skills and share your
experiences in the class forums. You can answer some or all of the
questions depending on your familiarity and interest in the topic.
Activities are not graded and do not affect your final grade. </li>
<li>We'll also post extra challenges in the forums for people who seek
additional opportunities to practice and test their new skills!</li>
</ul>
<br><b>Forum</b>
<br>Apply your skills, share with others, and connect with your peers
and course staff in the <a href="forum">forum.</a> Discuss your favorite
search tips and troubleshoot technical issues. We'll also post bonus
videos and challenges there!
<p> </p>
<p>For an optimal learning experience, please plan to use the most
recent version of your browser, as well as a desktop, laptop or a tablet
computer instead of your mobile phone.</p>
"""}
SAMPLE_ANNOUNCEMENTS = [SAMPLE_ANNOUNCEMENT_1, SAMPLE_ANNOUNCEMENT_2]
| apache-2.0 |
alilotfi/django | tests/urlpatterns_reverse/views.py | 67 | 1466 | from functools import partial, update_wrapper
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse
from django.views.generic import RedirectView
def empty_view(request, *args, **kwargs):
return HttpResponse('')
def absolute_kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
def defaults_view(request, arg1, arg2):
pass
def nested_view(request):
pass
def erroneous_view(request):
import non_existent # NOQA
def pass_resolver_match_view(request, *args, **kwargs):
response = HttpResponse('')
response.resolver_match = request.resolver_match
return response
uncallable = None # neither a callable nor a string
class ViewClass(object):
def __call__(self, request, *args, **kwargs):
return HttpResponse('')
view_class_instance = ViewClass()
class LazyRedirectView(RedirectView):
url = reverse_lazy('named-lazy-url-redirected-to')
@user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
return HttpResponse('Hello you')
def bad_view(request, *args, **kwargs):
raise ValueError("I don't think I'm getting good value for this view")
empty_view_partial = partial(empty_view, template_name="template.html")
empty_view_wrapped = update_wrapper(
partial(empty_view, template_name="template.html"), empty_view,
)
| bsd-3-clause |
mscuthbert/abjad | abjad/tools/topleveltools/show.py | 2 | 1083 | # -*- encoding: utf-8 -*-
def show(expr, return_timing=False, **kwargs):
r'''Shows `expr`.
.. container:: example
Shows a note:
::
>>> note = Note("c'4")
>>> show(note) # doctest: +SKIP
Abjad writes LilyPond input files to the ``~/.abjad/output/``
directory by default.
You may change this by setting the ``abjad_output_directory`` variable in
the Abjad ``config.py`` file.
Returns none when `return_timing` is false.
Returns pair of `abjad_formatting_time` and `lilypond_rendering_time`
when `return_timing` is true.
'''
from abjad.tools import systemtools
from abjad.tools import topleveltools
assert hasattr(expr, '__illustrate__')
result = topleveltools.persist(expr).as_pdf(**kwargs)
pdf_file_path = result[0]
abjad_formatting_time = result[1]
lilypond_rendering_time = result[2]
success = result[3]
if success:
systemtools.IOManager.open_file(pdf_file_path)
if return_timing:
return abjad_formatting_time, lilypond_rendering_time
| gpl-3.0 |
bratsche/Neutron-Drive | google_appengine/lib/PyAMF/pyamf/flex/messaging.py | 27 | 16511 | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Flex Messaging implementation.
This module contains the message classes used with Flex Data Services.
@see: U{RemoteObject on OSFlash (external)
<http://osflash.org/documentation/amf3#remoteobject>}
@since: 0.1
"""
import uuid
import pyamf.util
from pyamf import amf3
__all__ = [
'RemotingMessage',
'CommandMessage',
'AcknowledgeMessage',
'ErrorMessage',
'AbstractMessage',
'AsyncMessage'
]
NAMESPACE = 'flex.messaging.messages'
SMALL_FLAG_MORE = 0x80
class AbstractMessage(object):
"""
Abstract base class for all Flex messages.
Messages have two customizable sections; headers and data. The headers
property provides access to specialized meta information for a specific
message instance. The data property contains the instance specific data
that needs to be delivered and processed by the decoder.
@see: U{AbstractMessage on Livedocs<http://
livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AbstractMessage.html>}
@ivar body: Specific data that needs to be delivered to the remote
destination.
@type body: C{mixed}
@ivar clientId: Indicates which client sent the message.
@type clientId: C{str}
@ivar destination: Message destination.
@type destination: C{str}
@ivar headers: Message headers. Core header names start with DS.
@type headers: C{dict}
@ivar messageId: Unique Message ID.
@type messageId: C{str}
@ivar timeToLive: How long the message should be considered valid and
deliverable.
@type timeToLive: C{int}
@ivar timestamp: Timestamp when the message was generated.
@type timestamp: C{int}
"""
class __amf__:
amf3 = True
static = ('body', 'clientId', 'destination', 'headers', 'messageId',
'timestamp', 'timeToLive')
#: Each message pushed from the server will contain this header identifying
#: the client that will receive the message.
DESTINATION_CLIENT_ID_HEADER = "DSDstClientId"
#: Messages are tagged with the endpoint id for the channel they are sent
#: over.
ENDPOINT_HEADER = "DSEndpoint"
#: Messages that need to set remote credentials for a destination carry the
#: C{Base64} encoded credentials in this header.
REMOTE_CREDENTIALS_HEADER = "DSRemoteCredentials"
#: The request timeout value is set on outbound messages by services or
#: channels and the value controls how long the responder will wait for an
#: acknowledgement, result or fault response for the message before timing
#: out the request.
REQUEST_TIMEOUT_HEADER = "DSRequestTimeout"
SMALL_ATTRIBUTE_FLAGS = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40]
SMALL_ATTRIBUTES = dict(zip(
SMALL_ATTRIBUTE_FLAGS,
__amf__.static
))
SMALL_UUID_FLAGS = [0x01, 0x02]
SMALL_UUIDS = dict(zip(
SMALL_UUID_FLAGS,
['clientId', 'messageId']
))
def __new__(cls, *args, **kwargs):
obj = object.__new__(cls)
obj.__init__(*args, **kwargs)
return obj
def __init__(self, *args, **kwargs):
self.body = kwargs.get('body', None)
self.clientId = kwargs.get('clientId', None)
self.destination = kwargs.get('destination', None)
self.headers = kwargs.get('headers', {})
self.messageId = kwargs.get('messageId', None)
self.timestamp = kwargs.get('timestamp', None)
self.timeToLive = kwargs.get('timeToLive', None)
def __repr__(self):
m = '<%s ' % self.__class__.__name__
for k in self.__dict__:
m += ' %s=%r' % (k, getattr(self, k))
return m + " />"
def decodeSmallAttribute(self, attr, input):
"""
@since: 0.5
"""
obj = input.readObject()
if attr in ['timestamp', 'timeToLive']:
return pyamf.util.get_datetime(obj / 1000.0)
return obj
def encodeSmallAttribute(self, attr):
"""
@since: 0.5
"""
obj = getattr(self, attr)
if not obj:
return obj
if attr in ['timestamp', 'timeToLive']:
return pyamf.util.get_timestamp(obj) * 1000.0
elif attr in ['clientId', 'messageId']:
if isinstance(obj, uuid.UUID):
return None
return obj
def __readamf__(self, input):
flags = read_flags(input)
if len(flags) > 2:
raise pyamf.DecodeError('Expected <=2 (got %d) flags for the '
'AbstractMessage portion of the small message for %r' % (
len(flags), self.__class__))
for index, byte in enumerate(flags):
if index == 0:
for flag in self.SMALL_ATTRIBUTE_FLAGS:
if flag & byte:
attr = self.SMALL_ATTRIBUTES[flag]
setattr(self, attr, self.decodeSmallAttribute(attr, input))
elif index == 1:
for flag in self.SMALL_UUID_FLAGS:
if flag & byte:
attr = self.SMALL_UUIDS[flag]
setattr(self, attr, decode_uuid(input.readObject()))
def __writeamf__(self, output):
flag_attrs = []
uuid_attrs = []
byte = 0
for flag in self.SMALL_ATTRIBUTE_FLAGS:
value = self.encodeSmallAttribute(self.SMALL_ATTRIBUTES[flag])
if value:
byte |= flag
flag_attrs.append(value)
flags = byte
byte = 0
for flag in self.SMALL_UUID_FLAGS:
attr = self.SMALL_UUIDS[flag]
value = getattr(self, attr)
if not value:
continue
byte |= flag
uuid_attrs.append(amf3.ByteArray(value.bytes))
if not byte:
output.writeUnsignedByte(flags)
else:
output.writeUnsignedByte(flags | SMALL_FLAG_MORE)
output.writeUnsignedByte(byte)
[output.writeObject(attr) for attr in flag_attrs]
[output.writeObject(attr) for attr in uuid_attrs]
def getSmallMessage(self):
"""
        Return an ISmallMessage representation of this object. If one is not
available, L{NotImplementedError} will be raised.
@since: 0.5
"""
raise NotImplementedError
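# --- Editorial sketch (not part of the original PyAMF module) ---------------
# How the small-message flag bytes used by __readamf__/__writeamf__ above map
# to attributes: the first flag byte selects entries from SMALL_ATTRIBUTES,
# e.g. with byte = 0x01 | 0x04 the attributes 'body' and 'destination' are
# present:
#
#     [AbstractMessage.SMALL_ATTRIBUTES[f]
#      for f in AbstractMessage.SMALL_ATTRIBUTE_FLAGS if f & (0x01 | 0x04)]
#     # -> ['body', 'destination']
#
# A second flag byte, if present, selects UUID-encoded fields from SMALL_UUIDS
# in the same way; bit 0x80 (SMALL_FLAG_MORE) on any flag byte means another
# flag byte follows.
# -----------------------------------------------------------------------------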
class AsyncMessage(AbstractMessage):
"""
I am the base class for all asynchronous Flex messages.
@see: U{AsyncMessage on Livedocs<http://
livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AsyncMessage.html>}
@ivar correlationId: Correlation id of the message.
@type correlationId: C{str}
"""
#: Messages that were sent with a defined subtopic property indicate their
#: target subtopic in this header.
SUBTOPIC_HEADER = "DSSubtopic"
class __amf__:
static = ('correlationId',)
def __init__(self, *args, **kwargs):
AbstractMessage.__init__(self, *args, **kwargs)
self.correlationId = kwargs.get('correlationId', None)
def __readamf__(self, input):
AbstractMessage.__readamf__(self, input)
flags = read_flags(input)
if len(flags) > 1:
raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
'AsyncMessage portion of the small message for %r' % (
len(flags), self.__class__))
byte = flags[0]
if byte & 0x01:
self.correlationId = input.readObject()
if byte & 0x02:
self.correlationId = decode_uuid(input.readObject())
def __writeamf__(self, output):
AbstractMessage.__writeamf__(self, output)
if not isinstance(self.correlationId, uuid.UUID):
output.writeUnsignedByte(0x01)
output.writeObject(self.correlationId)
else:
output.writeUnsignedByte(0x02)
output.writeObject(pyamf.amf3.ByteArray(self.correlationId.bytes))
def getSmallMessage(self):
"""
        Return an ISmallMessage representation of this async message.
@since: 0.5
"""
return AsyncMessageExt(**self.__dict__)
class AcknowledgeMessage(AsyncMessage):
"""
I acknowledge the receipt of a message that was sent previously.
Every message sent within the messaging system must receive an
acknowledgement.
@see: U{AcknowledgeMessage on Livedocs<http://
livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AcknowledgeMessage.html>}
"""
#: Used to indicate that the acknowledgement is for a message that
#: generated an error.
ERROR_HINT_HEADER = "DSErrorHint"
def __readamf__(self, input):
AsyncMessage.__readamf__(self, input)
flags = read_flags(input)
if len(flags) > 1:
raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
'AcknowledgeMessage portion of the small message for %r' % (
len(flags), self.__class__))
def __writeamf__(self, output):
AsyncMessage.__writeamf__(self, output)
output.writeUnsignedByte(0)
def getSmallMessage(self):
"""
        Return an ISmallMessage representation of this acknowledge message.
@since: 0.5
"""
return AcknowledgeMessageExt(**self.__dict__)
class CommandMessage(AsyncMessage):
"""
Provides a mechanism for sending commands related to publish/subscribe
messaging, ping, and cluster operations.
@see: U{CommandMessage on Livedocs<http://
livedocs.adobe.com/flex/201/langref/mx/messaging/messages/CommandMessage.html>}
@ivar operation: The command
@type operation: C{int}
    @ivar messageRefType: Message type reference for the command; its exact purpose is not well documented.
@type messageRefType: C{str}
"""
#: The server message type for authentication commands.
AUTHENTICATION_MESSAGE_REF_TYPE = "flex.messaging.messages.AuthenticationMessage"
#: This is used to test connectivity over the current channel to the remote
#: endpoint.
PING_OPERATION = 5
#: This is used by a remote destination to sync missed or cached messages
#: back to a client as a result of a client issued poll command.
SYNC_OPERATION = 4
#: This is used to request a list of failover endpoint URIs for the remote
#: destination based on cluster membership.
CLUSTER_REQUEST_OPERATION = 7
#: This is used to send credentials to the endpoint so that the user can be
#: logged in over the current channel. The credentials need to be C{Base64}
#: encoded and stored in the body of the message.
LOGIN_OPERATION = 8
#: This is used to log the user out of the current channel, and will
#: invalidate the server session if the channel is HTTP based.
LOGOUT_OPERATION = 9
#: This is used to poll a remote destination for pending, undelivered
#: messages.
POLL_OPERATION = 2
#: Subscribe commands issued by a consumer pass the consumer's C{selector}
#: expression in this header.
SELECTOR_HEADER = "DSSelector"
#: This is used to indicate that the client's session with a remote
#: destination has timed out.
SESSION_INVALIDATE_OPERATION = 10
#: This is used to subscribe to a remote destination.
SUBSCRIBE_OPERATION = 0
#: This is the default operation for new L{CommandMessage} instances.
UNKNOWN_OPERATION = 1000
#: This is used to unsubscribe from a remote destination.
UNSUBSCRIBE_OPERATION = 1
#: This operation is used to indicate that a channel has disconnected.
DISCONNECT_OPERATION = 12
class __amf__:
static = ('operation',)
def __init__(self, *args, **kwargs):
AsyncMessage.__init__(self, *args, **kwargs)
self.operation = kwargs.get('operation', None)
def __readamf__(self, input):
AsyncMessage.__readamf__(self, input)
flags = read_flags(input)
if not flags:
return
if len(flags) > 1:
raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
'CommandMessage portion of the small message for %r' % (
len(flags), self.__class__))
byte = flags[0]
if byte & 0x01:
self.operation = input.readObject()
def __writeamf__(self, output):
AsyncMessage.__writeamf__(self, output)
if self.operation:
output.writeUnsignedByte(0x01)
output.writeObject(self.operation)
else:
output.writeUnsignedByte(0)
def getSmallMessage(self):
"""
        Return an ISmallMessage representation of this command message.
@since: 0.5
"""
return CommandMessageExt(**self.__dict__)
class ErrorMessage(AcknowledgeMessage):
"""
I am the Flex error message to be returned to the client.
This class is used to report errors within the messaging system.
@see: U{ErrorMessage on Livedocs<http://
livedocs.adobe.com/flex/201/langref/mx/messaging/messages/ErrorMessage.html>}
"""
#: If a message may not have been delivered, the faultCode will contain
#: this constant.
MESSAGE_DELIVERY_IN_DOUBT = "Client.Error.DeliveryInDoubt"
#: Header name for the retryable hint header.
#:
#: This is used to indicate that the operation that generated the error may
#: be retryable rather than fatal.
RETRYABLE_HINT_HEADER = "DSRetryableErrorHint"
class __amf__:
static = ('extendedData', 'faultCode', 'faultDetail', 'faultString',
'rootCause')
def __init__(self, *args, **kwargs):
AcknowledgeMessage.__init__(self, *args, **kwargs)
#: Extended data that the remote destination has chosen to associate
#: with this error to facilitate custom error processing on the client.
self.extendedData = kwargs.get('extendedData', {})
#: Fault code for the error.
self.faultCode = kwargs.get('faultCode', None)
#: Detailed description of what caused the error.
self.faultDetail = kwargs.get('faultDetail', None)
#: A simple description of the error.
self.faultString = kwargs.get('faultString', None)
#: Should a traceback exist for the error, this property contains the
#: message.
self.rootCause = kwargs.get('rootCause', {})
def getSmallMessage(self):
"""
        Return an ISmallMessage representation of this error message.
@since: 0.5
"""
raise NotImplementedError
class RemotingMessage(AbstractMessage):
"""
I am used to send RPC requests to a remote endpoint.
@see: U{RemotingMessage on Livedocs<http://
livedocs.adobe.com/flex/201/langref/mx/messaging/messages/RemotingMessage.html>}
"""
class __amf__:
static = ('operation', 'source')
def __init__(self, *args, **kwargs):
AbstractMessage.__init__(self, *args, **kwargs)
#: Name of the remote method/operation that should be called.
self.operation = kwargs.get('operation', None)
#: Name of the service to be called including package name.
#: This property is provided for backwards compatibility.
self.source = kwargs.get('source', None)
class AcknowledgeMessageExt(AcknowledgeMessage):
"""
An L{AcknowledgeMessage}, but implementing C{ISmallMessage}.
@since: 0.5
"""
class __amf__:
external = True
class CommandMessageExt(CommandMessage):
"""
A L{CommandMessage}, but implementing C{ISmallMessage}.
@since: 0.5
"""
class __amf__:
external = True
class AsyncMessageExt(AsyncMessage):
"""
    An L{AsyncMessage}, but implementing C{ISmallMessage}.
@since: 0.5
"""
class __amf__:
external = True
def read_flags(input):
"""
@since: 0.5
"""
flags = []
done = False
while not done:
byte = input.readUnsignedByte()
if not byte & SMALL_FLAG_MORE:
done = True
else:
byte = byte ^ SMALL_FLAG_MORE
flags.append(byte)
return flags
def decode_uuid(obj):
"""
Decode a L{ByteArray} contents to a C{uuid.UUID} instance.
@since: 0.5
"""
return uuid.UUID(bytes=str(obj))
pyamf.register_package(globals(), package=NAMESPACE)
pyamf.register_class(AcknowledgeMessageExt, 'DSK')
pyamf.register_class(CommandMessageExt, 'DSC')
pyamf.register_class(AsyncMessageExt, 'DSA')
| bsd-3-clause |
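The ``read_flags`` helper at the end of the module above implements the continuation-bit scheme used by Flex small messages: bit 0x80 on a flag byte means another flag byte follows. A minimal standalone sketch of the same idea over an in-memory byte sequence (the names below are illustrative, not part of PyAMF):

    SMALL_FLAG_MORE = 0x80

    def read_flag_bytes(data):
        """Collect flag bytes until one without the 'more flags follow' bit."""
        flags = []
        for byte in data:
            flags.append(byte & ~SMALL_FLAG_MORE)
            if not byte & SMALL_FLAG_MORE:
                break
        return flags

    assert read_flag_bytes([0x81, 0x03]) == [0x01, 0x03]  # 0x81 carries the continuation bit
    assert read_flag_bytes([0x07]) == [0x07]              # a single, final flag byte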
farin/python-fakturoid | setup.py | 1 | 1437 | import os
from setuptools import setup
# workaround to prevent http://bugs.python.org/issue15881 from showing up
try:
import multiprocessing
except ImportError:
pass
# def read(fname):
# return open(os.path.join(os.path.dirname(__file__), fname)).read()
long_description = """
fakturoid.cz Python API
=======================
The Python interface to online accounting service
`Fakturoid <http://fakturoid.cz/>`_.
See documentation on https://github.com/farin/python-fakturoid
"""
setup(
name='fakturoid',
version='1.5.1',
url="https://github.com/farin/python-fakturoid",
description='Python API for fakturoid.cz',
# long_description=read('README.md'),
long_description=long_description,
author='Roman Krejcik',
author_email='farin@farin.cz',
license='MIT',
platforms='any',
keywords=['fakturoid', 'accounting'],
packages=['fakturoid'],
install_requires=['requests', 'python-dateutil'],
tests_require=['mock'],
test_suite="tests",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Office/Business :: Financial :: Accounting",
],
)
| mit |
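Once a distribution built from the setup.py above is installed, its metadata can be inspected with the standard library. A small sketch (the values in the comments are taken from the setup() call above; importlib.metadata requires Python 3.8+):

    from importlib.metadata import metadata, version

    print(version("fakturoid"))   # 1.5.1
    meta = metadata("fakturoid")
    print(meta["Summary"])        # Python API for fakturoid.cz
    print(meta["License"])        # MIT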
willingc/pip | pip/_vendor/html5lib/tokenizer.py | 1710 | 76929 | from __future__ import absolute_import, division, unicode_literals
try:
chr = unichr # flake8: noqa
except NameError:
pass
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from .inputstream import HTMLInputStream
from .trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=True, lowercaseAttrName=True, parser=None):
self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
self.parser = parser
# Perform case conversions?
self.lowercaseElementName = lowercaseElementName
self.lowercaseAttrName = lowercaseAttrName
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
        We do our usual processing through the states, and when we have a token
        to return we yield it, which pauses processing until the next token
        is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
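    # --- Editorial note (not part of the original html5lib source) ----------
    # ``self.state`` always holds a bound state method (initially
    # ``self.dataState``).  The loop in ``__iter__`` above simply keeps
    # calling it: each state method returns True to continue tokenizing or
    # False at EOF, and switches states by reassigning ``self.state`` rather
    # than going through an explicit dispatch table.
    # -------------------------------------------------------------------------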
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
        If not present, self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# Try/except needed as UCS-2 Python builds' unichar only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
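    # --- Editorial note (not part of the original html5lib source) ----------
    # Worked example for the surrogate-pair fallback above (narrow/UCS-2
    # Python builds): for charAsInt = 0x1F600,
    #     v = 0x1F600 - 0x10000            # 0xF600
    #     chr(0xD800 | (v >> 10))          # '\ud83d'
    #     chr(0xDC00 | (v & 0x3FF))        # '\ude00'
    # i.e. the UTF-16 surrogate pair for U+1F600.
    # -------------------------------------------------------------------------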
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&")
or (allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
# At this point in the process might have named entity. Entities
# are stored in the global variable "entities".
#
# Consume characters and compare to these to a substring of the
# entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
# of ¬i for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
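    # --- Editorial note (not part of the original html5lib source) ----------
    # Example of the longest-prefix rule above: for input "&notin;" the full
    # entity name "notin;" matches and a single "∉" is emitted; for input
    # like "&notit;" only the name "not" matches, so "¬" is emitted (with a
    # named-entity-without-semicolon parse error) and the remaining
    # characters are treated as ordinary text.
    # -------------------------------------------------------------------------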
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
if self.lowercaseElementName:
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
if self.lowercaseAttrName:
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, value in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
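            # Case-insensitively match the PUBLIC or SYSTEM keyword one
            # character at a time; on a full match hand off to the matching
            # after-keyword state, otherwise treat the doctype as bogus.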
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
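        # Accumulate text until the "]]>" sequence that closes the CDATA
        # section (or EOF); the trailing "]]" is stripped from the last chunk.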
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
            if char is EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data)
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for i in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
| mit |
turbomanage/training-data-analyst | courses/machine_learning/deepdive2/structured/labs/serving/application/lib/click/_unicodefun.py | 19 | 4364 | import os
import sys
import codecs
from ._compat import PY2
# If someone wants to vendor click, we want to ensure the
# correct package is discovered. Ideally we could use a
# relative import here but unfortunately Python does not
# support that.
click = sys.modules[__name__.rsplit('.', 1)[0]]
def _find_unicode_literals_frame():
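    # Walk up the call stack past click-internal frames; if the first outside
    # frame was compiled with unicode_literals, return its depth, else 0.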
import __future__
if not hasattr(sys, '_getframe'): # not all Python implementations have it
return 0
frm = sys._getframe(1)
idx = 1
while frm is not None:
if frm.f_globals.get('__name__', '').startswith('click.'):
frm = frm.f_back
idx += 1
elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
return idx
else:
break
return 0
def _check_for_unicode_literals():
if not __debug__:
return
if not PY2 or click.disable_unicode_literals_warning:
return
bad_frame = _find_unicode_literals_frame()
if bad_frame <= 0:
return
from warnings import warn
warn(Warning('Click detected the use of the unicode_literals '
'__future__ import. This is heavily discouraged '
'because it can introduce subtle bugs in your '
'code. You should instead use explicit u"" literals '
'for your unicode strings. For more information see '
'https://click.palletsprojects.com/python3/'),
stacklevel=bad_frame)
def _verify_python3_env():
"""Ensures that the environment is good for unicode on Python 3."""
if PY2:
return
try:
import locale
fs_enc = codecs.lookup(locale.getpreferredencoding()).name
except Exception:
fs_enc = 'ascii'
if fs_enc != 'ascii':
return
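    # The preferred encoding is ASCII, which breaks unicode handling on
    # Python 3; gather locale information below to build a helpful error.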
extra = ''
if os.name == 'posix':
import subprocess
try:
rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()[0]
except OSError:
rv = b''
good_locales = set()
has_c_utf8 = False
# Make sure we're operating on text here.
if isinstance(rv, bytes):
rv = rv.decode('ascii', 'replace')
for line in rv.splitlines():
locale = line.strip()
if locale.lower().endswith(('.utf-8', '.utf8')):
good_locales.add(locale)
if locale.lower() in ('c.utf8', 'c.utf-8'):
has_c_utf8 = True
extra += '\n\n'
if not good_locales:
extra += (
'Additional information: on this system no suitable UTF-8\n'
'locales were discovered. This most likely requires resolving\n'
'by reconfiguring the locale system.'
)
elif has_c_utf8:
extra += (
'This system supports the C.UTF-8 locale which is recommended.\n'
'You might be able to resolve your issue by exporting the\n'
'following environment variables:\n\n'
' export LC_ALL=C.UTF-8\n'
' export LANG=C.UTF-8'
)
else:
extra += (
'This system lists a couple of UTF-8 supporting locales that\n'
'you can pick from. The following suitable locales were\n'
'discovered: %s'
) % ', '.join(sorted(good_locales))
bad_locale = None
for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'):
if locale and locale.lower().endswith(('.utf-8', '.utf8')):
bad_locale = locale
if locale is not None:
break
if bad_locale is not None:
extra += (
'\n\nClick discovered that you exported a UTF-8 locale\n'
'but the locale system could not pick up from it because\n'
'it does not exist. The exported locale is "%s" but it\n'
'is not supported'
) % bad_locale
raise RuntimeError(
'Click will abort further execution because Python 3 was'
' configured to use ASCII as encoding for the environment.'
' Consult https://click.palletsprojects.com/en/7.x/python3/ for'
' mitigation steps.' + extra
)
| apache-2.0 |
Don42/youtube-dl | youtube_dl/extractor/trutube.py | 147 | 1354 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import xpath_text
class TruTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?trutube\.tv/(?:video/|nuevo/player/embed\.php\?v=)(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://trutube.tv/video/14880/Ramses-II-Proven-To-Be-A-Red-Headed-Caucasoid-',
'md5': 'c5b6e301b0a2040b074746cbeaa26ca1',
'info_dict': {
'id': '14880',
'ext': 'flv',
'title': 'Ramses II - Proven To Be A Red Headed Caucasoid',
'thumbnail': 're:^http:.*\.jpg$',
}
}, {
'url': 'https://trutube.tv/nuevo/player/embed.php?v=14880',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
config = self._download_xml(
'https://trutube.tv/nuevo/player/config.php?v=%s' % video_id,
video_id, transform_source=lambda s: s.strip())
# filehd is always 404
video_url = xpath_text(config, './file', 'video URL', fatal=True)
title = xpath_text(config, './title', 'title').strip()
        thumbnail = xpath_text(config, './image', 'thumbnail')
return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
}
| unlicense |
gpospelov/BornAgain | ThirdParty/common/gtest/gtest-1.10.0/googlemock/test/gmock_test_utils.py | 88 | 3621 | # Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'
# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../../googletest/test')
if os.path.isdir(gtest_tests_util_dir):
GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../googletest/test')
sys.path.append(GTEST_TESTS_UTIL_DIR)
# pylint: disable=C6204
import gtest_test_utils
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
Returns:
The absolute path of the test binary.
"""
return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes utilities from gtest_test_utils.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR
# pylint: enable-msg=C6409
def Main():
"""Runs the unit test."""
gtest_test_utils.Main()
| gpl-3.0 |
pombredanne/django-constance | constance/settings.py | 1 | 1230 | from django.conf import settings
BACKEND = getattr(settings, 'CONSTANCE_BACKEND',
'constance.backends.redisd.RedisBackend')
CONFIG = getattr(settings, 'CONSTANCE_CONFIG', {})
CONFIG_FIELDSETS = getattr(settings, 'CONSTANCE_CONFIG_FIELDSETS', {})
ADDITIONAL_FIELDS = getattr(settings, 'CONSTANCE_ADDITIONAL_FIELDS', {})
DATABASE_CACHE_BACKEND = getattr(settings, 'CONSTANCE_DATABASE_CACHE_BACKEND',
None)
DATABASE_CACHE_AUTOFILL_TIMEOUT = getattr(settings,
'CONSTANCE_DATABASE_CACHE_AUTOFILL_TIMEOUT',
60 * 60 * 24)
DATABASE_PREFIX = getattr(settings, 'CONSTANCE_DATABASE_PREFIX', '')
REDIS_PREFIX = getattr(settings, 'CONSTANCE_REDIS_PREFIX', 'constance:')
REDIS_CONNECTION_CLASS = getattr(settings, 'CONSTANCE_REDIS_CONNECTION_CLASS',
None)
REDIS_CONNECTION = getattr(settings, 'CONSTANCE_REDIS_CONNECTION', {})
SUPERUSER_ONLY = getattr(settings, 'CONSTANCE_SUPERUSER_ONLY', True)
IGNORE_ADMIN_VERSION_CHECK = getattr(settings,
'CONSTANCE_IGNORE_ADMIN_VERSION_CHECK',
False)
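# Illustrative only (not part of this module): a project would typically
# override a few of these from its Django settings, e.g.
#   CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
#   CONSTANCE_CONFIG = {'SITE_NAME': ('My Site', 'Human-readable site title')}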
| bsd-3-clause |
holycrepe/anki | anki/sync.py | 15 | 29920 | # -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import os
import urllib
import sys
import gzip
import random
from cStringIO import StringIO
import httplib2
from anki.db import DB
from anki.utils import ids2str, intTime, json, isWin, isMac, platDesc, checksum
from anki.consts import *
from hooks import runHook
import anki
from lang import ngettext
# syncing vars
HTTP_TIMEOUT = 90
HTTP_PROXY = None
# badly named; means no retries
httplib2.RETRIES = 1
try:
# httplib2 >=0.7.7
_proxy_info_from_environment = httplib2.proxy_info_from_environment
_proxy_info_from_url = httplib2.proxy_info_from_url
except AttributeError:
# httplib2 <0.7.7
_proxy_info_from_environment = httplib2.ProxyInfo.from_environment
_proxy_info_from_url = httplib2.ProxyInfo.from_url
# Httplib2 connection object
######################################################################
def httpCon():
certs = os.path.join(os.path.dirname(__file__), "ankiweb.certs")
if not os.path.exists(certs):
if isWin:
certs = os.path.join(
os.path.dirname(os.path.abspath(sys.argv[0])),
"ankiweb.certs")
elif isMac:
certs = os.path.join(
os.path.dirname(os.path.abspath(sys.argv[0])),
"../Resources/ankiweb.certs")
else:
assert 0, "Your distro has not packaged Anki correctly."
return httplib2.Http(
timeout=HTTP_TIMEOUT, ca_certs=certs,
proxy_info=HTTP_PROXY,
disable_ssl_certificate_validation=not not HTTP_PROXY)
# Proxy handling
######################################################################
def _setupProxy():
global HTTP_PROXY
# set in env?
p = _proxy_info_from_environment()
if not p:
# platform-specific fetch
url = None
if isWin:
r = urllib.getproxies_registry()
if 'https' in r:
url = r['https']
elif 'http' in r:
url = r['http']
elif isMac:
r = urllib.getproxies_macosx_sysconf()
if 'https' in r:
url = r['https']
elif 'http' in r:
url = r['http']
if url:
p = _proxy_info_from_url(url, _proxyMethod(url))
if p:
p.proxy_rdns = True
HTTP_PROXY = p
def _proxyMethod(url):
if url.lower().startswith("https"):
return "https"
else:
return "http"
_setupProxy()
# Incremental syncing
##########################################################################
class Syncer(object):
def __init__(self, col, server=None):
self.col = col
self.server = server
def sync(self):
"Returns 'noChanges', 'fullSync', 'success', etc"
self.syncMsg = ""
self.uname = ""
# if the deck has any pending changes, flush them first and bump mod
# time
self.col.save()
# step 1: login & metadata
runHook("sync", "login")
meta = self.server.meta()
self.col.log("rmeta", meta)
if not meta:
return "badAuth"
# server requested abort?
self.syncMsg = meta['msg']
if not meta['cont']:
return "serverAbort"
else:
# don't abort, but if 'msg' is not blank, gui should show 'msg'
# after sync finishes and wait for confirmation before hiding
pass
rscm = meta['scm']
rts = meta['ts']
self.rmod = meta['mod']
self.maxUsn = meta['usn']
# this is a temporary measure to address the problem of users
# forgetting which email address they've used - it will be removed
# when enough time has passed
self.uname = meta.get("uname", "")
meta = self.meta()
self.col.log("lmeta", meta)
self.lmod = meta['mod']
self.minUsn = meta['usn']
lscm = meta['scm']
lts = meta['ts']
if abs(rts - lts) > 300:
self.col.log("clock off")
return "clockOff"
if self.lmod == self.rmod:
self.col.log("no changes")
return "noChanges"
elif lscm != rscm:
self.col.log("schema diff")
return "fullSync"
self.lnewer = self.lmod > self.rmod
# step 1.5: check collection is valid
if not self.col.basicCheck():
self.col.log("basic check")
return "basicCheckFailed"
# step 2: deletions
runHook("sync", "meta")
lrem = self.removed()
rrem = self.server.start(
minUsn=self.minUsn, lnewer=self.lnewer, graves=lrem)
self.remove(rrem)
# ...and small objects
lchg = self.changes()
rchg = self.server.applyChanges(changes=lchg)
self.mergeChanges(lchg, rchg)
# step 3: stream large tables from server
runHook("sync", "server")
while 1:
runHook("sync", "stream")
chunk = self.server.chunk()
self.col.log("server chunk", chunk)
self.applyChunk(chunk=chunk)
if chunk['done']:
break
# step 4: stream to server
runHook("sync", "client")
while 1:
runHook("sync", "stream")
chunk = self.chunk()
self.col.log("client chunk", chunk)
self.server.applyChunk(chunk=chunk)
if chunk['done']:
break
# step 5: sanity check
runHook("sync", "sanity")
c = self.sanityCheck()
ret = self.server.sanityCheck2(client=c)
if ret['status'] != "ok":
# roll back and force full sync
self.col.rollback()
self.col.modSchema(False)
self.col.save()
return "sanityCheckFailed"
# finalize
runHook("sync", "finalize")
mod = self.server.finish()
self.finish(mod)
return "success"
def meta(self):
return dict(
mod=self.col.mod,
scm=self.col.scm,
usn=self.col._usn,
ts=intTime(),
musn=0,
msg="",
cont=True
)
def changes(self):
"Bundle up small objects."
d = dict(models=self.getModels(),
decks=self.getDecks(),
tags=self.getTags())
if self.lnewer:
d['conf'] = self.getConf()
d['crt'] = self.col.crt
return d
def applyChanges(self, changes):
self.rchg = changes
lchg = self.changes()
# merge our side before returning
self.mergeChanges(lchg, self.rchg)
return lchg
def mergeChanges(self, lchg, rchg):
# then the other objects
self.mergeModels(rchg['models'])
self.mergeDecks(rchg['decks'])
self.mergeTags(rchg['tags'])
if 'conf' in rchg:
self.mergeConf(rchg['conf'])
# this was left out of earlier betas
if 'crt' in rchg:
self.col.crt = rchg['crt']
self.prepareToChunk()
def sanityCheck(self):
if not self.col.basicCheck():
return "failed basic check"
for t in "cards", "notes", "revlog", "graves":
if self.col.db.scalar(
"select count() from %s where usn = -1" % t):
return "%s had usn = -1" % t
for g in self.col.decks.all():
if g['usn'] == -1:
return "deck had usn = -1"
for t, usn in self.col.tags.allItems():
if usn == -1:
return "tag had usn = -1"
found = False
for m in self.col.models.all():
if self.col.server:
# the web upgrade was mistakenly setting usn
if m['usn'] < 0:
m['usn'] = 0
found = True
else:
if m['usn'] == -1:
return "model had usn = -1"
if found:
self.col.models.save()
self.col.sched.reset()
# check for missing parent decks
self.col.sched.deckDueList()
# return summary of deck
return [
list(self.col.sched.counts()),
self.col.db.scalar("select count() from cards"),
self.col.db.scalar("select count() from notes"),
self.col.db.scalar("select count() from revlog"),
self.col.db.scalar("select count() from graves"),
len(self.col.models.all()),
len(self.col.decks.all()),
len(self.col.decks.allConf()),
]
def sanityCheck2(self, client):
server = self.sanityCheck()
if client != server:
return dict(status="bad", c=client, s=server)
return dict(status="ok")
def usnLim(self):
if self.col.server:
return "usn >= %d" % self.minUsn
else:
return "usn = -1"
def finish(self, mod=None):
if not mod:
# server side; we decide new mod time
mod = intTime(1000)
self.col.ls = mod
self.col._usn = self.maxUsn + 1
# ensure we save the mod time even if no changes made
self.col.db.mod = True
self.col.save(mod=mod)
return mod
# Chunked syncing
##########################################################################
def prepareToChunk(self):
self.tablesLeft = ["revlog", "cards", "notes"]
self.cursor = None
def cursorForTable(self, table):
lim = self.usnLim()
x = self.col.db.execute
d = (self.maxUsn, lim)
if table == "revlog":
return x("""
select id, cid, %d, ease, ivl, lastIvl, factor, time, type
from revlog where %s""" % d)
elif table == "cards":
return x("""
select id, nid, did, ord, mod, %d, type, queue, due, ivl, factor, reps,
lapses, left, odue, odid, flags, data from cards where %s""" % d)
else:
return x("""
select id, guid, mid, mod, %d, tags, flds, '', '', flags, data
from notes where %s""" % d)
def chunk(self):
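        # Stream at most 250 rows per call, drawn from revlog, cards and notes
        # in that order; 'done' is set once every table has been exhausted.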
buf = dict(done=False)
lim = 250
while self.tablesLeft and lim:
curTable = self.tablesLeft[0]
if not self.cursor:
self.cursor = self.cursorForTable(curTable)
rows = self.cursor.fetchmany(lim)
fetched = len(rows)
if fetched != lim:
# table is empty
self.tablesLeft.pop(0)
self.cursor = None
# if we're the client, mark the objects as having been sent
if not self.col.server:
self.col.db.execute(
"update %s set usn=? where usn=-1"%curTable,
self.maxUsn)
buf[curTable] = rows
lim -= fetched
if not self.tablesLeft:
buf['done'] = True
return buf
def applyChunk(self, chunk):
if "revlog" in chunk:
self.mergeRevlog(chunk['revlog'])
if "cards" in chunk:
self.mergeCards(chunk['cards'])
if "notes" in chunk:
self.mergeNotes(chunk['notes'])
# Deletions
##########################################################################
def removed(self):
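        # Collect the ids of objects deleted locally (the graves table) that
        # the other side has not yet seen, grouped into cards/notes/decks.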
cards = []
notes = []
decks = []
if self.col.server:
curs = self.col.db.execute(
"select oid, type from graves where usn >= ?", self.minUsn)
else:
curs = self.col.db.execute(
"select oid, type from graves where usn = -1")
for oid, type in curs:
if type == REM_CARD:
cards.append(oid)
elif type == REM_NOTE:
notes.append(oid)
else:
decks.append(oid)
if not self.col.server:
self.col.db.execute("update graves set usn=? where usn=-1",
self.maxUsn)
return dict(cards=cards, notes=notes, decks=decks)
def start(self, minUsn, lnewer, graves):
self.maxUsn = self.col._usn
self.minUsn = minUsn
self.lnewer = not lnewer
lgraves = self.removed()
self.remove(graves)
return lgraves
def remove(self, graves):
# pretend to be the server so we don't set usn = -1
wasServer = self.col.server
self.col.server = True
# notes first, so we don't end up with duplicate graves
self.col._remNotes(graves['notes'])
# then cards
self.col.remCards(graves['cards'], notes=False)
# and decks
for oid in graves['decks']:
self.col.decks.rem(oid, childrenToo=False)
self.col.server = wasServer
# Models
##########################################################################
def getModels(self):
if self.col.server:
return [m for m in self.col.models.all() if m['usn'] >= self.minUsn]
else:
mods = [m for m in self.col.models.all() if m['usn'] == -1]
for m in mods:
m['usn'] = self.maxUsn
self.col.models.save()
return mods
def mergeModels(self, rchg):
for r in rchg:
l = self.col.models.get(r['id'])
# if missing locally or server is newer, update
if not l or r['mod'] > l['mod']:
self.col.models.update(r)
# Decks
##########################################################################
def getDecks(self):
if self.col.server:
return [
[g for g in self.col.decks.all() if g['usn'] >= self.minUsn],
[g for g in self.col.decks.allConf() if g['usn'] >= self.minUsn]
]
else:
decks = [g for g in self.col.decks.all() if g['usn'] == -1]
for g in decks:
g['usn'] = self.maxUsn
dconf = [g for g in self.col.decks.allConf() if g['usn'] == -1]
for g in dconf:
g['usn'] = self.maxUsn
self.col.decks.save()
return [decks, dconf]
def mergeDecks(self, rchg):
for r in rchg[0]:
l = self.col.decks.get(r['id'], False)
# if missing locally or server is newer, update
if not l or r['mod'] > l['mod']:
self.col.decks.update(r)
for r in rchg[1]:
try:
l = self.col.decks.getConf(r['id'])
except KeyError:
l = None
# if missing locally or server is newer, update
if not l or r['mod'] > l['mod']:
self.col.decks.updateConf(r)
# Tags
##########################################################################
def getTags(self):
if self.col.server:
return [t for t, usn in self.col.tags.allItems()
if usn >= self.minUsn]
else:
tags = []
for t, usn in self.col.tags.allItems():
if usn == -1:
self.col.tags.tags[t] = self.maxUsn
tags.append(t)
self.col.tags.save()
return tags
def mergeTags(self, tags):
self.col.tags.register(tags, usn=self.maxUsn)
# Cards/notes/revlog
##########################################################################
def mergeRevlog(self, logs):
self.col.db.executemany(
"insert or ignore into revlog values (?,?,?,?,?,?,?,?,?)",
logs)
def newerRows(self, data, table, modIdx):
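        # Keep only the incoming rows that are missing locally or newer than
        # the local copy, comparing the mod column at index modIdx.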
ids = (r[0] for r in data)
lmods = {}
for id, mod in self.col.db.execute(
"select id, mod from %s where id in %s and %s" % (
table, ids2str(ids), self.usnLim())):
lmods[id] = mod
update = []
for r in data:
if r[0] not in lmods or lmods[r[0]] < r[modIdx]:
update.append(r)
self.col.log(table, data)
return update
def mergeCards(self, cards):
self.col.db.executemany(
"insert or replace into cards values "
"(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
self.newerRows(cards, "cards", 4))
def mergeNotes(self, notes):
rows = self.newerRows(notes, "notes", 3)
self.col.db.executemany(
"insert or replace into notes values (?,?,?,?,?,?,?,?,?,?,?)",
rows)
self.col.updateFieldCache([f[0] for f in rows])
# Col config
##########################################################################
def getConf(self):
return self.col.conf
def mergeConf(self, conf):
self.col.conf = conf
# Local syncing for unit tests
##########################################################################
class LocalServer(Syncer):
# serialize/deserialize payload, so we don't end up sharing objects
# between cols
def applyChanges(self, changes):
l = json.loads; d = json.dumps
return l(d(Syncer.applyChanges(self, l(d(changes)))))
# HTTP syncing tools
##########################################################################
# Calling code should catch the following codes:
# - 501: client needs upgrade
# - 502: ankiweb down
# - 503/504: server too busy
class HttpSyncer(object):
def __init__(self, hkey=None, con=None):
self.hkey = hkey
self.skey = checksum(str(random.random()))[:8]
self.con = con or httpCon()
self.postVars = {}
def assertOk(self, resp):
if resp['status'] != '200':
raise Exception("Unknown response code: %s" % resp['status'])
# Posting data as a file
######################################################################
# We don't want to post the payload as a form var, as the percent-encoding is
# costly. We could send it as a raw post, but more HTTP clients seem to
# support file uploading, so this is the more compatible choice.
def req(self, method, fobj=None, comp=6, badAuthRaises=False):
BOUNDARY="Anki-sync-boundary"
bdry = "--"+BOUNDARY
buf = StringIO()
# post vars
self.postVars['c'] = 1 if comp else 0
for (key, value) in self.postVars.items():
buf.write(bdry + "\r\n")
buf.write(
'Content-Disposition: form-data; name="%s"\r\n\r\n%s\r\n' %
(key, value))
# payload as raw data or json
if fobj:
# header
buf.write(bdry + "\r\n")
buf.write("""\
Content-Disposition: form-data; name="data"; filename="data"\r\n\
Content-Type: application/octet-stream\r\n\r\n""")
# write file into buffer, optionally compressing
if comp:
tgt = gzip.GzipFile(mode="wb", fileobj=buf, compresslevel=comp)
else:
tgt = buf
while 1:
data = fobj.read(65536)
if not data:
if comp:
tgt.close()
break
tgt.write(data)
buf.write('\r\n' + bdry + '--\r\n')
size = buf.tell()
# connection headers
headers = {
'Content-Type': 'multipart/form-data; boundary=%s' % BOUNDARY,
'Content-Length': str(size),
}
body = buf.getvalue()
buf.close()
resp, cont = self.con.request(
self.syncURL()+method, "POST", headers=headers, body=body)
if not badAuthRaises:
# return false if bad auth instead of raising
if resp['status'] == '403':
return False
self.assertOk(resp)
return cont
# Incremental sync over HTTP
######################################################################
class RemoteServer(HttpSyncer):
def __init__(self, hkey):
HttpSyncer.__init__(self, hkey)
def syncURL(self):
if os.getenv("ANKIDEV"):
return "https://l1.ankiweb.net/sync/"
return SYNC_BASE + "sync/"
def hostKey(self, user, pw):
"Returns hkey or none if user/pw incorrect."
self.postVars = dict()
ret = self.req(
"hostKey", StringIO(json.dumps(dict(u=user, p=pw))),
badAuthRaises=False)
if not ret:
# invalid auth
return
self.hkey = json.loads(ret)['key']
return self.hkey
def meta(self):
self.postVars = dict(
k=self.hkey,
s=self.skey,
)
ret = self.req(
"meta", StringIO(json.dumps(dict(
v=SYNC_VER, cv="ankidesktop,%s,%s"%(anki.version, platDesc())))),
badAuthRaises=False)
if not ret:
# invalid auth
return
return json.loads(ret)
def applyChanges(self, **kw):
return self._run("applyChanges", kw)
def start(self, **kw):
return self._run("start", kw)
def chunk(self, **kw):
return self._run("chunk", kw)
def applyChunk(self, **kw):
return self._run("applyChunk", kw)
def sanityCheck2(self, **kw):
return self._run("sanityCheck2", kw)
def finish(self, **kw):
return self._run("finish", kw)
def _run(self, cmd, data):
return json.loads(
self.req(cmd, StringIO(json.dumps(data))))
# Full syncing
##########################################################################
class FullSyncer(HttpSyncer):
def __init__(self, col, hkey, con):
HttpSyncer.__init__(self, hkey, con)
self.postVars = dict(
k=self.hkey,
v="ankidesktop,%s,%s"%(anki.version, platDesc()),
)
self.col = col
def syncURL(self):
if os.getenv("ANKIDEV"):
return "https://l1.ankiweb.net/sync/"
return SYNC_BASE + "sync/"
def download(self):
runHook("sync", "download")
self.col.close()
cont = self.req("download")
tpath = self.col.path + ".tmp"
if cont == "upgradeRequired":
runHook("sync", "upgradeRequired")
return
open(tpath, "wb").write(cont)
# check the received file is ok
d = DB(tpath)
assert d.scalar("pragma integrity_check") == "ok"
d.close()
# overwrite existing collection
os.unlink(self.col.path)
os.rename(tpath, self.col.path)
self.col = None
def upload(self):
"True if upload successful."
runHook("sync", "upload")
# make sure it's ok before we try to upload
if self.col.db.scalar("pragma integrity_check") != "ok":
return False
if not self.col.basicCheck():
return False
# apply some adjustments, then upload
self.col.beforeUpload()
if self.req("upload", open(self.col.path, "rb")) != "OK":
return False
return True
# Media syncing
##########################################################################
#
# About conflicts:
# - to minimize data loss, if both sides are marked for sending and one
# side has been deleted, favour the add
# - if added/changed on both sides, favour the server version on the
# assumption other syncers are in sync with the server
#
class MediaSyncer(object):
def __init__(self, col, server=None):
self.col = col
self.server = server
def sync(self):
# check if there have been any changes
runHook("sync", "findMedia")
self.col.log("findChanges")
self.col.media.findChanges()
# begin session and check if in sync
lastUsn = self.col.media.lastUsn()
ret = self.server.begin()
srvUsn = ret['usn']
if lastUsn == srvUsn and not self.col.media.haveDirty():
return "noChanges"
# loop through and process changes from server
self.col.log("last local usn is %s"%lastUsn)
self.downloadCount = 0
while True:
data = self.server.mediaChanges(lastUsn=lastUsn)
self.col.log("mediaChanges resp count %d"%len(data))
if not data:
break
need = []
lastUsn = data[-1][1]
for fname, rusn, rsum in data:
lsum, ldirty = self.col.media.syncInfo(fname)
self.col.log(
"check: lsum=%s rsum=%s ldirty=%d rusn=%d fname=%s"%(
(lsum and lsum[0:4]),
(rsum and rsum[0:4]),
ldirty,
rusn,
fname))
if rsum:
# added/changed remotely
if not lsum or lsum != rsum:
self.col.log("will fetch")
need.append(fname)
else:
self.col.log("have same already")
ldirty and self.col.media.markClean([fname])
elif lsum:
# deleted remotely
if not ldirty:
self.col.log("delete local")
self.col.media.syncDelete(fname)
else:
# conflict; local add overrides remote delete
self.col.log("conflict; will send")
else:
# deleted both sides
self.col.log("both sides deleted")
ldirty and self.col.media.markClean([fname])
self._downloadFiles(need)
self.col.log("update last usn to %d"%lastUsn)
self.col.media.setLastUsn(lastUsn) # commits
# at this point we're all up to date with the server's changes,
# and we need to send our own
updateConflict = False
toSend = self.col.media.dirtyCount()
while True:
zip, fnames = self.col.media.mediaChangesZip()
if not fnames:
break
runHook("syncMsg", ngettext(
"%d media change to upload", "%d media changes to upload", toSend)
% toSend)
processedCnt, serverLastUsn = self.server.uploadChanges(zip)
self.col.media.markClean(fnames[0:processedCnt])
self.col.log("processed %d, serverUsn %d, clientUsn %d" % (
processedCnt, serverLastUsn, lastUsn
))
if serverLastUsn - processedCnt == lastUsn:
self.col.log("lastUsn in sync, updating local")
lastUsn = serverLastUsn
self.col.media.setLastUsn(serverLastUsn) # commits
else:
self.col.log("concurrent update, skipping usn update")
# commit for markClean
self.col.media.db.commit()
updateConflict = True
toSend -= processedCnt
if updateConflict:
self.col.log("restart sync due to concurrent update")
return self.sync()
lcnt = self.col.media.mediaCount()
ret = self.server.mediaSanity(local=lcnt)
if ret == "OK":
return "OK"
else:
self.col.media.forceResync()
return ret
def _downloadFiles(self, fnames):
self.col.log("%d files to fetch"%len(fnames))
while fnames:
top = fnames[0:SYNC_ZIP_COUNT]
self.col.log("fetch %s"%top)
zipData = self.server.downloadFiles(files=top)
cnt = self.col.media.addFilesFromZip(zipData)
self.downloadCount += cnt
self.col.log("received %d files"%cnt)
fnames = fnames[cnt:]
n = self.downloadCount
runHook("syncMsg", ngettext(
"%d media file downloaded", "%d media files downloaded", n)
% n)
# Remote media syncing
##########################################################################
class RemoteMediaServer(HttpSyncer):
def __init__(self, col, hkey, con):
self.col = col
HttpSyncer.__init__(self, hkey, con)
def syncURL(self):
if os.getenv("ANKIDEV"):
return "https://l1.ankiweb.net/msync/"
return SYNC_MEDIA_BASE
def begin(self):
self.postVars = dict(
k=self.hkey,
v="ankidesktop,%s,%s"%(anki.version, platDesc())
)
ret = self._dataOnly(json.loads(self.req(
"begin", StringIO(json.dumps(dict())))))
self.skey = ret['sk']
return ret
# args: lastUsn
def mediaChanges(self, **kw):
self.postVars = dict(
sk=self.skey,
)
resp = json.loads(
self.req("mediaChanges", StringIO(json.dumps(kw))))
return self._dataOnly(resp)
# args: files
def downloadFiles(self, **kw):
return self.req("downloadFiles", StringIO(json.dumps(kw)))
def uploadChanges(self, zip):
# no compression, as we compress the zip file instead
return self._dataOnly(json.loads(
self.req("uploadChanges", StringIO(zip), comp=0)))
# args: local
def mediaSanity(self, **kw):
return self._dataOnly(json.loads(
self.req("mediaSanity", StringIO(json.dumps(kw)))))
def _dataOnly(self, resp):
if resp['err']:
self.col.log("error returned:%s"%resp['err'])
raise Exception("SyncError:%s"%resp['err'])
return resp['data']
# only for unit tests
def mediatest(self, cmd):
self.postVars = dict(
k=self.hkey,
)
return self._dataOnly(json.loads(
self.req("newMediaTest", StringIO(
json.dumps(dict(cmd=cmd))))))
| agpl-3.0 |
ngageoint/scale | scale/node/test/resources/test_gpu_manager.py | 1 | 4435 | import django
from django.test import TestCase
from node.resources.gpu_manager import GPUManager
class test_GPUManager(TestCase):
def setUp(self):
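        # Reset the GPUManager's shared GPU bookkeeping so reservations made
        # in one test do not leak into the next.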
GPUManager.reset_gpu_dict()
def test_add_new_node_gpus(self):
node_id = 1
gpu_count = 3
GPUManager.define_node_gpus(node_id,gpu_count)
self.assertEqual(GPUManager.get_gpu_count_for_node(node_id), gpu_count)
def test_add_less_gpu(self):
node_id = 2
gpu_count = 3
GPUManager.define_node_gpus(node_id,gpu_count)
self.assertEqual(GPUManager.get_gpu_count_for_node(node_id), 3)
gpu_count = 1
GPUManager.define_node_gpus(node_id,gpu_count)
self.assertEqual(GPUManager.get_gpu_count_for_node(node_id), 3)
def test_add_additional_GPU(self):
node_id = 3
gpu_count = 4
GPUManager.define_node_gpus(node_id,gpu_count)
self.assertEqual(GPUManager.get_gpu_count_for_node(node_id), 4)
def test_reserve_gpu(self):
node_id = 4
gpu_count = 2
required_gpus = 2
GPUManager.define_node_gpus(node_id,gpu_count)
self.assertTrue(GPUManager.reserve_gpus_for_job(node_id, required_gpus))
job_id = 11
self.assertFalse(GPUManager.reserve_gpus_for_job(node_id, required_gpus))
gpu_count = 4
GPUManager.define_node_gpus(node_id,gpu_count)
self.assertTrue(GPUManager.reserve_gpus_for_job(node_id, required_gpus))
def test_assign_gpus(self):
node_id = 5
job_id = 10
gpu_count = 2
required_gpus = 2
GPUManager.define_node_gpus(node_id,gpu_count)
GPUManager.reserve_gpus_for_job(node_id, required_gpus)
self.assertTrue(GPUManager.assign_gpus_for_job(node_id, job_id, required_gpus))
job_id = 11
        self.assertFalse(GPUManager.reserve_gpus_for_job(node_id, required_gpus)) # shouldn't have enough GPUs
gpu_count = 4
GPUManager.define_node_gpus(node_id,gpu_count)
GPUManager.reserve_gpus_for_job(node_id, required_gpus)
self.assertTrue(GPUManager.assign_gpus_for_job(node_id, job_id, required_gpus))
def test_get_nvidia_label(self):
node_id = 6
job_id = 10
gpu_count = 2
required_gpus = 2
GPUManager.define_node_gpus(node_id,gpu_count)
GPUManager.reserve_gpus_for_job(node_id, required_gpus)
GPUManager.assign_gpus_for_job(node_id, job_id, required_gpus)
nvidia_label = GPUManager.get_nvidia_docker_label(node_id, job_id)
self.assertEqual(nvidia_label, "0,1")
gpu_count = 4
job_id = 11
GPUManager.define_node_gpus(node_id, gpu_count)
GPUManager.reserve_gpus_for_job(node_id, required_gpus)
GPUManager.assign_gpus_for_job(node_id, job_id, required_gpus)
nvidia_label = GPUManager.get_nvidia_docker_label(node_id, job_id)
self.assertEqual(nvidia_label, "2,3")
def test_release_gpu(self):
node_id = 7
job_id = 10
gpu_count = 2
required_gpus = 2
GPUManager.define_node_gpus(node_id,gpu_count)
GPUManager.reserve_gpus_for_job(node_id, required_gpus)
self.assertTrue(GPUManager.assign_gpus_for_job(node_id, job_id, required_gpus))
job_id = 11
        self.assertFalse(GPUManager.reserve_gpus_for_job(node_id, required_gpus)) # shouldn't have enough GPUs
GPUManager.release_gpus(node_id, 10)
        self.assertTrue(GPUManager.reserve_gpus_for_job(node_id, required_gpus)) # GPUs should be available again
        self.assertTrue(GPUManager.assign_gpus_for_job(node_id, job_id, required_gpus)) # GPUs should be available again
nvidia_label = GPUManager.get_nvidia_docker_label(node_id, job_id)
self.assertEqual(nvidia_label, "0,1")
def test_calls_where_node_has_no_gpus(self):
node_id = 7
job_id = 10
gpu_count = 2
required_gpus = 2
GPUManager.define_node_gpus(node_id,gpu_count)
node_id = 8
self.assertFalse(GPUManager.reserve_gpus_for_job(node_id, required_gpus))
self.assertFalse(GPUManager.assign_gpus_for_job(node_id, job_id, required_gpus))
nvidia_label = GPUManager.get_nvidia_docker_label(node_id, job_id)
self.assertEqual(nvidia_label, "")
| apache-2.0 |
js0701/chromium-crosswalk | tools/grit/grit/gather/tr_html.py | 61 | 27320 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A gatherer for the TotalRecall brand of HTML templates with replaceable
portions. We wanted to reuse extern.tclib.api.handlers.html.TCHTMLParser
but this proved impossible due to the fact that the TotalRecall HTML templates
are in general quite far from parseable HTML and the TCHTMLParser derives
from HTMLParser.HTMLParser which requires relatively well-formed HTML. Some
examples of "HTML" from the TotalRecall HTML templates that wouldn't be
parseable include things like:
<a [PARAMS]>blabla</a> (not parseable because attributes are invalid)
<table><tr><td>[LOTSOFSTUFF]</tr></table> (not parseable because closing
</td> is in the HTML [LOTSOFSTUFF]
is replaced by)
The other problem with using general parsers (such as TCHTMLParser) is that
we want to make sure we output the TotalRecall template with as little changes
as possible in terms of whitespace characters, layout etc. With any parser
that generates a parse tree, and generates output by dumping the parse tree,
we would always have little inconsistencies which could cause bugs (the
TotalRecall template stuff is quite brittle and can break if e.g. a tab
character is replaced with spaces).
The solution, which may be applicable to some other HTML-like template
languages floating around Google, is to create a parser with a simple state
machine that keeps track of what kind of tag it's inside, and whether it's in
a translateable section or not. Translateable sections are:
a) text (including [BINGO] replaceables) inside of tags that
can contain translateable text (which is all tags except
for a few)
b) text inside of an 'alt' attribute in an <image> element, or
the 'value' attribute of a <submit>, <button> or <text>
element.
The parser does not build up a parse tree but rather a "skeleton" which
is a list of nontranslateable strings intermingled with grit.clique.MessageClique
objects. This simplifies the parser considerably compared to a regular HTML
parser. To output a translated document, each item in the skeleton is
printed out, with the relevant Translation from each MessageCliques being used
for the requested language.
This implementation borrows some code, constants and ideas from
extern.tclib.api.handlers.html.TCHTMLParser.
'''
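# Illustrative example (not from the original source): for a template such as
#   <p>Hello <b>[USERNAME]</b>!</p>
# the resulting skeleton holds the nontranslateable strings '<p>' and '</p>'
# around a single translateable chunk covering 'Hello <b>[USERNAME]</b>!'.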
import re
import types
from grit import clique
from grit import exception
from grit import lazy_re
from grit import util
from grit import tclib
from grit.gather import interface
# HTML tags which break (separate) chunks.
_BLOCK_TAGS = ['script', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'br',
'body', 'style', 'head', 'title', 'table', 'tr', 'td', 'th',
'ul', 'ol', 'dl', 'nl', 'li', 'div', 'object', 'center',
'html', 'link', 'form', 'select', 'textarea',
'button', 'option', 'map', 'area', 'blockquote', 'pre',
'meta', 'xmp', 'noscript', 'label', 'tbody', 'thead',
'script', 'style', 'pre', 'iframe', 'img', 'input', 'nowrap',
'fieldset', 'legend']
# HTML tags which may appear within a chunk.
_INLINE_TAGS = ['b', 'i', 'u', 'tt', 'code', 'font', 'a', 'span', 'small',
'key', 'nobr', 'url', 'em', 's', 'sup', 'strike',
'strong']
# HTML tags within which linebreaks are significant.
_PREFORMATTED_TAGS = ['textarea', 'xmp', 'pre']
# An array mapping some of the inline HTML tags to more meaningful
# names for those tags. This will be used when generating placeholders
# representing these tags.
_HTML_PLACEHOLDER_NAMES = { 'a' : 'link', 'br' : 'break', 'b' : 'bold',
'i' : 'italic', 'li' : 'item', 'ol' : 'ordered_list', 'p' : 'paragraph',
'ul' : 'unordered_list', 'img' : 'image', 'em' : 'emphasis' }
# We append each of these characters in sequence to distinguish between
# different placeholders with basically the same name (e.g. BOLD1, BOLD2).
# Keep in mind that a placeholder name must not be a substring of any other
# placeholder name in the same message, so we can't simply count (BOLD_1
# would be a substring of BOLD_10).
_SUFFIXES = '123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Matches whitespace in an HTML document. Also matches HTML comments, which are
# treated as whitespace.
_WHITESPACE = lazy_re.compile(r'(\s|&nbsp;|\\n|\\r|<!--\s*desc\s*=.*?-->)+',
re.DOTALL)
# Matches whitespace sequences which can be folded into a single whitespace
# character. This matches single characters so that non-spaces are replaced
# with spaces.
_FOLD_WHITESPACE = lazy_re.compile(r'\s+')
# Finds a non-whitespace character
_NON_WHITESPACE = lazy_re.compile(r'\S')
# Matches two or more &nbsp; in a row (a single &nbsp; is not changed into
# placeholders because different languages require different numbers of spaces
# and placeholders must match exactly; more than one is probably a "special"
# whitespace sequence and should be turned into a placeholder).
_NBSP = lazy_re.compile(r'&nbsp;(&nbsp;)+')
# Matches nontranslateable chunks of the document
_NONTRANSLATEABLES = lazy_re.compile(r'''
<\s*script.+?<\s*/\s*script\s*>
|
<\s*style.+?<\s*/\s*style\s*>
|
<!--.+?-->
|
<\?IMPORT\s.+?> # import tag
|
<\s*[a-zA-Z_]+:.+?> # custom tag (open)
|
<\s*/\s*[a-zA-Z_]+:.+?> # custom tag (close)
|
<!\s*[A-Z]+\s*([^>]+|"[^"]+"|'[^']+')*?>
''', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
# Matches a tag and its attributes
_ELEMENT = lazy_re.compile(r'''
# Optional closing /, element name
<\s*(?P<closing>/)?\s*(?P<element>[a-zA-Z0-9]+)\s*
# Attributes and/or replaceables inside the tag, if any
(?P<atts>(
\s*([a-zA-Z_][-:.a-zA-Z_0-9]*) # Attribute name
(\s*=\s*(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?
|
\s*\[(\$?\~)?([A-Z0-9-_]+?)(\~\$?)?\]
)*)
\s*(?P<empty>/)?\s*> # Optional empty-tag closing /, and tag close
''',
re.MULTILINE | re.DOTALL | re.VERBOSE)
# Matches elements that may have translateable attributes. The value of these
# special attributes is given by group 'value1' or 'value2'. Note that this
# regexp demands that the attribute value be quoted; this is necessary because
# the non-tree-building nature of the parser means we don't know when we're
# writing out attributes, so we wouldn't know to escape spaces.
_SPECIAL_ELEMENT = lazy_re.compile(r'''
<\s*(
input[^>]+?value\s*=\s*(\'(?P<value3>[^\']*)\'|"(?P<value4>[^"]*)")
[^>]+type\s*=\s*"?'?(button|reset|text|submit)'?"?
|
(
table[^>]+?title\s*=
|
img[^>]+?alt\s*=
|
input[^>]+?type\s*=\s*"?'?(button|reset|text|submit)'?"?[^>]+?value\s*=
)
\s*(\'(?P<value1>[^\']*)\'|"(?P<value2>[^"]*)")
)[^>]*?>
''', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
# Matches stuff that is translateable if it occurs in the right context
# (between tags). This includes all characters and character entities.
# Note that this also matches &nbsp; which needs to be handled as whitespace
# before this regexp is applied.
_CHARACTERS = lazy_re.compile(r'''
(
\w
|
[\!\@\#\$\%\^\*\(\)\-\=\_\+\[\]\{\}\\\|\;\:\'\"\,\.\/\?\`\~]
|
&(\#[0-9]+|\#x[0-9a-fA-F]+|[A-Za-z0-9]+);
)+
''', re.MULTILINE | re.DOTALL | re.VERBOSE)
# Matches Total Recall's "replaceable" tags, which are just any text
# in capitals enclosed by delimiters like [] or [~~] or [$~~$] (e.g. [HELLO],
# [~HELLO~] and [$~HELLO~$]).
_REPLACEABLE = lazy_re.compile(r'\[(\$?\~)?(?P<name>[A-Z0-9-_]+?)(\~\$?)?\]',
re.MULTILINE)
# Matches the silly [!]-prefixed "header" that is used in some TotalRecall
# templates.
_SILLY_HEADER = lazy_re.compile(r'\[!\]\ntitle\t(?P<title>[^\n]+?)\n.+?\n\n',
re.MULTILINE | re.DOTALL)
# Matches a comment that provides a description for the message it occurs in.
_DESCRIPTION_COMMENT = lazy_re.compile(
r'<!--\s*desc\s*=\s*(?P<description>.+?)\s*-->', re.DOTALL)
# Matches a comment which is used to break apart multiple messages.
_MESSAGE_BREAK_COMMENT = lazy_re.compile(r'<!--\s*message-break\s*-->',
re.DOTALL)
# Matches a comment which is used to prevent block tags from splitting a message
_MESSAGE_NO_BREAK_COMMENT = re.compile(r'<!--\s*message-no-break\s*-->',
re.DOTALL)
_DEBUG = 0
def _DebugPrint(text):
if _DEBUG:
print text.encode('utf-8')
class HtmlChunks(object):
'''A parser that knows how to break an HTML-like document into a list of
chunks, where each chunk is either translateable or non-translateable.
The chunks are unmodified sections of the original document, so concatenating
the text of all chunks would result in the original document.'''
def InTranslateable(self):
return self.last_translateable != -1
def Rest(self):
return self.text_[self.current:]
def StartTranslateable(self):
assert not self.InTranslateable()
if self.current != 0:
# Append a nontranslateable chunk
chunk_text = self.text_[self.chunk_start : self.last_nontranslateable + 1]
# Needed in the case where document starts with a translateable.
if len(chunk_text) > 0:
self.AddChunk(False, chunk_text)
self.chunk_start = self.last_nontranslateable + 1
self.last_translateable = self.current
self.last_nontranslateable = -1
def EndTranslateable(self):
assert self.InTranslateable()
# Append a translateable chunk
self.AddChunk(True,
self.text_[self.chunk_start : self.last_translateable + 1])
self.chunk_start = self.last_translateable + 1
self.last_translateable = -1
self.last_nontranslateable = self.current
def AdvancePast(self, match):
self.current += match.end()
def AddChunk(self, translateable, text):
'''Adds a chunk to self, removing linebreaks and duplicate whitespace
if appropriate.
'''
m = _DESCRIPTION_COMMENT.search(text)
if m:
self.last_description = m.group('description')
# Remove the description from the output text
text = _DESCRIPTION_COMMENT.sub('', text)
m = _MESSAGE_BREAK_COMMENT.search(text)
if m:
      # Remove the comment from the output text. It should already effectively
# break apart messages.
text = _MESSAGE_BREAK_COMMENT.sub('', text)
if translateable and not self.last_element_ in _PREFORMATTED_TAGS:
if self.fold_whitespace_:
# Fold whitespace sequences if appropriate. This is optional because it
# alters the output strings.
text = _FOLD_WHITESPACE.sub(' ', text)
else:
text = text.replace('\n', ' ')
text = text.replace('\r', ' ')
# This whitespace folding doesn't work in all cases, thus the
# fold_whitespace flag to support backwards compatibility.
        text = text.replace('   ', ' ')
        text = text.replace('  ', ' ')
if translateable:
description = self.last_description
self.last_description = ''
else:
description = ''
if text != '':
self.chunks_.append((translateable, text, description))
def Parse(self, text, fold_whitespace):
'''Parses self.text_ into an intermediate format stored in self.chunks_
which is translateable and nontranslateable chunks. Also returns
self.chunks_
Args:
text: The HTML for parsing.
fold_whitespace: Whether whitespace sequences should be folded into a
single space.
Return:
[chunk1, chunk2, chunk3, ...] (instances of class Chunk)
'''
#
# Chunker state
#
self.text_ = text
self.fold_whitespace_ = fold_whitespace
# A list of tuples (is_translateable, text) which represents the document
# after chunking.
self.chunks_ = []
# Start index of the last chunk, whether translateable or not
self.chunk_start = 0
# Index of the last for-sure translateable character if we are parsing
# a translateable chunk, -1 to indicate we are not in a translateable chunk.
# This is needed so that we don't include trailing whitespace in the
# translateable chunk (whitespace is neutral).
self.last_translateable = -1
# Index of the last for-sure nontranslateable character if we are parsing
# a nontranslateable chunk, -1 if we are not in a nontranslateable chunk.
# This is needed to make sure we can group e.g. "<b>Hello</b> there"
# together instead of just "Hello</b> there" which would be much worse
# for translation.
self.last_nontranslateable = -1
# Index of the character we're currently looking at.
self.current = 0
# The name of the last block element parsed.
self.last_element_ = ''
# The last explicit description we found.
self.last_description = ''
# Whether no-break was the last chunk seen
self.last_nobreak = False
while self.current < len(self.text_):
_DebugPrint('REST: %s' % self.text_[self.current:self.current+60])
m = _MESSAGE_NO_BREAK_COMMENT.match(self.Rest())
if m:
self.AdvancePast(m)
self.last_nobreak = True
continue
# Try to match whitespace
m = _WHITESPACE.match(self.Rest())
if m:
# Whitespace is neutral, it just advances 'current' and does not switch
# between translateable/nontranslateable. If we are in a
# nontranslateable section that extends to the current point, we extend
# it to include the whitespace. If we are in a translateable section,
# we do not extend it until we find
# more translateable parts, because we never want a translateable chunk
# to end with whitespace.
if (not self.InTranslateable() and
self.last_nontranslateable == self.current - 1):
self.last_nontranslateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
# Then we try to match nontranslateables
m = _NONTRANSLATEABLES.match(self.Rest())
if m:
if self.InTranslateable():
self.EndTranslateable()
self.last_nontranslateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
# Now match all other HTML element tags (opening, closing, or empty, we
# don't care).
m = _ELEMENT.match(self.Rest())
if m:
element_name = m.group('element').lower()
if element_name in _BLOCK_TAGS:
self.last_element_ = element_name
if self.InTranslateable():
if self.last_nobreak:
self.last_nobreak = False
else:
self.EndTranslateable()
# Check for "special" elements, i.e. ones that have a translateable
# attribute, and handle them correctly. Note that all of the
# "special" elements are block tags, so no need to check for this
# if the tag is not a block tag.
sm = _SPECIAL_ELEMENT.match(self.Rest())
if sm:
# Get the appropriate group name
for group in sm.groupdict().keys():
if sm.groupdict()[group]:
break
# First make a nontranslateable chunk up to and including the
# quote before the translateable attribute value
self.AddChunk(False, self.text_[
self.chunk_start : self.current + sm.start(group)])
# Then a translateable for the translateable bit
self.AddChunk(True, self.Rest()[sm.start(group) : sm.end(group)])
# Finally correct the data invariant for the parser
self.chunk_start = self.current + sm.end(group)
self.last_nontranslateable = self.current + m.end() - 1
elif self.InTranslateable():
# We're in a translateable and the tag is an inline tag, so we
# need to include it in the translateable.
self.last_translateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
# Anything else we find must be translateable, so we advance one character
# at a time until one of the above matches.
if not self.InTranslateable():
self.StartTranslateable()
else:
self.last_translateable = self.current
self.current += 1
# Close the final chunk
if self.InTranslateable():
self.AddChunk(True, self.text_[self.chunk_start : ])
else:
self.AddChunk(False, self.text_[self.chunk_start : ])
return self.chunks_
def HtmlToMessage(html, include_block_tags=False, description=''):
'''Takes a bit of HTML, which must contain only "inline" HTML elements,
and changes it into a tclib.Message. This involves escaping any entities and
replacing any HTML code with placeholders.
If include_block_tags is true, no error will be given if block tags (e.g.
<p> or <br>) are included in the HTML.
Args:
html: 'Hello <b>[USERNAME]</b>, how&nbsp;<i>are</i> you?'
include_block_tags: False
Return:
tclib.Message('Hello START_BOLDUSERNAMEEND_BOLD, '
'howNBSPSTART_ITALICareEND_ITALIC you?',
[ Placeholder('START_BOLD', '<b>', ''),
Placeholder('USERNAME', '[USERNAME]', ''),
Placeholder('END_BOLD', '</b>', ''),
Placeholder('START_ITALIC', '<i>', ''),
Placeholder('END_ITALIC', '</i>', ''), ])
'''
# Approach is:
# - first placeholderize, finding <elements>, [REPLACEABLES] and
# - then escape all character entities in text in-between placeholders
parts = [] # List of strings (for text chunks) and tuples (ID, original)
# for placeholders
count_names = {} # Map of base names to number of times used
end_names = {} # Map of base names to stack of end tags (for correct nesting)
def MakeNameClosure(base, type = ''):
'''Returns a closure that can be called once all names have been allocated
to return the final name of the placeholder. This allows us to minimally
number placeholders for non-overlap.
Also ensures that END_XXX_Y placeholders have the same Y as the
corresponding BEGIN_XXX_Y placeholder when we have nested tags of the same
type.
Args:
base: 'phname'
type: '' | 'begin' | 'end'
Return:
Closure()
'''
name = base.upper()
if type != '':
name = ('%s_%s' % (type, base)).upper()
if name in count_names.keys():
count_names[name] += 1
else:
count_names[name] = 1
def MakeFinalName(name_ = name, index = count_names[name] - 1):
if (type.lower() == 'end' and
base in end_names.keys() and len(end_names[base])):
return end_names[base].pop(-1) # For correct nesting
if count_names[name_] != 1:
name_ = '%s_%s' % (name_, _SUFFIXES[index])
# We need to use a stack to ensure that the end-tag suffixes match
# the begin-tag suffixes. Only needed when more than one tag of the
# same type.
if type == 'begin':
end_name = ('END_%s_%s' % (base, _SUFFIXES[index])).upper()
if base in end_names.keys():
end_names[base].append(end_name)
else:
end_names[base] = [end_name]
return name_
return MakeFinalName
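# Added explanatory note: MakeNameClosure defers final naming so that a second
# occurrence of, say, a bold tag can get a distinct suffix drawn from _SUFFIXES
# (defined earlier in this module), while the end_names stack keeps each END_*
# suffix paired with its BEGIN_* counterpart. For a single <b> tag the names
# stay unsuffixed (assuming _HTML_PLACEHOLDER_NAMES maps 'b' to a readable
# name such as 'bold').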
current = 0
last_nobreak = False
while current < len(html):
m = _MESSAGE_NO_BREAK_COMMENT.match(html[current:])
if m:
last_nobreak = True
current += m.end()
continue
m = _NBSP.match(html[current:])
if m:
parts.append((MakeNameClosure('SPACE'), m.group()))
current += m.end()
continue
m = _REPLACEABLE.match(html[current:])
if m:
# Replaceables allow - but placeholders don't, so replace - with _
ph_name = MakeNameClosure('X_%s_X' % m.group('name').replace('-', '_'))
parts.append((ph_name, m.group()))
current += m.end()
continue
m = _SPECIAL_ELEMENT.match(html[current:])
if m:
if not include_block_tags:
if last_nobreak:
last_nobreak = False
else:
raise exception.BlockTagInTranslateableChunk(html)
element_name = 'block' # for simplification
# Get the appropriate group name
for group in m.groupdict().keys():
if m.groupdict()[group]:
break
parts.append((MakeNameClosure(element_name, 'begin'),
html[current : current + m.start(group)]))
parts.append(m.group(group))
parts.append((MakeNameClosure(element_name, 'end'),
html[current + m.end(group) : current + m.end()]))
current += m.end()
continue
m = _ELEMENT.match(html[current:])
if m:
element_name = m.group('element').lower()
if not include_block_tags and not element_name in _INLINE_TAGS:
if last_nobreak:
last_nobreak = False
else:
raise exception.BlockTagInTranslateableChunk(html[current:])
if element_name in _HTML_PLACEHOLDER_NAMES: # use meaningful names
element_name = _HTML_PLACEHOLDER_NAMES[element_name]
# Make a name for the placeholder
type = ''
if not m.group('empty'):
if m.group('closing'):
type = 'end'
else:
type = 'begin'
parts.append((MakeNameClosure(element_name, type), m.group()))
current += m.end()
continue
if len(parts) and isinstance(parts[-1], types.StringTypes):
parts[-1] += html[current]
else:
parts.append(html[current])
current += 1
msg_text = ''
placeholders = []
for part in parts:
if isinstance(part, types.TupleType):
final_name = part[0]()
original = part[1]
msg_text += final_name
placeholders.append(tclib.Placeholder(final_name, original, '(HTML code)'))
else:
msg_text += part
msg = tclib.Message(text=msg_text, placeholders=placeholders,
description=description)
content = msg.GetContent()
for ix in range(len(content)):
if isinstance(content[ix], types.StringTypes):
content[ix] = util.UnescapeHtml(content[ix], replace_nbsp=False)
return msg
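# Minimal usage sketch (added for illustration, not from the original file):
#   msg = HtmlToMessage('Hello <b>friend</b>')
# would be expected to produce a tclib.Message whose GetContent() interleaves
# text with Placeholder objects for '<b>' and '</b>', and whose
# GetRealContent() (used by TrHtml.Parse below) reproduces the original markup.
# 'friend' is just an invented example string.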
class TrHtml(interface.GathererBase):
'''Represents a document or message in the template format used by
Total Recall for HTML documents.'''
def __init__(self, *args, **kwargs):
super(TrHtml, self).__init__(*args, **kwargs)
self.have_parsed_ = False
self.skeleton_ = [] # list of strings and MessageClique objects
self.fold_whitespace_ = False
def SetAttributes(self, attrs):
'''Sets node attributes used by the gatherer.
This checks the fold_whitespace attribute.
Args:
attrs: The mapping of node attributes.
'''
self.fold_whitespace_ = ('fold_whitespace' in attrs and
attrs['fold_whitespace'] == 'true')
def GetText(self):
'''Returns the original text of the HTML document'''
return self.text_
def GetTextualIds(self):
return [self.extkey]
def GetCliques(self):
'''Returns the message cliques for each translateable message in the
document.'''
return [x for x in self.skeleton_ if isinstance(x, clique.MessageClique)]
def Translate(self, lang, pseudo_if_not_available=True,
skeleton_gatherer=None, fallback_to_english=False):
'''Returns this document with translateable messages filled with
the translation for language 'lang'.
Args:
lang: 'en'
pseudo_if_not_available: True
Return:
'ID_THIS_SECTION TYPE\n...BEGIN\n "Translated message"\n......\nEND
Raises:
grit.exception.NotReady() if used before Parse() has been successfully
called.
grit.exception.NoSuchTranslation() if 'pseudo_if_not_available' is false
and there is no translation for the requested language.
'''
if len(self.skeleton_) == 0:
raise exception.NotReady()
# TODO(joi) Implement support for skeleton gatherers here.
out = []
for item in self.skeleton_:
if isinstance(item, types.StringTypes):
out.append(item)
else:
msg = item.MessageForLanguage(lang,
pseudo_if_not_available,
fallback_to_english)
for content in msg.GetContent():
if isinstance(content, tclib.Placeholder):
out.append(content.GetOriginal())
else:
# We escape " characters to increase the chance that attributes
# will be properly escaped.
out.append(util.EscapeHtml(content, True))
return ''.join(out)
def Parse(self):
if self.have_parsed_:
return
self.have_parsed_ = True
text = self._LoadInputFile()
# Ignore the BOM character if the document starts with one.
if text.startswith(u'\ufeff'):
text = text[1:]
self.text_ = text
# Parsing is done in two phases: First, we break the document into
# translateable and nontranslateable chunks. Second, we run through each
# translateable chunk and insert placeholders for any HTML elements,
# unescape escaped characters, etc.
# First handle the silly little [!]-prefixed header because it's not
# handled by our HTML parsers.
m = _SILLY_HEADER.match(text)
if m:
self.skeleton_.append(text[:m.start('title')])
self.skeleton_.append(self.uberclique.MakeClique(
tclib.Message(text=text[m.start('title'):m.end('title')])))
self.skeleton_.append(text[m.end('title') : m.end()])
text = text[m.end():]
chunks = HtmlChunks().Parse(text, self.fold_whitespace_)
for chunk in chunks:
if chunk[0]: # Chunk is translateable
self.skeleton_.append(self.uberclique.MakeClique(
HtmlToMessage(chunk[1], description=chunk[2])))
else:
self.skeleton_.append(chunk[1])
# Go through the skeleton and change any messages that consist solely of
# placeholders and whitespace into nontranslateable strings.
for ix in range(len(self.skeleton_)):
got_text = False
if isinstance(self.skeleton_[ix], clique.MessageClique):
msg = self.skeleton_[ix].GetMessage()
for item in msg.GetContent():
if (isinstance(item, types.StringTypes) and _NON_WHITESPACE.search(item)
and item != '&nbsp;'):
got_text = True
break
if not got_text:
self.skeleton_[ix] = msg.GetRealContent()
def SubstituteMessages(self, substituter):
'''Applies substitutions to all messages in the tree.
Goes through the skeleton and finds all MessageCliques.
Args:
substituter: a grit.util.Substituter object.
'''
new_skel = []
for chunk in self.skeleton_:
if isinstance(chunk, clique.MessageClique):
old_message = chunk.GetMessage()
new_message = substituter.SubstituteMessage(old_message)
if new_message is not old_message:
new_skel.append(self.uberclique.MakeClique(new_message))
continue
new_skel.append(chunk)
self.skeleton_ = new_skel
| bsd-3-clause |
spacepajamas/DiaTop | Mapping/Mapping_1_get_document_topic_distribution.py | 1 | 1554 | # coding: utf-8
import pickle
import gensim
import sys
import pandas as pd
import datetime
mylist = []
today = datetime.date.today()
mylist.append(today)
date = str(mylist[0]) # print the date object, not the container ;-)
lda_corpus = sys.argv[1]
lda_model_name = sys.argv[2]
metadatafilename = sys.argv[3]
corpus = gensim.corpora.MmCorpus(lda_corpus)
model = gensim.models.ldamulticore.LdaMulticore.load(lda_model_name, mmap='r' )
print 'Get document topic document topic distribution from LDA model'
doc_topic_dist = {}
for i in range(len(corpus)):
print len(corpus) - i, 'left'
doc_topic_dist.update({i:model.get_document_topics(corpus[i])})
outpickelfilename = 'document_topic_distribution'+date+'.pkl'
pickle.dump(doc_topic_dist, open(outpickelfilename, 'wb'))
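# Sketch of the pickled structure (values invented for illustration):
# doc_topic_dist maps a document index to gensim's sparse list of
# (topic_id, probability) pairs, e.g. {0: [(3, 0.62), (7, 0.21)], 1: [(1, 0.9)]}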
print 'done'
print lda_model_name.split('_')
print lda_model_name
topics = int(lda_model_name.split('_')[4])
metadata = pd.read_csv(metadatafilename)
outer_matrix = [0]*len(doc_topic_dist)
#print outer_matrix
for i in range(len(doc_topic_dist)):
inner_list = [0]*topics
#print i
for v in doc_topic_dist[i]:
inner_list[v[0]] =v[1]
#print inner_list
outer_matrix[i] = inner_list
# print outer_matrix[i]
#print outer_matrix
topic_info = pd.DataFrame(outer_matrix, columns=[i for i in range(1,topics+1)])
topic_distr_df_metadata = metadata.join(topic_info, how='outer')
outputfilename = 'M1_topic_distr_df_'+date+'.csv'
topic_distr_df_metadata.to_csv(outputfilename, sep=',', encoding='utf-8', index = False)
print topic_distr_df_metadata | gpl-3.0 |
olear/R104 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
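# Added note: 'syscalls' builds up a nested mapping of
# comm -> pid -> syscall id -> count, e.g. syscalls['bash'][1234][59] == 3;
# the concrete values shown here are made-up examples.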
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
rocky/python-uncompyle6 | uncompyle6/bin/uncompile.py | 1 | 9502 | #!/usr/bin/env python
# Mode: -*- python -*-
#
# Copyright (c) 2015-2017, 2019-2020 by Rocky Bernstein
# Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
#
from __future__ import print_function
import sys, os, getopt, time
program = 'uncompyle6'
__doc__ = """
Usage:
%s [OPTIONS]... [ FILE | DIR]...
%s [--help | -h | --V | --version]
Examples:
%s foo.pyc bar.pyc # decompile foo.pyc, bar.pyc to stdout
%s -o . foo.pyc bar.pyc # decompile to ./foo.pyc_dis and ./bar.pyc_dis
%s -o /tmp /usr/lib/python1.5 # decompile whole library
Options:
-o <path> output decompiled files to this path:
if multiple input files are decompiled, the common prefix
is stripped from these names and the remainder appended to
<path>
uncompyle6 -o /tmp bla/fasel.pyc bla/foo.pyc
-> /tmp/fasel.pyc_dis, /tmp/foo.pyc_dis
uncompyle6 -o /tmp bla/fasel.pyc bar/foo.pyc
-> /tmp/bla/fasel.pyc_dis, /tmp/bar/foo.pyc_dis
uncompyle6 -o /tmp /usr/lib/python1.5
-> /tmp/smtplib.pyc_dis ... /tmp/lib-tk/FixTk.pyc_dis
--compile | -c <python-file>
attempts a decompilation after compiling <python-file>
-d print timestamps
-p <integer> use <integer> number of processes
-r recurse directories looking for .pyc and .pyo files
--fragments use fragments deparser
--verify compare generated source with input byte-code
--verify-run compile generated source, run it and check exit code
--syntax-verify compile generated source
--linemaps generated line number correspondences between byte-code
and generated source output
--encoding <encoding>
use <encoding> in generated source according to pep-0263
--help show this message
Debugging Options:
--asm | -a include byte-code (disables --verify)
--grammar | -g show matching grammar
--tree={before|after}
-t {before|after} include syntax before (or after) tree transformation
(disables --verify)
--tree++ | -T add template rules to --tree=before when possible
Extensions of generated files:
'.pyc_dis' '.pyo_dis' successfully decompiled (and verified if --verify)
+ '_unverified' successfully decompiled but --verify failed
+ '_failed' decompile failed (contact author for enhancement)
""" % ((program,) * 5)
program = 'uncompyle6'
from uncompyle6 import verify
from uncompyle6.main import main, status_msg
from uncompyle6.version import __version__
def usage():
print(__doc__)
sys.exit(1)
def main_bin():
if not (sys.version_info[0:2] in ((2, 6), (2, 7), (3, 0),
(3, 1), (3, 2), (3, 3),
(3, 4), (3, 5), (3, 6),
(3, 7), (3, 8)
)):
print('Error: %s requires Python 2.6-3.8' % program,
file=sys.stderr)
sys.exit(-1)
do_verify = recurse_dirs = False
numproc = 0
outfile = '-'
out_base = None
source_paths = []
timestamp = False
timestampfmt = "# %Y.%m.%d %H:%M:%S %Z"
try:
opts, pyc_paths = getopt.getopt(sys.argv[1:], 'hac:gtTdrVo:p:',
'help asm compile= grammar linemaps recurse '
'timestamp tree= tree+ '
'fragments verify verify-run version '
'syntax-verify '
'showgrammar encoding='.split(' '))
except getopt.GetoptError as e:
print('%s: %s' % (os.path.basename(sys.argv[0]), e), file=sys.stderr)
sys.exit(-1)
options = {}
for opt, val in opts:
if opt in ('-h', '--help'):
print(__doc__)
sys.exit(0)
elif opt in ('-V', '--version'):
print("%s %s" % (program, __version__))
sys.exit(0)
elif opt == '--verify':
options['do_verify'] = 'strong'
elif opt == '--syntax-verify':
options['do_verify'] = 'weak'
elif opt == '--fragments':
options['do_fragments'] = True
elif opt == '--verify-run':
options['do_verify'] = 'verify-run'
elif opt == '--linemaps':
options['do_linemaps'] = True
elif opt in ('--asm', '-a'):
options['showasm'] = 'after'
options['do_verify'] = None
elif opt in ('--tree', '-t'):
if 'showast' not in options:
options['showast'] = {}
if val == 'before':
options['showast'][val] = True
elif val == 'after':
options['showast'][val] = True
else:
options['showast']['before'] = True
options['do_verify'] = None
elif opt in ('--tree+', '-T'):
if 'showast' not in options:
options['showast'] = {}
options['showast']['Full'] = True
options['do_verify'] = None
elif opt in ('--grammar', '-g'):
options['showgrammar'] = True
elif opt == '-o':
outfile = val
elif opt in ('--timestamp', '-d'):
timestamp = True
elif opt in ('--compile', '-c'):
source_paths.append(val)
elif opt == '-p':
numproc = int(val)
elif opt in ('--recurse', '-r'):
recurse_dirs = True
elif opt == '--encoding':
options['source_encoding'] = val
else:
print(opt, file=sys.stderr)
usage()
# expand directory if specified
if recurse_dirs:
expanded_files = []
for f in pyc_paths:
if os.path.isdir(f):
for root, _, dir_files in os.walk(f):
for df in dir_files:
if df.endswith('.pyc') or df.endswith('.pyo'):
expanded_files.append(os.path.join(root, df))
pyc_paths = expanded_files
# argl, commonprefix works on strings, not on path parts,
# thus we must handle the case with files in 'some/classes'
# and 'some/cmds'
src_base = os.path.commonprefix(pyc_paths)
if src_base[-1:] != os.sep:
src_base = os.path.dirname(src_base)
if src_base:
sb_len = len( os.path.join(src_base, '') )
pyc_paths = [f[sb_len:] for f in pyc_paths]
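# Worked example (added): with pyc_paths == ['some/classes/a.pyc',
# 'some/cmds/b.pyc'], commonprefix() yields 'some/c', dirname() trims it
# to 'some', and the paths become ['classes/a.pyc', 'cmds/b.pyc'].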
if not pyc_paths and not source_paths:
print("No input files given to decompile", file=sys.stderr)
usage()
if outfile == '-':
outfile = None # use stdout
elif outfile and os.path.isdir(outfile):
out_base = outfile; outfile = None
elif outfile and len(pyc_paths) > 1:
out_base = outfile; outfile = None
if timestamp:
print(time.strftime(timestampfmt))
if numproc <= 1:
try:
result = main(src_base, out_base, pyc_paths, source_paths, outfile,
**options)
result = list(result) + [options.get('do_verify', None)]
if len(pyc_paths) > 1:
mess = status_msg(do_verify, *result)
print('# ' + mess)
pass
except (KeyboardInterrupt):
pass
except verify.VerifyCmpError:
raise
else:
from multiprocessing import Process, Queue
try:
from Queue import Empty
except ImportError:
from queue import Empty
fqueue = Queue(len(pyc_paths)+numproc)
for f in pyc_paths:
fqueue.put(f)
for i in range(numproc):
fqueue.put(None)
rqueue = Queue(numproc)
def process_func():
try:
(tot_files, okay_files, failed_files, verify_failed_files) = (0, 0, 0, 0)
while 1:
f = fqueue.get()
if f is None:
break
(t, o, f, v) = \
main(src_base, out_base, [f], [], outfile, **options)
tot_files += t
okay_files += o
failed_files += f
verify_failed_files += v
except (Empty, KeyboardInterrupt):
pass
rqueue.put((tot_files, okay_files, failed_files, verify_failed_files))
rqueue.close()
try:
procs = [Process(target=process_func) for i in range(numproc)]
for p in procs:
p.start()
for p in procs:
p.join()
try:
(tot_files, okay_files, failed_files, verify_failed_files) = (0, 0, 0, 0)
while True:
(t, o, f, v) = rqueue.get(False)
tot_files += t
okay_files += o
failed_files += f
verify_failed_files += v
except Empty:
pass
print('# decompiled %i files: %i okay, %i failed, %i verify failed' %
(tot_files, okay_files, failed_files, verify_failed_files))
except (KeyboardInterrupt, OSError):
pass
if timestamp:
print(time.strftime(timestampfmt))
return
if __name__ == '__main__':
main_bin()
| gpl-3.0 |
sarakha63/persomov | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/lrt.py | 21 | 2028 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import (
determine_ext,
js_to_json,
parse_duration,
remove_end,
)
class LRTIE(InfoExtractor):
IE_NAME = 'lrt.lt'
_VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.lrt.lt/mediateka/irasas/54391/',
'info_dict': {
'id': '54391',
'ext': 'mp4',
'title': 'Septynios Kauno dienos',
'description': 'md5:24d84534c7dc76581e59f5689462411a',
'duration': 1783,
},
'params': {
'skip_download': True, # HLS download
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = remove_end(self._og_search_title(webpage), ' - LRT')
thumbnail = self._og_search_thumbnail(webpage)
description = self._og_search_description(webpage)
duration = parse_duration(self._search_regex(
r"'duration':\s*'([^']+)',", webpage,
'duration', fatal=False, default=None))
formats = []
for js in re.findall(r'(?s)config:\s*(\{.*?\})', webpage):
data = json.loads(js_to_json(js))
if data['provider'] == 'rtmp':
formats.append({
'format_id': 'rtmp',
'ext': determine_ext(data['file']),
'url': data['streamer'],
'play_path': 'mp4:%s' % data['file'],
'preference': -1,
})
else:
formats.extend(
self._extract_m3u8_formats(data['file'], video_id, 'mp4'))
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
}
| gpl-3.0 |
manderson23/NewsBlur | apps/rss_feeds/migrations/0023_queued_date.py | 18 | 9586 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Feed.queued_date'
db.add_column('feeds', 'queued_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Feed.queued_date'
db.delete_column('feeds', 'queued_date')
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '15'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedfetchhistory': {
'Meta': {'object_name': 'FeedFetchHistory'},
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feed_fetch_history'", 'to': "orm['rss_feeds.Feed']"}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedpage': {
'Meta': {'object_name': 'FeedPage'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_page'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_data': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedupdatehistory': {
'Meta': {'object_name': 'FeedUpdateHistory'},
'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
},
'rss_feeds.feedxml': {
'Meta': {'object_name': 'FeedXML'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_xml'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rss_xml': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.pagefetchhistory': {
'Meta': {'object_name': 'PageFetchHistory'},
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'page_fetch_history'", 'to': "orm['rss_feeds.Feed']"}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.story': {
'Meta': {'ordering': "['-story_date']", 'unique_together': "(('story_feed', 'story_guid_hash'),)", 'object_name': 'Story', 'db_table': "'stories'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'story_author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.StoryAuthor']"}),
'story_author_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'story_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_content_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'story_date': ('django.db.models.fields.DateTimeField', [], {}),
'story_feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories'", 'to': "orm['rss_feeds.Feed']"}),
'story_guid': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_guid_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'story_original_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_past_trim_date': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'story_permalink': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_tags': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'story_title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'rss_feeds.storyauthor': {
'Meta': {'object_name': 'StoryAuthor'},
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.tag': {
'Meta': {'object_name': 'Tag'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['rss_feeds']
| mit |
chand3040/cloud_that | openedx/core/lib/logsettings.py | 127 | 5765 | """Get log settings."""
import os
import platform
import sys
from logging.handlers import SysLogHandler
LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
def get_logger_config(log_dir,
logging_env="no_env",
tracking_filename="tracking.log",
edx_filename="edx.log",
dev_env=False,
syslog_addr=None,
debug=False,
local_loglevel='INFO',
console_loglevel=None,
service_variant=None):
"""
Return the appropriate logging config dictionary. You should assign the
result of this to the LOGGING var in your settings. The reason it's done
this way instead of registering directly is because I didn't want to worry
about resetting the logging state if this is called multiple times when
settings are extended.
If dev_env is set to true logging will not be done via local rsyslogd,
instead, tracking and application logs will be dropped in log_dir.
"tracking_filename" and "edx_filename" are ignored unless dev_env
is set to true since otherwise logging is handled by rsyslogd.
"""
# Revert to INFO if an invalid string is passed in
if local_loglevel not in LOG_LEVELS:
local_loglevel = 'INFO'
if console_loglevel is None or console_loglevel not in LOG_LEVELS:
console_loglevel = 'DEBUG' if debug else 'INFO'
if service_variant is None:
# default to a blank string so that if SERVICE_VARIANT is not
# set we will not log to a sub directory
service_variant = ''
hostname = platform.node().split(".")[0]
syslog_format = ("[service_variant={service_variant}]"
"[%(name)s][env:{logging_env}] %(levelname)s "
"[{hostname} %(process)d] [%(filename)s:%(lineno)d] "
"- %(message)s").format(service_variant=service_variant,
logging_env=logging_env,
hostname=hostname)
handlers = ['console', 'local']
if syslog_addr:
handlers.append('syslogger-remote')
logger_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)s %(process)d '
'[%(name)s] %(filename)s:%(lineno)d - %(message)s',
},
'syslog_format': {'format': syslog_format},
'raw': {'format': '%(message)s'},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'console': {
'level': console_loglevel,
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stderr,
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'newrelic': {
'level': 'ERROR',
'class': 'lms.lib.newrelic_logging.NewRelicHandler',
'formatter': 'raw',
}
},
'loggers': {
'tracking': {
'handlers': ['tracking'],
'level': 'DEBUG',
'propagate': False,
},
'': {
'handlers': handlers,
'level': 'DEBUG',
'propagate': False
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
if syslog_addr:
logger_config['handlers'].update({
'syslogger-remote': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
'address': syslog_addr,
'formatter': 'syslog_format',
},
})
if dev_env:
tracking_file_loc = os.path.join(log_dir, tracking_filename)
edx_file_loc = os.path.join(log_dir, edx_filename)
logger_config['handlers'].update({
'local': {
'class': 'logging.handlers.RotatingFileHandler',
'level': local_loglevel,
'formatter': 'standard',
'filename': edx_file_loc,
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
'tracking': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': tracking_file_loc,
'formatter': 'raw',
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
})
else:
# for production environments we will only
# log INFO and up
logger_config['loggers']['']['level'] = 'INFO'
logger_config['handlers'].update({
'local': {
'level': local_loglevel,
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'formatter': 'syslog_format',
'facility': SysLogHandler.LOG_LOCAL0,
},
'tracking': {
'level': 'DEBUG',
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'facility': SysLogHandler.LOG_LOCAL1,
'formatter': 'raw',
},
})
return logger_config
| agpl-3.0 |
achanda/flocker | flocker/acceptance/obsolete/test_containers.py | 3 | 14946 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for the control service REST API.
"""
import socket
from contextlib import closing
from json import loads
from json import dumps
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.internet.defer import gatherResults
from treq import get, post, content
from eliot import Message
from ...testtools import (
loop_until, random_name,
)
from ..testtools import (
require_cluster, require_moving_backend, create_dataset,
create_python_container
)
CURRENT_DIRECTORY = FilePath(__file__).parent()
def verify_socket(host, port):
"""
Wait until the destionation can be connected to.
:param bytes host: Host to connect to.
:param int port: Port to connect to.
:return Deferred: Firing when connection is possible.
"""
def can_connect():
with closing(socket.socket()) as s:
conn = s.connect_ex((host, port))
Message.new(
message_type="acceptance:verify_socket",
host=host,
port=port,
result=conn,
).write()
return conn == 0
dl = loop_until(can_connect)
return dl
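# Minimal usage sketch (added; the address is an RFC 5737 documentation
# placeholder and the callback is hypothetical, not a helper in this suite):
#   d = verify_socket(b"203.0.113.5", 8080)
#   d.addCallback(lambda _: do_something_once_port_is_open())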
class ContainerAPITests(TestCase):
"""
Tests for the container API.
"""
def _create_container(self, cluster):
"""
Create a container listening on port 8080.
:return: ``Deferred`` firing with a container dictionary once the
container is up and running.
"""
d = create_python_container(
self, cluster, {
u"ports": [{u"internal": 8080, u"external": 8080}],
u"node_uuid": cluster.nodes[0].uuid,
}, CURRENT_DIRECTORY.child(b"hellohttp.py"))
def check_result(response):
dl = verify_socket(cluster.nodes[0].public_address, 8080)
dl.addCallback(lambda _: response)
return dl
d.addCallback(check_result)
return d
@require_cluster(1)
def test_create_container_with_ports(self, cluster):
"""
Create a container including port mappings on a single-node cluster.
"""
return self._create_container(cluster)
@require_cluster(1)
def test_create_container_with_environment(self, cluster):
"""
If environment variables are specified when creating a container,
those variables are available in the container's environment.
"""
environment = {u"XBLOO": u"YBLAH", u"ZBLOO": u"ZEBRA"}
d = create_python_container(
self, cluster, {
u"ports": [{u"internal": 8080, u"external": 8080}],
u"node_uuid": cluster.nodes[0].uuid,
u"environment": environment,
}, CURRENT_DIRECTORY.child(b"envhttp.py"))
def checked(_):
host = cluster.nodes[0].public_address
d = self.query_http_server(host, 8080)
d.addCallback(lambda data: dict(loads(data)))
return d
d.addCallback(checked)
d.addCallback(
lambda response:
self.assertDictContainsSubset(environment, response)
)
return d
@require_moving_backend
@require_cluster(2)
def test_move_container_with_dataset(self, cluster):
"""
Create a container with an attached dataset, issue API call
to move the container. Wait until we can connect to the running
container on the new host and verify the data has moved with it.
"""
data = {u"the data": u"it moves"}
post_data = {"data": dumps(data)}
node1, node2 = cluster.nodes
container_name = random_name(self)
creating_dataset = create_dataset(self, cluster)
def create_container(dataset):
d = create_python_container(
self, cluster, {
u"name": container_name,
u"ports": [{u"internal": 8080, u"external": 8080}],
u"node_uuid": node1.uuid,
u"volumes": [{u"dataset_id": dataset[u"dataset_id"],
u"mountpoint": u"/data"}],
}, CURRENT_DIRECTORY.child(b"datahttp.py"),
additional_arguments=[u"/data"],
)
return d
creating_dataset.addCallback(create_container)
creating_dataset.addCallback(
lambda _: self.post_http_server(
node1.public_address, 8080, post_data)
)
def move_container(_):
moved = cluster.move_container(
container_name, node2.uuid
)
return moved
creating_dataset.addCallback(move_container)
creating_dataset.addCallback(
lambda _: self.assert_http_server(
node2.public_address, 8080,
expected_response=post_data["data"])
)
return creating_dataset
@require_cluster(1)
def test_create_container_with_dataset(self, cluster):
"""
Create a container with an attached dataset, write some data,
shut it down, create a new container with same dataset, make sure
the data is still there.
"""
data = {u"the data": u"sample written data"}
post_data = {"data": dumps(data)}
node = cluster.nodes[0]
container_name = random_name(self)
creating_dataset = create_dataset(self, cluster)
self.dataset_id = None
def create_container(dataset):
self.dataset_id = dataset[u"dataset_id"]
d = create_python_container(
self, cluster, {
u"name": container_name,
u"ports": [{u"internal": 8080, u"external": 8080}],
u"node_uuid": node.uuid,
u"volumes": [{u"dataset_id": self.dataset_id,
u"mountpoint": u"/data"}],
}, CURRENT_DIRECTORY.child(b"datahttp.py"),
additional_arguments=[u"/data"],
cleanup=False,
)
return d
creating_dataset.addCallback(create_container)
creating_dataset.addCallback(
lambda _: self.post_http_server(
node.public_address, 8080, post_data)
)
creating_dataset.addCallback(
lambda _: self.assert_http_server(
node.public_address, 8080,
expected_response=post_data["data"])
)
creating_dataset.addCallback(
lambda _: cluster.remove_container(container_name))
def create_second_container(_):
d = create_python_container(
self, cluster, {
u"ports": [{u"internal": 8080, u"external": 8081}],
u"node_uuid": node.uuid,
u"volumes": [{u"dataset_id": self.dataset_id,
u"mountpoint": u"/data"}],
}, CURRENT_DIRECTORY.child(b"datahttp.py"),
additional_arguments=[u"/data"],
)
return d
creating_dataset.addCallback(create_second_container)
creating_dataset.addCallback(
lambda _: self.assert_http_server(
node.public_address, 8081,
expected_response=post_data["data"])
)
return creating_dataset
@require_cluster(1)
def test_current(self, cluster):
"""
The current container endpoint includes a currently running container.
"""
creating = self._create_container(cluster)
def created(data):
data[u"running"] = True
def in_current():
current = cluster.current_containers()
current.addCallback(lambda result: data in result)
return current
return loop_until(in_current)
creating.addCallback(created)
return creating
def post_http_server(self, host, port, data, expected_response=b"ok"):
"""
Make a POST request to an HTTP server on the given host and port
and assert that the response body matches the expected response.
:param bytes host: Host to connect to.
:param int port: Port to connect to.
:param bytes data: The raw request body data.
:param bytes expected_response: The HTTP response body expected.
Defaults to b"ok"
"""
def make_post(host, port, data):
request = post(
"http://{host}:{port}".format(host=host, port=port),
data=data,
persistent=False
)
def failed(failure):
Message.new(message_type=u"acceptance:http_query_failed",
reason=unicode(failure)).write()
return False
request.addCallbacks(content, failed)
return request
d = verify_socket(host, port)
d.addCallback(lambda _: loop_until(lambda: make_post(
host, port, data)))
d.addCallback(self.assertEqual, expected_response)
return d
def query_http_server(self, host, port, path=b""):
"""
Return the response from a HTTP server.
We try multiple times since it may take a little time for the HTTP
server to start up.
:param bytes host: Host to connect to.
:param int port: Port to connect to.
:param bytes path: Optional path and query string.
:return: ``Deferred`` that fires with the body of the response.
"""
def query():
req = get(
"http://{host}:{port}{path}".format(
host=host, port=port, path=path),
persistent=False
)
def failed(failure):
Message.new(message_type=u"acceptance:http_query_failed",
reason=unicode(failure)).write()
return False
req.addCallbacks(content, failed)
return req
d = verify_socket(host, port)
d.addCallback(lambda _: loop_until(query))
return d
def assert_http_server(self, host, port,
path=b"", expected_response=b"hi"):
"""
Assert that an HTTP server serving a response with body ``b"hi"`` is running
at given host and port.
This can be coupled with code that only conditionally starts up
the HTTP server via Flocker in order to check if that particular
setup succeeded.
:param bytes host: Host to connect to.
:param int port: Port to connect to.
:param bytes path: Optional path and query string.
:param bytes expected_response: The HTTP response body expected.
Defaults to b"hi"
:return: ``Deferred`` that fires when assertion has run.
"""
d = self.query_http_server(host, port, path)
d.addCallback(self.assertEqual, expected_response)
return d
@require_cluster(1)
def test_non_root_container_can_access_dataset(self, cluster):
"""
A container running as a user that is not root can write to a
dataset attached as a volume.
"""
node = cluster.nodes[0]
creating_dataset = create_dataset(self, cluster)
def created_dataset(dataset):
return create_python_container(
self, cluster, {
u"ports": [{u"internal": 8080, u"external": 8080}],
u"node_uuid": node.uuid,
u"volumes": [{u"dataset_id": dataset[u"dataset_id"],
u"mountpoint": u"/data"}],
}, CURRENT_DIRECTORY.child(b"nonrootwritehttp.py"),
additional_arguments=[u"/data"])
creating_dataset.addCallback(created_dataset)
creating_dataset.addCallback(
lambda _: self.assert_http_server(node.public_address, 8080))
return creating_dataset
@require_cluster(2)
def test_linking(self, cluster):
"""
A link from an origin container to a destination container allows the
origin container to establish connections to the destination container
when the containers are running on different machines using an address
obtained from ``<ALIAS>_PORT_<PORT>_TCP_{ADDR,PORT}``-style environment
variables set in the origin container's environment.
"""
destination_port = 8080
origin_port = 8081
[destination, origin] = cluster.nodes
running = gatherResults([
create_python_container(
self, cluster, {
u"ports": [{u"internal": 8080,
u"external": destination_port}],
u"node_uuid": destination.uuid,
}, CURRENT_DIRECTORY.child(b"hellohttp.py")),
create_python_container(
self, cluster, {
u"ports": [{u"internal": 8081,
u"external": origin_port}],
u"links": [{u"alias": "dest", u"local_port": 80,
u"remote_port": destination_port}],
u"node_uuid": origin.uuid,
}, CURRENT_DIRECTORY.child(b"proxyhttp.py")),
# Wait for the link target container to be accepting connections.
verify_socket(destination.public_address, destination_port),
# Wait for the link source container to be accepting connections.
verify_socket(origin.public_address, origin_port),
])
running.addCallback(
lambda _: self.assert_http_server(
origin.public_address, origin_port))
return running
@require_cluster(2)
def test_traffic_routed(self, cluster):
"""
An application can be accessed even from a connection to a node
which it is not running on.
"""
port = 8080
[destination, origin] = cluster.nodes
running = gatherResults([
create_python_container(
self, cluster, {
u"ports": [{u"internal": 8080, u"external": port}],
u"node_uuid": destination.uuid,
}, CURRENT_DIRECTORY.child(b"hellohttp.py")),
# Wait for the destination to be accepting connections.
verify_socket(destination.public_address, port),
# Wait for the origin container to be accepting connections.
verify_socket(origin.public_address, port),
])
running.addCallback(
# Connect to the machine where the container is NOT running:
lambda _: self.assert_http_server(origin.public_address, port))
return running
| apache-2.0 |
Gateworks/platform-external-chromium_org | tools/metrics/histograms/extract_histograms.py | 28 | 13576 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extract histogram names from the description XML file.
For more information on the format of the XML file, which is self-documenting,
see histograms.xml; however, here is a simple example to get you started. The
XML below will generate the following five histograms:
HistogramTime
HistogramEnum
HistogramEnum_Chrome
HistogramEnum_IE
HistogramEnum_Firefox
<histogram-configuration>
<histograms>
<histogram name="HistogramTime" units="milliseconds">
<summary>A brief description.</summary>
<details>This is a more thorough description of this histogram.</details>
</histogram>
<histogram name="HistogramEnum" enum="MyEnumType">
<summary>This histogram sports an enum value type.</summary>
</histogram>
</histograms>
<enums>
<enum name="MyEnumType">
<summary>This is an example enum type, where the values mean little.</summary>
<int value="1" label="FIRST_VALUE">This is the first value.</int>
<int value="2" label="SECOND_VALUE">This is the second value.</int>
</enum>
</enums>
<fieldtrials>
<fieldtrial name="BrowserType">
<group name="Chrome"/>
<group name="IE"/>
<group name="Firefox"/>
<affected-histogram name="HistogramEnum"/>
</fieldtrial>
</fieldtrials>
</histogram-configuration>
"""
import copy
import logging
import xml.dom.minidom
MAX_FIELDTRIAL_DEPENDENCY_DEPTH = 5
class Error(Exception):
pass
def _JoinChildNodes(tag):
"""Join child nodes into a single text.
Applicable to leaf nodes like 'summary' and 'detail'.
Args:
tag: parent node
Returns:
a string with concatenated nodes' text representation.
"""
return ''.join(c.toxml() for c in tag.childNodes).strip()
def _NormalizeString(s):
"""Normalizes a string (possibly of multiple lines) by replacing each
whitespace sequence with a single space.
Args:
s: The string to normalize, e.g. ' \n a b c\n d '
Returns:
The normalized string, e.g. 'a b c d'
"""
return ' '.join(s.split())
def _NormalizeAllAttributeValues(node):
"""Recursively normalizes all tag attribute values in the given tree.
Args:
node: The minidom node to be normalized.
Returns:
The normalized minidom node.
"""
if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
for a in node.attributes.keys():
node.attributes[a].value = _NormalizeString(node.attributes[a].value)
for c in node.childNodes: _NormalizeAllAttributeValues(c)
return node
def _ExpandHistogramNameWithFieldTrial(group_name, histogram_name, fieldtrial):
"""Creates a new histogram name based on the field trial group.
Args:
group_name: The name of the field trial group. May be empty.
histogram_name: The name of the histogram. May be of the form
Group.BaseName or BaseName
fieldtrial: The FieldTrial XML element.
Returns:
A string with the expanded histogram name.
Raises:
Error if the expansion can't be done.
"""
if fieldtrial.hasAttribute('separator'):
separator = fieldtrial.getAttribute('separator')
else:
separator = '_'
if fieldtrial.hasAttribute('ordering'):
ordering = fieldtrial.getAttribute('ordering')
else:
ordering = 'suffix'
if ordering not in ['prefix', 'suffix']:
logging.error('ordering needs to be prefix or suffix, value is %s' %
ordering)
raise Error()
if not group_name:
return histogram_name
if ordering == 'suffix':
return histogram_name + separator + group_name
# For prefixes, the group_name is inserted between the "cluster" and the
# "remainder", e.g. Foo.BarHist expanded with gamma becomes Foo.gamma_BarHist.
sections = histogram_name.split('.')
if len(sections) <= 1:
logging.error(
'Prefix Field Trial expansions require histogram names which include a '
'dot separator. Histogram name is %s, and Field Trial is %s' %
(histogram_name, fieldtrial.getAttribute('name')))
raise Error()
cluster = sections[0] + '.'
remainder = '.'.join(sections[1:])
return cluster + group_name + separator + remainder
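# Worked example (added): with the default '_' separator and 'suffix' ordering,
#   _ExpandHistogramNameWithFieldTrial('Chrome', 'HistogramEnum', trial)
# yields 'HistogramEnum_Chrome', while ordering="prefix" turns 'Foo.BarHist'
# into 'Foo.Chrome_BarHist'. Here 'trial' stands for a parsed <fieldtrial>
# minidom element.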
def _ExtractEnumsFromXmlTree(tree):
"""Extract all <enum> nodes in the tree into a dictionary."""
enums = {}
have_errors = False
last_name = None
for enum in tree.getElementsByTagName("enum"):
if enum.getAttribute('type') != 'int':
logging.error('Unknown enum type %s' % enum.getAttribute('type'))
have_errors = True
continue
name = enum.getAttribute('name')
if last_name is not None and name.lower() < last_name.lower():
logging.error('Enums %s and %s are not in alphabetical order'
% (last_name, name))
have_errors = True
last_name = name
if name in enums:
logging.error('Duplicate enum %s' % name)
have_errors = True
continue
last_int_value = None
enum_dict = {}
enum_dict['name'] = name
enum_dict['values'] = {}
for int_tag in enum.getElementsByTagName("int"):
value_dict = {}
int_value = int(int_tag.getAttribute('value'))
if last_int_value is not None and int_value < last_int_value:
logging.error('Enum %s int values %d and %d are not in numerical order'
% (name, last_int_value, int_value))
have_errors = True
last_int_value = int_value
if int_value in enum_dict['values']:
logging.error('Duplicate enum value %d for enum %s' % (int_value, name))
have_errors = True
continue
value_dict['label'] = int_tag.getAttribute('label')
value_dict['summary'] = _JoinChildNodes(int_tag)
enum_dict['values'][int_value] = value_dict
summary_nodes = enum.getElementsByTagName("summary")
if len(summary_nodes) > 0:
enum_dict['summary'] = _NormalizeString(_JoinChildNodes(summary_nodes[0]))
enums[name] = enum_dict
return enums, have_errors
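# Illustrative shape of the returned mapping (summaries abbreviated; names
# taken from the example XML in the module docstring):
#   enums['MyEnumType'] == {
#       'name': 'MyEnumType',
#       'summary': 'This is an example enum type, ...',
#       'values': {1: {'label': 'FIRST_VALUE', 'summary': 'This is the ...'}},
#   }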
def _ExtractHistogramsFromXmlTree(tree, enums):
"""Extract all <histogram> nodes in the tree into a dictionary."""
# Process the histograms. The descriptions can include HTML tags.
histograms = {}
have_errors = False
last_name = None
for histogram in tree.getElementsByTagName("histogram"):
name = histogram.getAttribute('name')
if last_name is not None and name.lower() < last_name.lower():
logging.error('Histograms %s and %s are not in alphabetical order'
% (last_name, name))
have_errors = True
last_name = name
if name in histograms:
logging.error('Duplicate histogram definition %s' % name)
have_errors = True
continue
histograms[name] = histogram_entry = {}
# Find <summary> tag.
summary_nodes = histogram.getElementsByTagName("summary")
if len(summary_nodes) > 0:
histogram_entry['summary'] = _NormalizeString(
_JoinChildNodes(summary_nodes[0]))
else:
histogram_entry['summary'] = 'TBD'
# Find <obsolete> tag.
obsolete_nodes = histogram.getElementsByTagName("obsolete")
if len(obsolete_nodes) > 0:
reason = _JoinChildNodes(obsolete_nodes[0])
histogram_entry['obsolete'] = reason
# Handle units.
if histogram.hasAttribute('units'):
histogram_entry['units'] = histogram.getAttribute('units')
# Find <details> tag.
details_nodes = histogram.getElementsByTagName("details")
if len(details_nodes) > 0:
histogram_entry['details'] = _NormalizeString(
_JoinChildNodes(details_nodes[0]))
# Handle enum types.
if histogram.hasAttribute('enum'):
enum_name = histogram.getAttribute('enum')
if not enum_name in enums:
logging.error('Unknown enum %s in histogram %s' % (enum_name, name))
have_errors = True
else:
histogram_entry['enum'] = enums[enum_name]
return histograms, have_errors
def _UpdateHistogramsWithFieldTrialInformation(tree, histograms):
"""Process field trials' tags and combine with affected histograms.
The histograms dictionary will be updated in-place by adding new histograms
created by combining histograms themselves with field trials targeting these
histograms.
Args:
tree: XML dom tree.
histograms: a dictionary of histograms previously extracted from the tree;
Returns:
True if any errors were found.
"""
have_errors = False
# Verify order of fieldtrial fields first.
last_name = None
for fieldtrial in tree.getElementsByTagName("fieldtrial"):
name = fieldtrial.getAttribute('name')
if last_name is not None and name.lower() < last_name.lower():
logging.error('Field trials %s and %s are not in alphabetical order'
% (last_name, name))
have_errors = True
last_name = name
# Field trials can depend on other field trials, so we need to be careful.
# Make a temporary copy of the list of field trials to use as a queue.
# Field trials whose dependencies have not yet been processed will get
# relegated to the back of the queue to be processed later.
reprocess_queue = []
def GenerateFieldTrials():
for f in tree.getElementsByTagName("fieldtrial"): yield 0, f
for r, f in reprocess_queue: yield r, f
for reprocess_count, fieldtrial in GenerateFieldTrials():
# Check dependencies first
dependencies_valid = True
affected_histograms = fieldtrial.getElementsByTagName('affected-histogram')
for affected_histogram in affected_histograms:
histogram_name = affected_histogram.getAttribute('name')
if not histogram_name in histograms:
# Base histogram is missing
dependencies_valid = False
missing_dependency = histogram_name
break
if not dependencies_valid:
if reprocess_count < MAX_FIELDTRIAL_DEPENDENCY_DEPTH:
reprocess_queue.append( (reprocess_count + 1, fieldtrial) )
continue
else:
logging.error('Field trial %s is missing its dependency %s'
% (fieldtrial.getAttribute('name'),
missing_dependency))
have_errors = True
continue
name = fieldtrial.getAttribute('name')
groups = fieldtrial.getElementsByTagName('group')
group_labels = {}
for group in groups:
group_labels[group.getAttribute('name')] = group.getAttribute('label')
last_histogram_name = None
for affected_histogram in affected_histograms:
histogram_name = affected_histogram.getAttribute('name')
if (last_histogram_name is not None
and histogram_name.lower() < last_histogram_name.lower()):
logging.error('Affected histograms %s and %s of field trial %s are not '
'in alphabetical order'
% (last_histogram_name, histogram_name, name))
have_errors = True
last_histogram_name = histogram_name
base_description = histograms[histogram_name]
with_groups = affected_histogram.getElementsByTagName('with-group')
if len(with_groups) > 0:
histogram_groups = with_groups
else:
histogram_groups = groups
for group in histogram_groups:
group_name = group.getAttribute('name')
try:
new_histogram_name = _ExpandHistogramNameWithFieldTrial(
group_name, histogram_name, fieldtrial)
if new_histogram_name != histogram_name:
histograms[new_histogram_name] = copy.deepcopy(
histograms[histogram_name])
group_label = group_labels.get(group_name, '')
if not 'fieldtrial_groups' in histograms[new_histogram_name]:
histograms[new_histogram_name]['fieldtrial_groups'] = []
histograms[new_histogram_name]['fieldtrial_groups'].append(group_name)
if not 'fieldtrial_names' in histograms[new_histogram_name]:
histograms[new_histogram_name]['fieldtrial_names'] = []
histograms[new_histogram_name]['fieldtrial_names'].append(name)
if not 'fieldtrial_labels' in histograms[new_histogram_name]:
histograms[new_histogram_name]['fieldtrial_labels'] = []
histograms[new_histogram_name]['fieldtrial_labels'].append(
group_label)
except Error:
have_errors = True
return have_errors
def ExtractHistogramsFromFile(file_handle):
"""Compute the histogram names and descriptions from the XML representation.
Args:
file_handle: A file or file-like with XML content.
Returns:
a tuple of (histograms, status) where histograms is a dictionary mapping
histogram names to dictionaries containing histogram descriptions and status
is a boolean indicating if errors were encountered in processing.
"""
tree = xml.dom.minidom.parse(file_handle)
_NormalizeAllAttributeValues(tree)
enums, enum_errors = _ExtractEnumsFromXmlTree(tree)
histograms, histogram_errors = _ExtractHistogramsFromXmlTree(tree, enums)
update_errors = _UpdateHistogramsWithFieldTrialInformation(tree, histograms)
return histograms, enum_errors or histogram_errors or update_errors
def ExtractHistograms(filename):
"""Load histogram definitions from a disk file.
Args:
filename: a file path to load data from.
Raises:
Error if the file is not well-formatted.
"""
with open(filename, 'r') as f:
histograms, had_errors = ExtractHistogramsFromFile(f)
if had_errors:
logging.error('Error parsing %s' % filename)
raise Error()
return histograms
def ExtractNames(histograms):
return sorted(histograms.keys())
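# Hedged usage sketch (not part of the original source); the file name below
# is only an example:
# histograms = ExtractHistograms('histograms.xml')
# names = ExtractNames(histograms)  # sorted list of histogram names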
| bsd-3-clause |
rpm5/createrepo_c | tests/python/tests/test_load_metadata.py | 1 | 2416 | import unittest
import createrepo_c as cr
from .fixtures import *
class TestCaseLoadMetadata(unittest.TestCase):
def test_load_metadata__repo00(self):
md = cr.Metadata()
md.locate_and_load_xml(REPO_00_PATH)
self.assertTrue(md)
self.assertEqual(md.key, cr.HT_KEY_DEFAULT)
self.assertEqual(md.len(), 0)
self.assertEqual(md.keys(), [])
self.assertFalse(md.has_key("foo"))
self.assertFalse(md.has_key(""))
self.assertFalse(md.remove("foo"))
self.assertFalse(md.get("xxx"))
def test_load_metadata_repo01(self):
md = cr.Metadata()
md.locate_and_load_xml(REPO_01_PATH)
self.assertTrue(md)
self.assertEqual(md.key, cr.HT_KEY_DEFAULT)
self.assertEqual(md.len(), 1)
self.assertEqual(md.keys(), ['152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf'])
self.assertFalse(md.has_key("foo"))
self.assertFalse(md.has_key(""))
self.assertFalse(md.remove("foo"))
pkg = md.get('152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf')
self.assertTrue(pkg)
self.assertEqual(pkg.name, "super_kernel")
def test_load_metadata_repo02(self):
md = cr.Metadata()
md.locate_and_load_xml(REPO_02_PATH)
self.assertTrue(md)
self.assertEqual(md.key, cr.HT_KEY_DEFAULT)
self.assertEqual(md.len(), 2)
self.assertEqual(md.keys(),
['6d43a638af70ef899933b1fd86a866f18f65b0e0e17dcbf2e42bfd0cdd7c63c3',
'90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7'])
self.assertFalse(md.has_key("foo"))
self.assertFalse(md.has_key(""))
self.assertFalse(md.remove("foo"))
pkg = md.get('152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf')
self.assertEqual(pkg, None)
pkg = md.get('90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7')
self.assertEqual(pkg.name, "fake_bash")
def test_load_metadata_repo02_destructor(self):
md = cr.Metadata(use_single_chunk=True)
md.locate_and_load_xml(REPO_02_PATH)
pkg = md.get('90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7')
del(md) # in fact, md should not be destroyed yet, because it is
# referenced from pkg!
self.assertEqual(pkg.name, "fake_bash")
| gpl-2.0 |
t00m/KB4IT | kb4it/services/database.py | 1 | 5302 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
RDF Graph In Memory database module.
# Author: Tomás Vírseda <tomasvirseda@gmail.com>
# License: GPLv3
# Description: In-memory database module
"""
from kb4it.core.service import Service
from kb4it.core.util import guess_datetime, sort_dictionary
class KB4ITDB(Service):
"""KB4IT database class."""
db = {}
sort_attribute = None
sorted_docs = []
blocked_keys = []
ignored_keys = []
def initialize(self):
"""Initialize database module."""
params = self.app.get_params()
self.sort_attribute = params.SORT_ATTRIBUTE
self.db = {}
self.sorted_docs = []
self.ignored_keys = self.blocked_keys = ['Title', 'Timestamp']
def del_document(self, doc):
"""Delete a document node from database."""
adoc = "%s.adoc" % doc
try:
del self.db[adoc]
self.log.debug("[DB] - DOC[%s] deleted from database", doc)
self.sort_database()
except KeyError:
self.log.debug("[DB] - DOC[%s] not found in database", doc)
def add_document(self, doc):
"""Add a new document node to the database."""
self.db[doc] = {}
self.log.debug("[DB] - DOC[%s] added to database", doc)
def add_document_key(self, doc, key, value):
"""Add a new key/value node for a given document."""
try:
alist = self.db[doc][key]
alist.append(value)
self.db[doc][key] = alist
except KeyError:
self.db[doc][key] = [value]
self.log.debug("[DB] - DOC[%s] KEY[%s] VALUE[%s] added", doc, key, value)
def get_blocked_keys(self):
"""Return blocked keys."""
return self.blocked_keys
def get_ignored_keys(self):
"""Return ignored keys."""
return self.ignored_keys
def ignore_key(self, key):
"""Add given key to ignored keys list."""
self.ignored_keys.append(key)
def sort_database(self):
"""
Build a list of documents.
Documents sorted by the given date attribute in descending order.
"""
self.sorted_docs = self.sort_by_date(list(self.db.keys()))
def sort_by_date(self, doclist):
"""Build a list of documents sorted by timestamp desc."""
sorted_docs = []
adict = {}
for doc in doclist:
sdate = self.get_doc_timestamp(doc)
ts = guess_datetime(sdate)
if ts is not None:
adict[doc] = ts
else:
self.log.error("[DB] - Doc '%s' doesn't have a valid timestamp?", doc)
alist = sort_dictionary(adict)
for doc, timestamp in alist:
sorted_docs.append(doc)
return sorted_docs
def get_documents(self):
"""Return the list of sorted docs."""
return self.sorted_docs
def get_doc_timestamp(self, doc):
"""Get timestamp for a given document."""
try:
timestamp = self.db[doc][self.sort_attribute][0]
except:
timestamp = self.db[doc]['Timestamp'][0]
return timestamp
def get_doc_properties(self, doc):
"""Return a dictionary with the properties of a given doc."""
return self.db[doc]
def get_values(self, doc, key):
"""Return a list of values given a document and a key."""
try:
return self.db[doc][key]
except KeyError:
return ['']
def get_all_values_for_key(self, key):
"""Return a list of all values for a given key sorted alphabetically."""
values = []
for doc in self.db:
try:
values.extend(self.db[doc][key])
except KeyError:
pass
values = list(set(values))
values.sort(key=lambda y: y.lower())
return values
def get_custom_keys(self, doc):
"""Return a list of custom keys sorted alphabetically."""
custom_keys = []
keys = self.get_doc_keys(doc)
for key in keys:
if key not in self.ignored_keys:
custom_keys.append(key)
custom_keys.sort(key=lambda y: y.lower())
return custom_keys
def get_all_keys(self):
"""Return all keys in the database sorted alphabetically."""
blocked_keys = self.get_blocked_keys()
keys = set()
database = self.get_documents()
for doc in database:
for key in self.get_doc_keys(doc):
if key not in blocked_keys:
keys.add(key)
keys = list(keys)
keys.sort(key=lambda y: y.lower())
return keys
def get_docs_by_key_value(self, key, value):
"""Return a list documents for a given key/value sorted by date."""
docs = []
for doc in self.db:
try:
if value in self.db[doc][key]:
docs.append(doc)
except KeyError:
pass
return self.sort_by_date(docs)
def get_doc_keys(self, doc):
"""Return a list of keys for a given doc sorted alphabetically."""
keys = []
for key in self.db[doc]:
keys.append(key)
keys.sort(key=lambda y: y.lower())
return keys
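# Minimal usage sketch (illustrative only, not part of the original module);
# construction details are application-specific since KB4ITDB is a Service:
# db = KB4ITDB(...)            # hypothetical instantiation via the KB4IT app
# db.add_document('note.adoc')
# db.add_document_key('note.adoc', 'Timestamp', '2020-01-01 00:00:00')
# db.sort_database()
# docs = db.get_documents()    # documents sorted by date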
| gpl-3.0 |
jusbrasil/pycan | pycan/exceptions.py | 1 | 1093 | class UnauthorizedResourceError(Exception):
def __init__(self, action, app_context, user, context, resource):
Exception.__init__(self, 'Action %s of context %s unauthorized' % (action, app_context))
self.action = action
self.app_context = app_context
self.user = user
self.context = context
self.resource = resource
class ActionNotFoundError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class ContextNotFoundError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class ActionAlreadyExistsError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class ContextAlreadyHasAsteriskError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class ContextAlreadyHasActionsError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class ActionListWithAsteriskError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
| mit |
vixns/marathon-lb | utils.py | 2 | 14538 | #!/usr/bin/env python3
import hashlib
from io import BytesIO
import logging
import socket
import pycurl
from common import DCOSAuth
from lrucache import LRUCache
logger = logging.getLogger('utils')
# The maximum number of clashes to allow when assigning a port.
MAX_CLASHES = 50
class ServicePortAssigner(object):
"""
Helper class to assign service ports.
Ordinarily Marathon should assign the service ports, but Marathon issue
https://github.com/mesosphere/marathon/issues/3636 means that service
ports are not returned for applications using IP-per-task. We work around
that here by assigning deterministic ports from a configurable range when
required.
Note that auto-assigning ports is only useful when using vhost: the ports
that we assign here are not exposed to the client.
The LB command line options --min-serv-port-ip-per-task and
--max-serv-port-ip-per-task specify the allowed range of ports to
auto-assign from. The range of ports used for auto-assignment should be
selected to ensure no clashes with the exposed LB ports and the
Marathon-assigned services ports.
The service port assigner provides a mechanism to auto assign service ports
using the application name to generate a service port (while preventing
clashes when the port is already claimed by another app). The assigner
provides a deterministic set of ports for a given ordered set of port
requests.
"""
def __init__(self):
self.min_port = None
self.max_port = None
self.max_ports = None
self.can_assign = False
self.next_port = None
self.ports_by_app = {}
def _assign_new_service_port(self, app, task_port):
assert self.can_assign
if self.max_ports <= len(self.ports_by_app):
logger.warning("Service ports are exhausted")
return None
# We don't want to be searching forever, so limit the number of times
# we clash to the number of remaining ports.
ports = self.ports_by_app.values()
port = None
for i in range(MAX_CLASHES):
hash_str = "%s-%s-%s" % (app['id'], task_port, i)
hash_val = hashlib.sha1(hash_str.encode("utf-8")).hexdigest()
hash_int = int(hash_val[:8], 16)
trial_port = self.min_port + (hash_int % self.max_ports)
if trial_port not in ports:
port = trial_port
break
if port is None:
for port in range(self.min_port, self.max_port + 1):
if port not in ports:
break
# We must have assigned a unique port by now since we know there were
# some available.
assert port and port not in ports, port
logger.debug("Assigned new port: %d", port)
return port
def _get_service_port(self, app, task_port):
key = (app['id'], task_port)
port = (self.ports_by_app.get(key) or
self._assign_new_service_port(app, task_port))
self.ports_by_app[key] = port
return port
def set_ports(self, min_port, max_port):
"""
Set the range of ports that we can use for auto-assignment of
service ports - just for IP-per-task apps.
:param min_port: The minimum port value
:param max_port: The maximum port value
"""
assert not self.ports_by_app
assert max_port >= min_port
self.min_port = min_port
self.max_port = max_port
self.max_ports = max_port - min_port + 1
self.can_assign = self.min_port and self.max_port
def reset(self):
"""
Reset the assigner so that ports are newly assigned.
"""
self.ports_by_app = {}
def get_service_ports(self, app):
"""
Return a list of service ports for this app.
:param app: The application.
:return: The list of ports. Note that if auto-assigning and ports
become exhausted, a port may be returned as None.
"""
mode = get_app_networking_mode(app)
if mode == "container" or mode == "container/bridge":
# Here we must use portMappings
portMappings = get_app_port_mappings(app)
if len(portMappings) > 0:
ports = filter(lambda p: p is not None,
map(lambda p: p.get('servicePort', None),
portMappings))
ports = list(ports)
if ports:
return list(ports)
ports = app.get('ports', [])
if 'portDefinitions' in app:
ports = filter(lambda p: p is not None,
map(lambda p: p.get('port', None),
app.get('portDefinitions', []))
)
ports = list(ports) # wtf python?
# This supports legacy ip-per-container for Marathon 1.4.x and prior
if not ports and mode == "container" and self.can_assign \
and len(app['tasks']) > 0:
task = app['tasks'][0]
task_ports = get_app_task_ports(app, task, mode)
if len(task_ports) > 0:
ports = [self._get_service_port(app, task_port)
for task_port in task_ports]
logger.debug("Service ports: %r", ports)
return ports
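# Hedged usage sketch (not part of the original module); the app dict below is
# a minimal, hypothetical fragment of a Marathon app definition:
# assigner = ServicePortAssigner()
# assigner.set_ports(10000, 10100)
# app = {'id': '/my-app', 'networks': [{'mode': 'container'}],
#        'container': {'portMappings': [{'containerPort': 80}]},
#        'tasks': [{'ports': []}]}
# ports = assigner.get_service_ports(app)  # deterministic ports from the range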
class CurlHttpEventStream(object):
def __init__(self, url, auth, verify):
self.url = url
self.received_buffer = BytesIO()
headers = ['Cache-Control: no-cache', 'Accept: text/event-stream']
self.curl = pycurl.Curl()
self.curl.setopt(pycurl.URL, url)
self.curl.setopt(pycurl.ENCODING, 'gzip')
self.curl.setopt(pycurl.CONNECTTIMEOUT, 10)
self.curl.setopt(pycurl.WRITEDATA, self.received_buffer)
# Marathon >= 1.7.x returns 30x responses for /v2/events responses
# when they're coming from a non-leader. So we follow redirects.
self.curl.setopt(pycurl.FOLLOWLOCATION, True)
self.curl.setopt(pycurl.MAXREDIRS, 1)
self.curl.setopt(pycurl.UNRESTRICTED_AUTH, True)
# The below settings are to prevent the connection from hanging if the
# connection breaks silently. Since marathon-lb only listens, silent
# connection failure results in marathon-lb waiting infinitely.
#
# Minimum bytes/second below which it is considered "low speed". So
# "low speed" here refers to 0 bytes/second.
self.curl.setopt(pycurl.LOW_SPEED_LIMIT, 1)
# How long (in seconds) it's allowed to go below the speed limit
# before it times out
self.curl.setopt(pycurl.LOW_SPEED_TIME, 300)
if auth and type(auth) is DCOSAuth:
auth.refresh_auth_header()
headers.append('Authorization: %s' % auth.auth_header)
elif auth:
self.curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
self.curl.setopt(pycurl.USERPWD, '%s:%s' % auth)
if verify:
self.curl.setopt(pycurl.CAINFO, verify)
else:
self.curl.setopt(pycurl.SSL_VERIFYHOST, 0)
self.curl.setopt(pycurl.SSL_VERIFYPEER, 0)
self.curl.setopt(pycurl.HTTPHEADER, headers)
self.curlmulti = pycurl.CurlMulti()
self.curlmulti.add_handle(self.curl)
self.status_code = 0
SELECT_TIMEOUT = 10
def _any_data_received(self):
return self.received_buffer.tell() != 0
def _get_received_data(self):
result = self.received_buffer.getvalue()
self.received_buffer.truncate(0)
self.received_buffer.seek(0)
return result
def _check_status_code(self):
if self.status_code == 0:
self.status_code = self.curl.getinfo(pycurl.HTTP_CODE)
if self.status_code != 0 and self.status_code != 200:
raise Exception(str(self.status_code) + ' ' + self.url)
def _perform_on_curl(self):
while True:
ret, num_handles = self.curlmulti.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
return num_handles
def _iter_chunks(self):
while True:
remaining = self._perform_on_curl()
if self._any_data_received():
self._check_status_code()
yield self._get_received_data()
if remaining == 0:
break
self.curlmulti.select(self.SELECT_TIMEOUT)
self._check_status_code()
self._check_curl_errors()
def _check_curl_errors(self):
for f in self.curlmulti.info_read()[2]:
raise pycurl.error(*f[1:])
def iter_lines(self):
chunks = self._iter_chunks()
return self._split_lines_from_chunks(chunks)
@staticmethod
def _split_lines_from_chunks(chunks):
# same behaviour as requests' Response.iter_lines(...)
pending = None
for chunk in chunks:
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
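# Hedged usage sketch (illustrative only): the URL below is a placeholder for
# the Marathon /v2/events SSE endpoint.
# stream = CurlHttpEventStream('http://marathon.example:8080/v2/events',
#                              auth=None, verify=False)
# for line in stream.iter_lines():
#     pass  # each line is one raw server-sent-event line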
def resolve_ip(host):
"""
:return: string, an empty string indicates that no ip was found.
"""
cached_ip = ip_cache.get().get(host, "")
if cached_ip != "":
return cached_ip
else:
try:
logger.debug("trying to resolve ip address for host %s", host)
ip = socket.gethostbyname(host)
ip_cache.get().set(host, ip)
return ip
except socket.gaierror:
return ""
class LRUCacheSingleton(object):
def __init__(self):
self.lru_cache = None
def get(self):
if self.lru_cache is None:
self.lru_cache = LRUCache()
return self.lru_cache
def set(self, lru_cache):
self.lru_cache = lru_cache
ip_cache = LRUCacheSingleton()
def get_app_networking_mode(app):
mode = 'host'
if app.get('ipAddress'):
mode = 'container'
_mode = app.get('container', {})\
.get('docker', {})\
.get('network', '')
if _mode == 'USER':
mode = 'container'
elif _mode == 'BRIDGE':
mode = 'container/bridge'
networks = app.get('networks', [])
for n in networks:
# Modes cannot be mixed, so assigning the last mode is fine
mode = n.get('mode', 'container')
return mode
def get_task_ip(task, mode):
"""
:return: string, an empty string indicates that no ip was found.
"""
if mode == 'container':
task_ip_addresses = task.get('ipAddresses', [])
if len(task_ip_addresses) == 0:
logger.warning("Task %s does not yet have an ip address allocated",
task['id'])
return ""
task_ip = task_ip_addresses[0].get('ipAddress', "")
if task_ip == "":
logger.warning("Task %s does not yet have an ip address allocated",
task['id'])
return ""
return task_ip
else:
host = task.get('host', "")
if host == "":
logger.warning("Could not find task host, ignoring")
return ""
task_ip = resolve_ip(host)
if task_ip == "":
logger.warning("Could not resolve ip for host %s, ignoring",
host)
return ""
return task_ip
def get_app_port_mappings(app):
"""
:return: list
"""
portMappings = app.get('container', {})\
.get('docker', {})\
.get('portMappings', [])
if len(portMappings) > 0:
return portMappings
return app.get('container', {})\
.get('portMappings', [])
def get_task_ports(task):
"""
:return: list
"""
return task.get('ports', [])
def get_port_definition_ports(app):
"""
:return: list
"""
port_definitions = app.get('portDefinitions', [])
return [p['port'] for p in port_definitions if 'port' in p]
def get_ip_address_discovery_ports(app):
"""
:return: list
"""
ip_address = app.get('ipAddress', {})
if len(ip_address) == 0:
return []
discovery = app.get('ipAddress', {}).get('discovery', {})
return [int(p['number'])
for p in discovery.get('ports', [])
if 'number' in p]
def get_port_mapping_ports(app):
"""
:return: list
"""
port_mappings = get_app_port_mappings(app)
return [p['containerPort'] for p in port_mappings if 'containerPort' in p]
def get_app_task_ports(app, task, mode):
"""
:return: list
"""
if mode == 'host':
task_ports = get_task_ports(task)
if len(task_ports) > 0:
return task_ports
return get_port_definition_ports(app)
elif mode == 'container/bridge':
task_ports = get_task_ports(task)
if len(task_ports) > 0:
return task_ports
# Will only work for Marathon < 1.5
task_ports = get_port_definition_ports(app)
if len(task_ports) > 0:
return task_ports
return get_port_mapping_ports(app)
else:
task_ports = get_ip_address_discovery_ports(app)
if len(task_ports) > 0:
return task_ports
return get_port_mapping_ports(app)
def get_task_ip_and_ports(app, task):
"""
Return the IP address and list of ports used to access a task. For a
task using IP-per-task, this is the IP address of the task, and the ports
exposed by the task services. Otherwise, this is the IP address of the
host and the ports exposed by the host.
:param app: The application owning the task.
:param task: The task.
:return: Tuple of (ip address, [ports]). Returns (None, None) if no IP
address could be resolved or found for the task.
"""
mode = get_app_networking_mode(app)
task_ip = get_task_ip(task, mode)
task_ports = get_app_task_ports(app, task, mode)
# The overloading of empty string, and empty list as False is intentional.
if not (task_ip and task_ports):
return None, None
logger.debug("Returning: %r, %r", task_ip, task_ports)
return task_ip, task_ports
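# Hedged example (not part of the original module); the app/task dicts below
# are minimal hypothetical fragments for host networking:
# app = {'portDefinitions': [{'port': 10019}]}
# task = {'host': 'agent-1.example', 'ports': [31045]}
# ip, ports = get_task_ip_and_ports(app, task)
# # -> resolves 'agent-1.example' and returns its IP with [31045],
# #    or (None, None) if the host cannot be resolved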
| apache-2.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/io/formats/test_printing.py | 8 | 7359 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import compat
import pandas.io.formats.printing as printing
import pandas.io.formats.format as fmt
import pandas.core.config as cf
def test_adjoin():
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
assert (adjoined == expected)
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = printing.pprint_thing(b, quote_strings=True)
assert res == repr(b)
res = printing.pprint_thing(b, quote_strings=False)
assert res == b
class TestFormattBase(object):
def test_adjoin(self):
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
assert adjoined == expected
def test_adjoin_unicode(self):
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'], ['ggg', 'hhh', u'いいい']]
expected = u'あ dd ggg\nb ええ hhh\nc ff いいい'
adjoined = printing.adjoin(2, *data)
assert adjoined == expected
adj = fmt.EastAsianTextAdjustment()
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(2, *data)
assert adjoined == expected
cols = adjoined.split('\n')
assert adj.len(cols[0]) == 13
assert adj.len(cols[1]) == 13
assert adj.len(cols[2]) == 16
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(7, *data)
assert adjoined == expected
cols = adjoined.split('\n')
assert adj.len(cols[0]) == 23
assert adj.len(cols[1]) == 23
assert adj.len(cols[2]) == 26
def test_justify(self):
adj = fmt.EastAsianTextAdjustment()
def just(x, *args, **kwargs):
# wrapper to test single str
return adj.justify([x], *args, **kwargs)[0]
assert just('abc', 5, mode='left') == 'abc '
assert just('abc', 5, mode='center') == ' abc '
assert just('abc', 5, mode='right') == ' abc'
assert just(u'abc', 5, mode='left') == 'abc '
assert just(u'abc', 5, mode='center') == ' abc '
assert just(u'abc', 5, mode='right') == ' abc'
assert just(u'パンダ', 5, mode='left') == u'パンダ'
assert just(u'パンダ', 5, mode='center') == u'パンダ'
assert just(u'パンダ', 5, mode='right') == u'パンダ'
assert just(u'パンダ', 10, mode='left') == u'パンダ '
assert just(u'パンダ', 10, mode='center') == u' パンダ '
assert just(u'パンダ', 10, mode='right') == u' パンダ'
def test_east_asian_len(self):
adj = fmt.EastAsianTextAdjustment()
assert adj.len('abc') == 3
assert adj.len(u'abc') == 3
assert adj.len(u'パンダ') == 6
assert adj.len(u'パンダ') == 5
assert adj.len(u'パンダpanda') == 11
assert adj.len(u'パンダpanda') == 10
def test_ambiguous_width(self):
adj = fmt.EastAsianTextAdjustment()
assert adj.len(u'¡¡ab') == 4
with cf.option_context('display.unicode.ambiguous_as_wide', True):
adj = fmt.EastAsianTextAdjustment()
assert adj.len(u'¡¡ab') == 6
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'],
['ggg', u'¡¡ab', u'いいい']]
expected = u'あ dd ggg \nb ええ ¡¡ab\nc ff いいい'
adjoined = adj.adjoin(2, *data)
assert adjoined == expected
class TestTableSchemaRepr(object):
@classmethod
def setup_class(cls):
pytest.importorskip('IPython')
from IPython.core.interactiveshell import InteractiveShell
cls.display_formatter = InteractiveShell.instance().display_formatter
def test_publishes(self):
df = pd.DataFrame({"A": [1, 2]})
objects = [df['A'], df, df] # dataframe / series
expected_keys = [
{'text/plain', 'application/vnd.dataresource+json'},
{'text/plain', 'text/html', 'application/vnd.dataresource+json'},
]
opt = pd.option_context('display.html.table_schema', True)
for obj, expected in zip(objects, expected_keys):
with opt:
formatted = self.display_formatter.format(obj)
assert set(formatted[0].keys()) == expected
with_latex = pd.option_context('display.latex.repr', True)
with opt, with_latex:
formatted = self.display_formatter.format(obj)
expected = {'text/plain', 'text/html', 'text/latex',
'application/vnd.dataresource+json'}
assert set(formatted[0].keys()) == expected
def test_publishes_not_implemented(self):
# column MultiIndex
# GH 15996
midx = pd.MultiIndex.from_product([['A', 'B'], ['a', 'b', 'c']])
df = pd.DataFrame(np.random.randn(5, len(midx)), columns=midx)
opt = pd.option_context('display.html.table_schema', True)
with opt:
formatted = self.display_formatter.format(df)
expected = {'text/plain', 'text/html'}
assert set(formatted[0].keys()) == expected
def test_config_on(self):
df = pd.DataFrame({"A": [1, 2]})
with pd.option_context("display.html.table_schema", True):
result = df._repr_data_resource_()
assert result is not None
def test_config_default_off(self):
df = pd.DataFrame({"A": [1, 2]})
with pd.option_context("display.html.table_schema", False):
result = df._repr_data_resource_()
assert result is None
def test_enable_data_resource_formatter(self):
# GH 10491
formatters = self.display_formatter.formatters
mimetype = 'application/vnd.dataresource+json'
with pd.option_context('display.html.table_schema', True):
assert 'application/vnd.dataresource+json' in formatters
assert formatters[mimetype].enabled
# still there, just disabled
assert 'application/vnd.dataresource+json' in formatters
assert not formatters[mimetype].enabled
# able to re-set
with pd.option_context('display.html.table_schema', True):
assert 'application/vnd.dataresource+json' in formatters
assert formatters[mimetype].enabled
# smoke test that it works
self.display_formatter.format(cf)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# pytest.skip
# with tm.stdin_encoding(encoding=None):
# result = printing.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
| apache-2.0 |
norheim/pextant | pextant/api.py | 2 | 3350 | import csv
import json
import logging
import re
from pextant.solvers.astarMesh import astarSolver
from pextant.analysis.loadWaypoints import JSONloader
import matplotlib.pyplot as plt
logger = logging.getLogger()
class Pathfinder:
"""
This class performs the A* path finding algorithm and contains the Cost Functions. Also includes
capabilities for analysis of a path.
This class still needs performance testing for maps of larger sizes. I don't believe that
we will be doing anything extremely computationally intensive though.
Current cost functions are Time, Distance, and (Metabolic) Energy. It would be useful to be able to
optimize on other resources like battery power or water sublimated, but those are significantly more
difficult because they depend on shadowing and were not implemented by Aaron.
"""
def __init__(self, explorer_model, environmental_model):
cheating = 1
self.solver = astarSolver(environmental_model, explorer_model,
optimize_on = 'Energy', heuristic_accelerate = cheating)
def aStarCompletePath(self, optimize_on, waypoints, returnType="JSON", dh=None, fileName=None ):
pass
def completeSearch(self, optimize_on, waypoints, filepath=None ):
"""
Returns a tuple representing the path and the total cost of the path.
The path will be a list. All activity points will be duplicated in
the returned path.
waypoints is a list of activityPoint objects, in the correct order. fileName is
used when we would like to write output to a file and is currently necessary
for csv return types.
"""
segmentsout, rawpoints, items = self.solver.solvemultipoint(waypoints)
if filepath:
extension = re.search('^(.+\/[^/]+)\.(\w+)$', filepath).group(2)
else:
extension = None
if extension == "json":
json.dump(segmentsout.tojson(), filepath)
elif extension == "csv":
header = [['isStation', 'x', 'y', 'z', 'distanceMeters', 'energyJoules', 'timeSeconds']]
rows = header + segmentsout.tocsv()
with open(filepath, 'wb') as csvfile:
writer = csv.writer(csvfile)
for row in rows:
writer.writerow(row)
return rows
return segmentsout, rawpoints, items
def completeSearchFromJSON(self, optimize_on, jsonInput, filepath=None, algorithm="A*",
numTestPoints=0):
jloader = JSONloader.from_string(jsonInput)
waypoints = jloader.get_waypoints()
#if algorithm == "A*":
segmentsout,_,_ = self.completeSearch(optimize_on, waypoints, filepath)
updatedjson = jloader.add_search_sol(segmentsout.list)
return updatedjson
if __name__ == '__main__':
from pextant.analysis.loadWaypoints import loadPoints
from explorers import Astronaut
from EnvironmentalModel import GDALMesh
hi_low = GDALMesh('maps/HI_lowqual_DEM.tif')
waypoints = loadPoints('waypoints/HI_13Nov16_MD7_A.json')
env_model = hi_low.loadSubSection(waypoints.geoEnvelope())
astronaut = Astronaut(80)
pathfinder = Pathfinder(astronaut, env_model)
out = pathfinder.aStarCompletePath('Energy', waypoints)
print out | mit |
redfuture/linux-kernel | tools/perf/scripts/python/netdev-times.py | 1544 | 15191 | # Display the processing of packets and the time each stage takes.
# It helps us to investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
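# Hedged usage note (not part of the original script): this script is normally
# run through perf's script interface, e.g. something like
#   perf script -s netdev-times.py tx rx dev=eth0
# after recording the relevant net/irq/skb tracepoints; the exact record
# command depends on the perf version and is not shown here.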
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display the process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
callchain, irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
poettering/systemd | test/rule-syntax-check.py | 19 | 2468 | #!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1+
#
# Simple udev rules syntax checker
#
# © 2010 Canonical Ltd.
# Author: Martin Pitt <martin.pitt@ubuntu.com>
import re
import sys
import os
from glob import glob
rules_files = sys.argv[1:]
if not rules_files:
sys.exit('Specify files to test as arguments')
quoted_string_re = r'"(?:[^\\"]|\\.)*"'
no_args_tests = re.compile(r'(ACTION|DEVPATH|KERNELS?|NAME|SYMLINK|SUBSYSTEMS?|DRIVERS?|TAG|PROGRAM|RESULT|TEST)\s*(?:=|!)=\s*' + quoted_string_re + '$')
# PROGRAM can also be specified as an assignment.
program_assign = re.compile(r'PROGRAM\s*=\s*' + quoted_string_re + '$')
args_tests = re.compile(r'(ATTRS?|ENV|CONST|TEST){([a-zA-Z0-9/_.*%-]+)}\s*(?:=|!)=\s*' + quoted_string_re + '$')
no_args_assign = re.compile(r'(NAME|SYMLINK|OWNER|GROUP|MODE|TAG|RUN|LABEL|GOTO|OPTIONS|IMPORT)\s*(?:\+=|:=|=)\s*' + quoted_string_re + '$')
args_assign = re.compile(r'(ATTR|ENV|IMPORT|RUN){([a-zA-Z0-9/_.*%-]+)}\s*(=|\+=)\s*' + quoted_string_re + '$')
# Find comma-separated groups, but allow commas that are inside quoted strings.
# Using quoted_string_re + '?' so that strings missing the last double quote
# will still match for this part that splits on commas.
comma_separated_group_re = re.compile(r'(?:[^,"]|' + quoted_string_re + '?)+')
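# Illustrative example (not part of the original checker): a rule line such as
#   SUBSYSTEM=="block", ATTR{size}=="0", GOTO="ignore"
# splits into three clauses here; each clause is then matched against the
# test/assignment patterns above.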
result = 0
buffer = ''
for path in rules_files:
print('# looking at {}'.format(path))
lineno = 0
for line in open(path):
lineno += 1
# handle line continuation
if line.endswith('\\\n'):
buffer += line[:-2]
continue
else:
line = buffer + line
buffer = ''
# filter out comments and empty lines
line = line.strip()
if not line or line.startswith('#'):
continue
# Separator ',' is normally optional but we make it mandatory here as
# it generally improves the readability of the rules.
for clause_match in comma_separated_group_re.finditer(line):
clause = clause_match.group().strip()
if not (no_args_tests.match(clause) or args_tests.match(clause) or
no_args_assign.match(clause) or args_assign.match(clause) or
program_assign.match(clause)):
print('Invalid line {}:{}: {}'.format(path, lineno, line))
print(' clause:', clause)
print()
result = 1
break
sys.exit(result)
| gpl-2.0 |
mdhaber/scipy | scipy/signal/wavelets.py | 16 | 14046 | import numpy as np
from scipy.linalg import eig
from scipy.special import comb
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
The 2p coefficients of the low-pass FIR filter.
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
Coefficients of the low-pass filter.
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
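# Hedged example (illustration only): for the Haar low-pass filter
# hk = daub(1) = [1/sqrt(2), 1/sqrt(2)], qmf(hk) gives [1/sqrt(2), -1/sqrt(2)].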
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.empty((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
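# Hedged example (illustration only): with hk = daub(2) (so N = 3) and J = 5,
# cascade(hk, 5) returns x, phi and psi, each of length N * 2**J = 96, with
# phi and psi sampled at the dyadic points K/2**5.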
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
morlet2 : Implementation of Morlet wavelet, compatible with `cwt`.
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of `w`.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
This version has a correction
term to improve admissibility. For `w` greater than 5, the
correction term is negligible.
Note that the energy of the returned wavelet is not normalised
according to `s`.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
Note: This function was created before `cwt` and is not compatible
with it.
"""
x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)
output = np.exp(1j * w * x)
if complete:
output -= np.exp(-0.5 * (w**2))
output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25)
return output
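# Hedged worked example (illustration only), using the note above: with a
# sampling rate r = 100 Hz, M = 100, w = 5 and s = 1, the fundamental
# frequency is f = 2*s*w*r / M = 2*1*5*100/100 = 10 Hz.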
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A * (1 - (x/a)**2) * exp(-0.5*(x/a)**2)``,
where ``A = 2/(sqrt(3*a)*(pi**0.25))``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def morlet2(M, s, w=5):
"""
Complex Morlet wavelet, designed to work with `cwt`.
Returns the complete version of morlet wavelet, normalised
according to `s`::
exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s)
Parameters
----------
M : int
Length of the wavelet.
s : float
Width parameter of the wavelet.
w : float, optional
Omega0. Default is 5
Returns
-------
morlet : (M,) ndarray
See Also
--------
morlet : Implementation of Morlet wavelet, incompatible with `cwt`
Notes
-----
.. versionadded:: 1.4.0
This function was designed to work with `cwt`. Because `morlet2`
returns an array of complex numbers, the `dtype` argument of `cwt`
should be set to `complex128` for best results.
Note the difference in implementation with `morlet`.
The fundamental frequency of this wavelet in Hz is given by::
f = w*fs / (2*s*np.pi)
where ``fs`` is the sampling rate and `s` is the wavelet width parameter.
Similarly we can get the wavelet width parameter at ``f``::
s = w*fs / (2*f*np.pi)
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> M = 100
>>> s = 4.0
>>> w = 2.0
>>> wavelet = signal.morlet2(M, s, w)
>>> plt.plot(abs(wavelet))
>>> plt.show()
This example shows basic use of `morlet2` with `cwt` in time-frequency
analysis:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t, dt = np.linspace(0, 1, 200, retstep=True)
>>> fs = 1/dt
>>> w = 6.
>>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t)
>>> freq = np.linspace(1, fs/2, 100)
>>> widths = w*fs / (2*freq*np.pi)
>>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w)
>>> plt.pcolormesh(t, freq, np.abs(cwtm), cmap='viridis', shading='gouraud')
>>> plt.show()
"""
x = np.arange(0, M) - (M - 1.0) / 2
x = x / s
wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x**2) * np.pi**(-0.25)
output = np.sqrt(1/s) * wavelet
return output
def cwt(data, wavelet, widths, dtype=None, **kwargs):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter. The `wavelet` function
is allowed to be complex.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
dtype : data-type, optional
The desired data type of output. Defaults to ``float64`` if the
output of `wavelet` is real and ``complex128`` if it is complex.
.. versionadded:: 1.4.0
kwargs
Keyword arguments passed to wavelet function.
.. versionadded:: 1.4.0
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
.. versionadded:: 1.4.0
For non-symmetric, complex-valued wavelets, the input signal is convolved
with the time-reversed complex-conjugate of the wavelet data [1].
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, np.conj(wavelet(length, width[ii],
**kwargs))[::-1], mode='same')
References
----------
.. [1] S. Mallat, "A Wavelet Tour of Signal Processing (3rd Edition)",
Academic Press, 2009.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
if wavelet == ricker:
window_size = kwargs.pop('window_size', None)
# Determine output type
if dtype is None:
if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG':
dtype = np.complex128
else:
dtype = np.float64
output = np.empty((len(widths), len(data)), dtype=dtype)
for ind, width in enumerate(widths):
N = np.min([10 * width, len(data)])
# the conditional block below and the window_size
# kwarg pop above may be removed eventually; these
# are shims for 32-bit arch + NumPy <= 1.14.5 to
# address gh-11095
if wavelet == ricker and window_size is None:
ceil = np.ceil(N)
if ceil != N:
N = int(N)
wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1])
output[ind] = convolve(data, wavelet_data, mode='same')
return output
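def _cwt_row_demo():
    # Illustrative sketch, not part of the original module: one row of the
    # transform equals the convolution spelled out in the Notes section of
    # `cwt` above.  The signal and widths below are examples only.
    t = np.linspace(-1, 1, 200, endpoint=False)
    sig = np.cos(2 * np.pi * 7 * t)
    widths = np.arange(1, 6)
    cwtmatr = cwt(sig, ricker, widths)
    width = widths[2]
    length = np.min([10 * width, len(sig)])
    row = convolve(sig, np.conj(ricker(length, width))[::-1], mode='same')
    return np.allclose(cwtmatr[2], row)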
| bsd-3-clause |
LaoQi/icode | mypylib/remoto/lib/vendor/execnet/deprecated.py | 3 | 1554 | """
some deprecated calls
(c) 2008-2009, Holger Krekel and others
"""
import execnet
def PopenGateway(python=None):
""" instantiate a gateway to a subprocess
started with the given 'python' executable.
"""
APIWARN("1.0.0b4", "use makegateway('popen')")
spec = execnet.XSpec("popen")
spec.python = python
return execnet.default_group.makegateway(spec)
def SocketGateway(host, port):
""" This Gateway provides interaction with a remote process
by connecting to a specified socket. On the remote
side you need to manually start a small script
(py/execnet/script/socketserver.py) that accepts
SocketGateway connections or use the experimental
new_remote() method on existing gateways.
"""
APIWARN("1.0.0b4", "use makegateway('socket=host:port')")
spec = execnet.XSpec("socket=%s:%s" %(host, port))
return execnet.default_group.makegateway(spec)
def SshGateway(sshaddress, remotepython=None, ssh_config=None):
""" instantiate a remote ssh process with the
given 'sshaddress' and remotepython version.
you may specify an ssh_config file.
"""
APIWARN("1.0.0b4", "use makegateway('ssh=host')")
spec = execnet.XSpec("ssh=%s" % sshaddress)
spec.python = remotepython
spec.ssh_config = ssh_config
return execnet.default_group.makegateway(spec)
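def _modern_equivalents_demo():
    # Illustrative sketch, not part of the original module: the spec strings
    # assembled by the deprecated helpers above can be passed directly to
    # execnet.makegateway(), as the APIWARN messages recommend.  Only the
    # local "popen" gateway is actually created here; the commented spec
    # strings use example host names.
    gw = execnet.makegateway("popen")          # replaces PopenGateway()
    # execnet.makegateway("socket=host:8888")  # replaces SocketGateway(host, 8888)
    # execnet.makegateway("ssh=user@host")     # replaces SshGateway("user@host")
    return gw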
def APIWARN(version, msg, stacklevel=3):
import warnings
Warn = DeprecationWarning("(since version %s) %s" %(version, msg))
warnings.warn(Warn, stacklevel=stacklevel)
| gpl-2.0 |
vmax-feihu/hue | desktop/libs/notebook/src/notebook/decorators.py | 2 | 3507 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.http import Http404
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import force_unicode
from desktop.models import Document2, Document
from notebook.connectors.base import QueryExpired, QueryError, SessionExpired, AuthenticationRequired
LOG = logging.getLogger(__name__)
def check_document_access_permission():
def inner(view_func):
def decorate(request, *args, **kwargs):
notebook_id = request.GET.get('notebook')
if not notebook_id:
notebook_id = json.loads(request.POST.get('notebook', '{}')).get('id')
try:
if notebook_id:
document = Document2.objects.get(id=notebook_id)
document.doc.get().can_read_or_exception(request.user)
except Document2.DoesNotExist:
raise PopupException(_('Document %(id)s does not exist') % {'id': notebook_id})
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
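# Illustrative sketch, not part of the original module: any Django view that
# receives the notebook id via GET or POST can be protected by stacking the
# decorator above it.  The view name and response below are examples only.
@check_document_access_permission()
def _example_protected_view(request):
  return JsonResponse({'status': 0})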
def check_document_modify_permission():
def inner(view_func):
def decorate(request, *args, **kwargs):
notebook = json.loads(request.POST.get('notebook', '{}'))
try:
if notebook.get('id'):
doc2 = Document2.objects.get(id=notebook['id'])
doc2.doc.get().can_write_or_exception(request.user)
except Document.DoesNotExist:
raise PopupException(_('Job %(id)s does not exist') % {'id': notebook.get('id')})
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
def api_error_handler(func):
def decorator(*args, **kwargs):
response = {}
try:
return func(*args, **kwargs)
except SessionExpired, e:
response['status'] = -2
except QueryExpired, e:
response['status'] = -3
except AuthenticationRequired, e:
response['status'] = 401
except QueryError, e:
LOG.exception('error running %s' % func)
response['status'] = 1
response['message'] = force_unicode(str(e))
except Exception, e:
LOG.exception('error running %s' % func)
response['status'] = -1
response['message'] = force_unicode(str(e))
finally:
if response:
return JsonResponse(response)
return decorator
def json_error_handler(view_fn):
def decorator(*args, **kwargs):
try:
return view_fn(*args, **kwargs)
except Http404, e:
raise e
except Exception, e:
response = {
'error': str(e)
}
return JsonResponse(response, status=500)
return decorator
| apache-2.0 |
takis/odoo | addons/edi/models/res_partner.py | 437 | 4243 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.osv import osv
from edi import EDIMixin
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
RES_PARTNER_EDI_STRUCT = {
'name': True,
'ref': True,
'lang': True,
'website': True,
'email': True,
'street': True,
'street2': True,
'zip': True,
'city': True,
'country_id': True,
'state_id': True,
'phone': True,
'fax': True,
'mobile': True,
}
class res_partner(osv.osv, EDIMixin):
_inherit = "res.partner"
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
return super(res_partner,self).edi_export(cr, uid, records,
edi_struct or dict(RES_PARTNER_EDI_STRUCT),
context=context)
def _get_bank_type(self, cr, uid, context=None):
# first option: the "normal" bank type, installed by default
res_partner_bank_type = self.pool.get('res.partner.bank.type')
try:
return self.pool.get('ir.model.data').get_object(cr, uid, 'base', 'bank_normal', context=context).code
except ValueError:
pass
# second option: create a new custom type for EDI or use it if already created, as IBAN type is
# not always appropriate: we need a free-form bank type for max flexibility (users can correct
# data manually after import)
code, label = 'edi_generic', 'Generic Bank Type (auto-created for EDI)'
bank_code_ids = res_partner_bank_type.search(cr, uid, [('code','=',code)], context=context)
if not bank_code_ids:
_logger.info('Normal bank account type is missing, creating '
'a generic bank account type for EDI.')
            res_partner_bank_type.create(cr, SUPERUSER_ID, {'name': label,
                                                            'code': code})
return code
def edi_import(self, cr, uid, edi_document, context=None):
# handle bank info, if any
edi_bank_ids = edi_document.pop('bank_ids', None)
contact_id = super(res_partner,self).edi_import(cr, uid, edi_document, context=context)
if edi_bank_ids:
contact = self.browse(cr, uid, contact_id, context=context)
import_ctx = dict((context or {}),
default_partner_id = contact.id,
default_state=self._get_bank_type(cr, uid, context))
for ext_bank_id, bank_name in edi_bank_ids:
try:
self.edi_import_relation(cr, uid, 'res.partner.bank',
bank_name, ext_bank_id, context=import_ctx)
except osv.except_osv:
# failed to import it, try again with unrestricted default type
_logger.warning('Failed to import bank account using'
'bank type: %s, ignoring', import_ctx['default_state'],
exc_info=True)
return contact_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jmartinezchaine/OpenERP | openerp/addons/web_livechat/__init__.py | 9 | 1049 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import publisher_warranty
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
EvanK/ansible | lib/ansible/module_utils/known_hosts.py | 52 | 6939 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import hmac
import re
from ansible.module_utils.six.moves.urllib.parse import urlparse
try:
from hashlib import sha1
except ImportError:
import sha as sha1
HASHED_KEY_MAGIC = "|1|"
def is_ssh_url(url):
""" check if url is ssh """
if "@" in url and "://" not in url:
return True
for scheme in "ssh://", "git+ssh://", "ssh+git://":
if url.startswith(scheme):
return True
return False
def get_fqdn_and_port(repo_url):
""" chop the hostname and port out of a url """
fqdn = None
port = None
ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?')
if "@" in repo_url and "://" not in repo_url:
        # most likely a user@host:path or user@host/path type URL
repo_url = repo_url.split("@", 1)[1]
match = ipv6_re.match(repo_url)
# For this type of URL, colon specifies the path, not the port
if match:
fqdn, path = match.groups()
elif ":" in repo_url:
fqdn = repo_url.split(":")[0]
elif "/" in repo_url:
fqdn = repo_url.split("/")[0]
elif "://" in repo_url:
# this should be something we can parse with urlparse
parts = urlparse(repo_url)
# parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
# ensure we actually have a parts[1] before continuing.
if parts[1] != '':
fqdn = parts[1]
if "@" in fqdn:
fqdn = fqdn.split("@", 1)[1]
match = ipv6_re.match(fqdn)
if match:
fqdn, port = match.groups()
elif ":" in fqdn:
fqdn, port = fqdn.split(":")[0:2]
return fqdn, port
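def _get_fqdn_and_port_demo():
    # Illustrative sketch, not part of the original module: the URL forms
    # distinguished by get_fqdn_and_port() above.  The repository URLs are
    # examples only.
    assert get_fqdn_and_port("git@github.com:user/repo.git") == ("github.com", None)
    assert get_fqdn_and_port("ssh://git@example.com:2222/repo.git") == ("example.com", "2222")
    assert get_fqdn_and_port("https://example.com/repo.git") == ("example.com", None)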
def check_hostkey(module, fqdn):
return not not_in_host_file(module, fqdn)
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.
def not_in_host_file(self, host):
if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
user_host_file = os.path.expanduser(user_host_file)
host_file_list = []
host_file_list.append(user_host_file)
host_file_list.append("/etc/ssh/ssh_known_hosts")
host_file_list.append("/etc/ssh/ssh_known_hosts2")
host_file_list.append("/etc/openssh/ssh_known_hosts")
hfiles_not_found = 0
for hf in host_file_list:
if not os.path.exists(hf):
hfiles_not_found += 1
continue
try:
host_fh = open(hf)
except IOError:
hfiles_not_found += 1
continue
else:
data = host_fh.read()
host_fh.close()
for line in data.split("\n"):
if line is None or " " not in line:
continue
tokens = line.split()
if tokens[0].find(HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
try:
(kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
return False
except Exception:
# invalid hashed host key, skip it
continue
else:
# standard host file entry
if host in tokens[0]:
return False
return True
def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
""" use ssh-keyscan to add the hostkey """
keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
if 'USER' in os.environ:
user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_ssh_dir = "~/.ssh/"
user_host_file = "~/.ssh/known_hosts"
user_ssh_dir = os.path.expanduser(user_ssh_dir)
if not os.path.exists(user_ssh_dir):
if create_dir:
try:
os.makedirs(user_ssh_dir, int('700', 8))
except Exception:
module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
else:
module.fail_json(msg="%s does not exist" % user_ssh_dir)
elif not os.path.isdir(user_ssh_dir):
module.fail_json(msg="%s is not a directory" % user_ssh_dir)
if port:
this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn)
else:
this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
rc, out, err = module.run_command(this_cmd)
# ssh-keyscan gives a 0 exit code and prints nothing on timeout
if rc != 0 or not out:
msg = 'failed to retrieve hostkey'
if not out:
msg += '. "%s" returned no matches.' % this_cmd
else:
msg += ' using command "%s". [stdout]: %s' % (this_cmd, out)
if err:
msg += ' [stderr]: %s' % err
module.fail_json(msg=msg)
module.append_to_file(user_host_file, out)
return rc, out, err
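def _ensure_hostkey(module, repo_url):
    # Illustrative sketch, not part of the original module: how an ssh-based
    # module (for example a git checkout task) might combine the helpers
    # above to accept a host key before connecting.  The parameter names
    # are examples only.
    if is_ssh_url(repo_url):
        fqdn, port = get_fqdn_and_port(repo_url)
        if fqdn and not_in_host_file(module, fqdn):
            add_host_key(module, fqdn, port=port, create_dir=True)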
| gpl-3.0 |
tomkralidis/geonode | geonode/people/tests.py | 1 | 11607 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.test.utils import override_settings
from geonode.tests.base import GeoNodeBaseTestSupport
from django.core import mail
from django.urls import reverse
from django.db import transaction
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.sites.models import Site
from geonode.layers import utils
from geonode.layers.models import Layer
from geonode.people import profileextractors
class TestSetUnsetUserLayerPermissions(GeoNodeBaseTestSupport):
def setUp(self):
super(TestSetUnsetUserLayerPermissions, self).setUp()
self.layers = Layer.objects.all()[:3]
self.layer_ids = [layer.pk for layer in self.layers]
self.user_ids = ','.join([str(element.pk) for element in get_user_model().objects.all()[:3]])
self.permission_type = ('r', 'w', 'd')
self.groups = Group.objects.all()[:3]
self.group_ids = ','.join([str(element.pk) for element in self.groups])
def test_redirect_on_get_request(self):
"""
Test that an immediate redirect occurs back to the admin
        page of origin when no IDs are supplied
"""
self.client.login(username="admin", password="admin")
response = self.client.get(reverse('set_user_layer_permissions'))
self.assertEqual(response.status_code, 302)
def test_admin_only_access(self):
"""
Test that only admin users can access the routes
"""
self.client.login(username="bobby", password="bob")
response = self.client.get(reverse('set_user_layer_permissions'))
self.assertEqual(response.status_code, 401)
@override_settings(ASYNC_SIGNALS=False)
def test_set_unset_user_layer_permissions(self):
"""
Test that user permissions are set for layers
"""
self.client.login(username="admin", password="admin")
response = self.client.post(reverse('set_user_layer_permissions'), data={
'ids': self.user_ids,
'layers': self.layer_ids,
'permission_type': self.permission_type,
'mode': 'set'
})
self.assertEqual(response.status_code, 302)
with transaction.atomic():
for permissions_name in self.permission_type:
utils.set_layers_permissions(
permissions_name,
[resource.name for resource in Layer.objects.filter(
id__in=[int(_id) for _id in self.layer_ids])],
[user.username for user in get_user_model().objects.filter(id__in=self.user_ids.split(","))],
[],
False,
verbose=True
)
for layer in self.layers:
perm_spec = layer.get_all_level_info()
self.assertTrue(get_user_model().objects.all()[0] in perm_spec["users"])
@override_settings(ASYNC_SIGNALS=False)
def test_set_unset_group_layer_permissions(self):
"""
Test that group permissions are set for layers
"""
self.client.login(username="admin", password="admin")
response = self.client.post(reverse('set_group_layer_permissions'), data={
'ids': self.group_ids,
'layers': self.layer_ids,
'permission_type': self.permission_type,
'mode': 'set'
})
self.assertEqual(response.status_code, 302)
with transaction.atomic():
for permissions_name in self.permission_type:
utils.set_layers_permissions(
permissions_name,
[resource.name for resource in Layer.objects.filter(
id__in=[int(_id) for _id in self.layer_ids])],
[],
[group.name for group in Group.objects.filter(id__in=self.group_ids.split(","))],
False,
verbose=True
)
for layer in self.layers:
perm_spec = layer.get_all_level_info()
self.assertTrue(self.groups[0] in perm_spec["groups"])
@override_settings(ASYNC_SIGNALS=False)
def test_unset_group_layer_perms(self):
"""
Test that group permissions are unset for layers
"""
user = get_user_model().objects.all()[0]
for layer in self.layers:
layer.set_permissions({'users': {user.username: [
'change_layer_data', 'view_resourcebase',
'download_resourcebase', 'change_resourcebase_metadata']}})
self.client.login(username="admin", password="admin")
response = self.client.post(reverse('set_user_layer_permissions'), data={
'ids': self.user_ids,
'layers': self.layer_ids,
'permission_type': self.permission_type,
'mode': 'unset'
})
self.assertEqual(response.status_code, 302)
with transaction.atomic():
for permissions_name in self.permission_type:
utils.set_layers_permissions(
permissions_name,
[resource.name for resource in Layer.objects.filter(
id__in=[int(_id) for _id in self.layer_ids])],
[user.username for user in get_user_model().objects.filter(id__in=self.user_ids.split(","))],
[],
True,
verbose=True
)
for layer in self.layers:
perm_spec = layer.get_all_level_info()
self.assertTrue(user not in perm_spec["users"])
class PeopleTest(GeoNodeBaseTestSupport):
fixtures = ['initial_data.json', 'people_data.json']
def test_forgot_username(self):
url = reverse('forgot_username')
# page renders
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# and responds for a bad email
response = self.client.post(url, data={
'email': 'foobar@doesnotexist.com'
})
self.assertContains(response, "No user could be found with that email address.")
default_contact = get_user_model().objects.get(username='default_contact')
response = self.client.post(url, data={
'email': default_contact.email
})
# and sends a mail for a good one
self.assertEqual(len(mail.outbox), 1)
site = Site.objects.get_current()
# Verify that the subject of the first message is correct.
self.assertEqual(
mail.outbox[0].subject,
"Your username for " +
site.name)
class FacebookExtractorTestCase(GeoNodeBaseTestSupport):
def setUp(self):
super(FacebookExtractorTestCase, self).setUp()
self.data = {
"email": "phony_mail",
"first_name": "phony_first_name",
"last_name": "phony_last_name",
"cover": "phony_cover",
}
self.extractor = profileextractors.FacebookExtractor()
def test_extract_area(self):
with self.assertRaises(NotImplementedError):
self.extractor.extract_area(self.data)
def test_extract_city(self):
with self.assertRaises(NotImplementedError):
self.extractor.extract_city(self.data)
def test_extract_country(self):
with self.assertRaises(NotImplementedError):
self.extractor.extract_country(self.data)
def test_extract_delivery(self):
with self.assertRaises(NotImplementedError):
self.extractor.extract_delivery(self.data)
def test_extract_email(self):
result = self.extractor.extract_email(self.data)
self.assertEqual(result, self.data["email"])
def test_extract_fax(self):
with self.assertRaises(NotImplementedError):
self.extractor.extract_fax(self.data)
def test_extract_first_name(self):
result = self.extractor.extract_first_name(self.data)
self.assertEqual(result, self.data["first_name"])
def test_extract_last_name(self):
result = self.extractor.extract_last_name(self.data)
self.assertEqual(result, self.data["last_name"])
def test_extract_organization(self):
with self.assertRaises(NotImplementedError):
self.extractor.extract_organization(self.data)
def test_extract_position(self):
with self.assertRaises(NotImplementedError):
self.extractor.extract_position(self.data)
def test_extract_profile(self):
result = self.extractor.extract_profile(self.data)
self.assertEqual(result, self.data["cover"])
def test_extract_voice(self):
with self.assertRaises(NotImplementedError):
self.extractor.extract_voice(self.data)
def test_extract_zipcode(self):
with self.assertRaises(NotImplementedError):
self.extractor.extract_zipcode(self.data)
class LinkedInExtractorTestCase(GeoNodeBaseTestSupport):
def setUp(self):
super(LinkedInExtractorTestCase, self).setUp()
self.data = {
"id": "REDACTED",
"firstName": {
"localized": {
"en_US": "Tina"
},
"preferredLocale": {
"country": "US",
"language": "en"
}
},
"lastName": {
"localized": {
"en_US": "Belcher"
},
"preferredLocale": {
"country": "US",
"language": "en"
}
},
"profilePicture": {
"displayImage": "urn:li:digitalmediaAsset:B54328XZFfe2134zTyq"
},
"elements": [
{
"handle": "urn:li:emailAddress:3775708763",
"handle~": {
"emailAddress": "hsimpson@linkedin.com"
}
}
]
}
self.extractor = profileextractors.LinkedInExtractor()
def test_extract_email(self):
result = self.extractor.extract_email(self.data)
self.assertEqual(
result,
self.data["elements"][0]["handle~"]["emailAddress"]
)
def test_extract_first_name(self):
result = self.extractor.extract_first_name(self.data)
self.assertEqual(
result,
self.data["firstName"]["localized"]["en_US"]
)
def test_extract_last_name(self):
result = self.extractor.extract_last_name(self.data)
self.assertEqual(
result,
self.data["lastName"]["localized"]["en_US"]
)
| gpl-3.0 |
supersven/intellij-community | python/lib/Lib/fileinput.py | 80 | 14262 | """Helper class to quickly write a loop over all standard input files.
Typical use is:
import fileinput
for line in fileinput.input():
process(line)
This iterates over the lines of all files listed in sys.argv[1:],
defaulting to sys.stdin if the list is empty. If a filename is '-' it
is also replaced by sys.stdin. To specify an alternative list of
filenames, pass it as the argument to input(). A single file name is
also allowed.
Functions filename(), lineno() return the filename and cumulative line
number of the line that has just been read; filelineno() returns its
line number in the current file; isfirstline() returns true iff the
line just read is the first line of its file; isstdin() returns true
iff the line was read from sys.stdin. Function nextfile() closes the
current file so that the next iteration will read the first line from
the next file (if any); lines not read from the file will not count
towards the cumulative line count; the filename is not changed until
after the first line of the next file has been read. Function close()
closes the sequence.
Before any lines have been read, filename() returns None and both line
numbers are zero; nextfile() has no effect. After all lines have been
read, filename() and the line number functions return the values
pertaining to the last line read; nextfile() has no effect.
All files are opened in text mode by default; you can override this by
setting the mode parameter to input() or FileInput.__init__().
If an I/O error occurs during opening or reading a file, the IOError
exception is raised.
If sys.stdin is used more than once, the second and further use will
return no lines, except perhaps for interactive use, or if it has been
explicitly reset (e.g. using sys.stdin.seek(0)).
Empty files are opened and immediately closed; the only time their
presence in the list of filenames is noticeable at all is when the
last file opened is empty.
It is possible that the last line of a file doesn't end in a newline
character; otherwise lines are returned including the trailing
newline.
Class FileInput is the implementation; its methods filename(),
lineno(), fileline(), isfirstline(), isstdin(), nextfile() and close()
correspond to the functions in the module. In addition it has a
readline() method which returns the next input line, and a
__getitem__() method which implements the sequence behavior. The
sequence must be accessed in strictly sequential order; sequence
access and readline() cannot be mixed.
Optional in-place filtering: if the keyword argument inplace=1 is
passed to input() or to the FileInput constructor, the file is moved
to a backup file and standard output is directed to the input file.
This makes it possible to write a filter that rewrites its input file
in place. If the keyword argument backup=".<some extension>" is also
given, it specifies the extension for the backup file, and the backup
file remains around; by default, the extension is ".bak" and it is
deleted when the output file is closed. In-place filtering is
disabled when standard input is read. XXX The current implementation
does not work for MS-DOS 8+3 filesystems.
Performance: this module is unfortunately one of the slower ways of
processing large numbers of input lines. Nevertheless, a significant
speed-up has been obtained by using readlines(bufsize) instead of
readline(). A new keyword argument, bufsize=N, is present on the
input() function and the FileInput() class to override the default
buffer size.
XXX Possible additions:
- optional getopt argument processing
- isatty()
- read(), read(size), even readlines()
"""
import sys, os
__all__ = ["input","close","nextfile","filename","lineno","filelineno",
"isfirstline","isstdin","FileInput"]
_state = None
DEFAULT_BUFSIZE = 8*1024
def input(files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
"""input([files[, inplace[, backup[, mode[, openhook]]]]])
Create an instance of the FileInput class. The instance will be used
as global state for the functions of this module, and is also returned
to use during iteration. The parameters to this function will be passed
along to the constructor of the FileInput class.
"""
global _state
if _state and _state._file:
raise RuntimeError, "input() already active"
_state = FileInput(files, inplace, backup, bufsize, mode, openhook)
return _state
def close():
"""Close the sequence."""
global _state
state = _state
_state = None
if state:
state.close()
def nextfile():
"""
Close the current file so that the next iteration will read the first
line from the next file (if any); lines not read from the file will
not count towards the cumulative line count. The filename is not
changed until after the first line of the next file has been read.
Before the first line has been read, this function has no effect;
it cannot be used to skip the first file. After the last line of the
last file has been read, this function has no effect.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.nextfile()
def filename():
"""
Return the name of the file currently being read.
Before the first line has been read, returns None.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filename()
def lineno():
"""
Return the cumulative line number of the line that has just been read.
Before the first line has been read, returns 0. After the last line
of the last file has been read, returns the line number of that line.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.lineno()
def filelineno():
"""
Return the line number in the current file. Before the first line
has been read, returns 0. After the last line of the last file has
been read, returns the line number of that line within the file.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filelineno()
def fileno():
"""
Return the file number of the current file. When no file is currently
opened, returns -1.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.fileno()
def isfirstline():
"""
    Returns true if the line just read is the first line of its file,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isfirstline()
def isstdin():
"""
Returns true if the last line was read from sys.stdin,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isstdin()
class FileInput:
"""class FileInput([files[, inplace[, backup[, mode[, openhook]]]]])
Class FileInput is the implementation of the module; its methods
filename(), lineno(), fileline(), isfirstline(), isstdin(), fileno(),
nextfile() and close() correspond to the functions of the same name
in the module.
In addition it has a readline() method which returns the next
input line, and a __getitem__() method which implements the
sequence behavior. The sequence must be accessed in strictly
sequential order; random access and readline() cannot be mixed.
"""
def __init__(self, files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
if isinstance(files, basestring):
files = (files,)
else:
if files is None:
files = sys.argv[1:]
if not files:
files = ('-',)
else:
files = tuple(files)
self._files = files
self._inplace = inplace
self._backup = backup
self._bufsize = bufsize or DEFAULT_BUFSIZE
self._savestdout = None
self._output = None
self._filename = None
self._lineno = 0
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = None
self._buffer = []
self._bufindex = 0
# restrict mode argument to reading modes
if mode not in ('r', 'rU', 'U', 'rb'):
raise ValueError("FileInput opening mode must be one of "
"'r', 'rU', 'U' and 'rb'")
self._mode = mode
if inplace and openhook:
raise ValueError("FileInput cannot use an opening hook in inplace mode")
elif openhook and not callable(openhook):
raise ValueError("FileInput openhook must be callable")
self._openhook = openhook
def __del__(self):
self.close()
def close(self):
self.nextfile()
self._files = ()
def __iter__(self):
return self
def next(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
line = self.readline()
if not line:
raise StopIteration
return line
def __getitem__(self, i):
if i != self._lineno:
raise RuntimeError, "accessing lines out of order"
try:
return self.next()
except StopIteration:
raise IndexError, "end of input reached"
def nextfile(self):
savestdout = self._savestdout
self._savestdout = 0
if savestdout:
sys.stdout = savestdout
output = self._output
self._output = 0
if output:
output.close()
file = self._file
self._file = 0
if file and not self._isstdin:
file.close()
backupfilename = self._backupfilename
self._backupfilename = 0
if backupfilename and not self._backup:
try: os.unlink(backupfilename)
except OSError: pass
self._isstdin = False
self._buffer = []
self._bufindex = 0
def readline(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
if not self._file:
if not self._files:
return ""
self._filename = self._files[0]
self._files = self._files[1:]
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = 0
if self._filename == '-':
self._filename = '<stdin>'
self._file = sys.stdin
self._isstdin = True
else:
if self._inplace:
self._backupfilename = (
self._filename + (self._backup or os.extsep+"bak"))
try: os.unlink(self._backupfilename)
except os.error: pass
# The next few lines may raise IOError
os.rename(self._filename, self._backupfilename)
self._file = open(self._backupfilename, self._mode)
try:
perm = os.fstat(self._file.fileno()).st_mode
except (AttributeError, OSError):
# AttributeError occurs in Jython, where there's no
# os.fstat.
self._output = open(self._filename, "w")
else:
fd = os.open(self._filename,
os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
perm)
self._output = os.fdopen(fd, "w")
try:
if hasattr(os, 'chmod'):
os.chmod(self._filename, perm)
except OSError:
pass
self._savestdout = sys.stdout
sys.stdout = self._output
else:
# This may raise IOError
if self._openhook:
self._file = self._openhook(self._filename, self._mode)
else:
self._file = open(self._filename, self._mode)
self._buffer = self._file.readlines(self._bufsize)
self._bufindex = 0
if not self._buffer:
self.nextfile()
# Recursive call
return self.readline()
def filename(self):
return self._filename
def lineno(self):
return self._lineno
def filelineno(self):
return self._filelineno
def fileno(self):
if self._file:
try:
return self._file.fileno()
except ValueError:
return -1
else:
return -1
def isfirstline(self):
return self._filelineno == 1
def isstdin(self):
return self._isstdin
def hook_compressed(filename, mode):
ext = os.path.splitext(filename)[1]
if ext == '.gz':
import gzip
return gzip.open(filename, mode)
elif ext == '.bz2':
import bz2
return bz2.BZ2File(filename, mode)
else:
return open(filename, mode)
def hook_encoded(encoding):
import codecs
def openhook(filename, mode):
return codecs.open(filename, mode, encoding)
return openhook
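def _hooks_demo(filenames):
    # Illustrative sketch, not part of the original module: both hook
    # factories above plug into input()/FileInput via the openhook argument.
    # The encoding and the caller-supplied file names are examples only.
    for line in input(filenames, openhook=hook_compressed):
        pass  # .gz and .bz2 files are decompressed transparently
    close()
    for line in input(filenames, openhook=hook_encoded("utf-8")):
        pass  # lines are decoded as UTF-8 via the codecs module
    close()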
def _test():
import getopt
inplace = 0
backup = 0
opts, args = getopt.getopt(sys.argv[1:], "ib:")
for o, a in opts:
if o == '-i': inplace = 1
if o == '-b': backup = a
for line in input(args, inplace=inplace, backup=backup):
if line[-1:] == '\n': line = line[:-1]
if line[-1:] == '\r': line = line[:-1]
print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
isfirstline() and "*" or "", line)
print "%d: %s[%d]" % (lineno(), filename(), filelineno())
if __name__ == '__main__':
_test()
| apache-2.0 |
talbrecht/pism_pik06 | doc/site-packages/pybtex/style/sorting/none.py | 4 | 1296 | # Copyright (c) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pybtex.style.sorting import BaseSortingStyle
class SortingStyle(BaseSortingStyle):
name = 'none'
def sort(self, entries):
return entries
| gpl-3.0 |
HyperBaton/ansible | lib/ansible/module_utils/network/ingate/common.py | 38 | 2183 | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ingate Systems AB
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from ingate import ingatesdk
HAS_INGATESDK = True
except ImportError:
HAS_INGATESDK = False
def ingate_argument_spec(**kwargs):
client_options = dict(
version=dict(choices=['v1'], default='v1'),
scheme=dict(choices=['http', 'https'], required=True),
address=dict(type='str', required=True),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True),
port=dict(type='int'),
timeout=dict(type='int'),
validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
)
argument_spec = dict(
client=dict(type='dict', required=True,
options=client_options),
)
argument_spec.update(kwargs)
return argument_spec
def ingate_create_client(**kwargs):
api_client = ingate_create_client_noauth(**kwargs)
# Authenticate and get hold of a security token.
api_client.authenticate()
# Return the client.
return api_client
def ingate_create_client_noauth(**kwargs):
client_params = kwargs['client']
# Create API client.
api_client = ingatesdk.Client(client_params['version'],
client_params['scheme'],
client_params['address'],
client_params['username'],
client_params['password'],
port=client_params['port'],
timeout=client_params['timeout'])
# Check if we should skip SSL Certificate verification.
verify_ssl = client_params.get('validate_certs')
if not verify_ssl:
api_client.skip_verify_certificate()
# Return the client.
return api_client
def is_ingatesdk_installed(module):
if not HAS_INGATESDK:
module.fail_json(msg="The Ingate Python SDK module is required for this module.")
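def _example_module_setup():
    # Illustrative sketch, not part of the original module: how an Ingate
    # module typically wires these helpers together.  The extra 'unit'
    # option is an example only.
    from ansible.module_utils.basic import AnsibleModule
    argument_spec = ingate_argument_spec(unit=dict(type='str'))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    is_ingatesdk_installed(module)
    return ingate_create_client(**module.params)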
| gpl-3.0 |
yangchandle/FlaskTaskr | env/lib/python3.5/site-packages/jinja2/bccache.py | 256 | 12289 | # -*- coding: utf-8 -*-
"""
jinja2.bccache
~~~~~~~~~~~~~~
This module implements the bytecode cache system Jinja is optionally
using. This is useful if you have very complex template situations and
    the compilation of all those templates slows down your application too
much.
Situations where this is useful are often forking web applications that
are initialized on the first request.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from os import path, listdir
import os
import stat
import sys
import errno
import marshal
import tempfile
import fnmatch
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2, text_type
# marshal works better on 3.x, one hack less required
if not PY2:
marshal_dump = marshal.dump
marshal_load = marshal.load
else:
def marshal_dump(code, f):
if isinstance(f, file):
marshal.dump(code, f)
else:
f.write(marshal.dumps(code))
def marshal_load(f):
if isinstance(f, file):
return marshal.load(f)
return marshal.loads(f.read())
bc_version = 2
# magic version used to only change with new jinja versions. With 2.6
# we change this to also take Python version changes into account. The
# reason for this is that Python tends to segfault if fed earlier bytecode
# versions because someone thought it would be a good idea to reuse opcodes
# or make Python incompatible with earlier versions.
bc_magic = 'j2'.encode('ascii') + \
pickle.dumps(bc_version, 2) + \
pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
class Bucket(object):
"""Buckets are used to store the bytecode for one template. It's created
and initialized by the bytecode cache and passed to the loading functions.
The buckets get an internal checksum from the cache assigned and use this
to automatically reject outdated cache material. Individual bytecode
cache subclasses don't have to care about cache invalidation.
"""
def __init__(self, environment, key, checksum):
self.environment = environment
self.key = key
self.checksum = checksum
self.reset()
def reset(self):
"""Resets the bucket (unloads the bytecode)."""
self.code = None
def load_bytecode(self, f):
"""Loads bytecode from a file or file like object."""
# make sure the magic header is correct
magic = f.read(len(bc_magic))
if magic != bc_magic:
self.reset()
return
# the source code of the file changed, we need to reload
checksum = pickle.load(f)
if self.checksum != checksum:
self.reset()
return
self.code = marshal_load(f)
def write_bytecode(self, f):
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
raise TypeError('can\'t write empty bucket')
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
marshal_dump(self.code, f)
def bytecode_from_string(self, string):
"""Load bytecode from a string."""
self.load_bytecode(BytesIO(string))
def bytecode_to_string(self):
"""Return the bytecode as string."""
out = BytesIO()
self.write_bytecode(out)
return out.getvalue()
class BytecodeCache(object):
"""To implement your own bytecode cache you have to subclass this class
and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
these methods are passed a :class:`~jinja2.bccache.Bucket`.
A very basic bytecode cache that saves the bytecode on the file system::
from os import path
class MyCache(BytecodeCache):
def __init__(self, directory):
self.directory = directory
def load_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
if path.exists(filename):
with open(filename, 'rb') as f:
bucket.load_bytecode(f)
def dump_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
with open(filename, 'wb') as f:
bucket.write_bytecode(f)
A more advanced version of a filesystem based bytecode cache is part of
Jinja2.
"""
def load_bytecode(self, bucket):
"""Subclasses have to override this method to load bytecode into a
bucket. If they are not able to find code in the cache for the
bucket, it must not do anything.
"""
raise NotImplementedError()
def dump_bytecode(self, bucket):
"""Subclasses have to override this method to write the bytecode
from a bucket back to the cache. If it unable to do so it must not
fail silently but raise an exception.
"""
raise NotImplementedError()
def clear(self):
"""Clears the cache. This method is not used by Jinja2 but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name."""
hash = sha1(name.encode('utf-8'))
if filename is not None:
filename = '|' + filename
if isinstance(filename, text_type):
filename = filename.encode('utf-8')
hash.update(filename)
return hash.hexdigest()
def get_source_checksum(self, source):
"""Returns a checksum for the source."""
return sha1(source.encode('utf-8')).hexdigest()
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket
def set_bucket(self, bucket):
"""Put the bucket into the cache."""
self.dump_bytecode(bucket)
class FileSystemBytecodeCache(BytecodeCache):
"""A bytecode cache that stores bytecode on the filesystem. It accepts
two arguments: The directory where the cache items are stored and a
pattern string that is used to build the filename.
If no directory is specified a default cache directory is selected. On
Windows the user's temp directory is used, on UNIX systems a directory
is created for the user in the system temp directory.
The pattern can be used to have multiple separate caches operate on the
same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
is replaced with the cache key.
>>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
This bytecode cache supports clearing of the cache using the clear method.
"""
def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
self.pattern = pattern
def _get_default_cache_dir(self):
tmpdir = tempfile.gettempdir()
        # On Windows the temporary directory is already user-specific unless
        # explicitly forced otherwise.  We can just use that.
if os.name == 'nt':
return tmpdir
if not hasattr(os, 'getuid'):
raise RuntimeError('Cannot determine safe temp directory. You '
'need to explicitly provide one.')
dirname = '_jinja2-cache-%d' % os.getuid()
actual_dir = os.path.join(tmpdir, dirname)
try:
os.mkdir(actual_dir, stat.S_IRWXU) # 0o700
except OSError as e:
if e.errno != errno.EEXIST:
raise
actual_dir_stat = os.lstat(actual_dir)
if actual_dir_stat.st_uid != os.getuid() \
or not stat.S_ISDIR(actual_dir_stat.st_mode) \
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
raise RuntimeError('Temporary directory \'%s\' has an incorrect '
'owner, permissions, or type.' % actual_dir)
return actual_dir
def _get_cache_filename(self, bucket):
return path.join(self.directory, self.pattern % bucket.key)
def load_bytecode(self, bucket):
f = open_if_exists(self._get_cache_filename(bucket), 'rb')
if f is not None:
try:
bucket.load_bytecode(f)
finally:
f.close()
def dump_bytecode(self, bucket):
f = open(self._get_cache_filename(bucket), 'wb')
try:
bucket.write_bytecode(f)
finally:
f.close()
def clear(self):
# imported lazily here because google app-engine doesn't support
# write access on the file system and the function does not exist
# normally.
from os import remove
files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
for filename in files:
try:
remove(path.join(self.directory, filename))
except OSError:
pass
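def _filesystem_cache_demo():
    # Illustrative sketch, not part of the original module: a bytecode cache
    # becomes active once it is handed to the environment.  The loader path
    # and cache directory below are examples only.
    from jinja2 import Environment, FileSystemLoader
    return Environment(
        loader=FileSystemLoader('templates'),
        bytecode_cache=FileSystemBytecodeCache('/tmp/jinja_cache'),
    )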
class MemcachedBytecodeCache(BytecodeCache):
"""This class implements a bytecode cache that uses a memcache cache for
storing the information. It does not enforce a specific memcache library
(tummy's memcache or cmemcache) but will accept any class that provides
the minimal interface required.
Libraries compatible with this class:
- `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
- `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_
- `cmemcache <http://gijsbert.org/cmemcache/>`_
(Unfortunately the django cache interface is not compatible because it
does not support storing binary data, only unicode. You can however pass
the underlying cache client to the bytecode cache which is available
as `django.core.cache.cache._client`.)
The minimal interface for the client passed to the constructor is this:
.. class:: MinimalClientInterface
.. method:: set(key, value[, timeout])
Stores the bytecode in the cache. `value` is a string and
`timeout` the timeout of the key. If timeout is not provided
a default timeout or no timeout should be assumed, if it's
provided it's an integer with the number of seconds the cache
item should exist.
.. method:: get(key)
Returns the value for the cache key. If the item does not
exist in the cache the return value must be `None`.
The other arguments to the constructor are the prefix for all keys that
is added before the actual cache key and the timeout for the bytecode in
the cache system. We recommend a high (or no) timeout.
This bytecode cache does not support clearing of used items in the cache.
The clear method is a no-operation function.
.. versionadded:: 2.7
Added support for ignoring memcache errors through the
`ignore_memcache_errors` parameter.
"""
def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
ignore_memcache_errors=True):
self.client = client
self.prefix = prefix
self.timeout = timeout
self.ignore_memcache_errors = ignore_memcache_errors
def load_bytecode(self, bucket):
try:
code = self.client.get(self.prefix + bucket.key)
except Exception:
if not self.ignore_memcache_errors:
raise
code = None
if code is not None:
bucket.bytecode_from_string(code)
def dump_bytecode(self, bucket):
args = (self.prefix + bucket.key, bucket.bytecode_to_string())
if self.timeout is not None:
args += (self.timeout,)
try:
self.client.set(*args)
except Exception:
if not self.ignore_memcache_errors:
raise
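class _DictClientSketch(object):
    # Illustrative sketch, not part of the original module: the smallest
    # client satisfying the MinimalClientInterface documented above, backed
    # by a plain dict.  MemcachedBytecodeCache(_DictClientSketch()) could
    # then be handed to an Environment as its bytecode_cache.
    def __init__(self):
        self._store = {}
    def get(self, key):
        return self._store.get(key)
    def set(self, key, value, timeout=None):
        self._store[key] = value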
| mit |
ryfeus/lambda-packs | Selenium_PhantomJS/source/docutils/writers/docutils_xml.py | 108 | 6981 | # $Id: docutils_xml.py 7497 2012-08-16 15:17:29Z milde $
# Author: David Goodger, Paul Tremblay, Guenter Milde
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""
Simple document tree Writer, writes Docutils XML according to
http://docutils.sourceforge.net/docs/ref/docutils.dtd.
"""
__docformat__ = 'reStructuredText'
import sys
# Work around broken PyXML and obsolete python stdlib behaviour. (The stdlib
# replaces its own xml module with PyXML if the latter is installed. However,
# PyXML is no longer maintained and partially incompatible/buggy.) Reverse
# the order in which xml module and submodules are searched to import stdlib
# modules if they exist and PyXML modules if they do not exist in the stdlib.
#
# See http://sourceforge.net/tracker/index.php?func=detail&aid=3552403&group_id=38414&atid=422030
# and http://lists.fedoraproject.org/pipermail/python-devel/2012-July/000406.html
import xml
if "_xmlplus" in xml.__path__[0]: # PyXML sub-module
xml.__path__.reverse() # If both are available, prefer stdlib over PyXML
import xml.sax.saxutils
from StringIO import StringIO
import docutils
from docutils import frontend, writers, nodes
class RawXmlError(docutils.ApplicationError): pass
class Writer(writers.Writer):
supported = ('xml',)
"""Formats this writer supports."""
settings_spec = (
'"Docutils XML" Writer Options',
None,
(('Generate XML with newlines before and after tags.',
['--newlines'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Generate XML with indents and newlines.',
['--indents'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Omit the DOCTYPE declaration.',
['--no-doctype'],
{'dest': 'doctype_declaration', 'default': 1,
'action': 'store_false', 'validator': frontend.validate_boolean}),))
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
config_section = 'docutils_xml writer'
config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = XMLTranslator
def translate(self):
self.visitor = visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = ''.join(visitor.output)
class XMLTranslator(nodes.GenericNodeVisitor):
xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
# TODO: add stylesheet options similar to HTML and LaTeX writers?
#xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
doctype = (
'<!DOCTYPE document PUBLIC'
' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
generator = '<!-- Generated by Docutils %s -->\n'
xmlparser = xml.sax.make_parser()
"""SAX parser instance to check/exctract raw XML."""
xmlparser.setFeature(
"http://xml.org/sax/features/external-general-entities", True)
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
# Reporter
self.warn = self.document.reporter.warning
self.error = self.document.reporter.error
# Settings
self.settings = settings = document.settings
self.indent = self.newline = ''
if settings.newlines:
self.newline = '\n'
if settings.indents:
self.newline = '\n'
self.indent = ' '
self.level = 0 # indentation level
self.in_simple = 0 # level of nesting inside mixed-content elements
# Output
self.output = []
if settings.xml_declaration:
self.output.append(
self.xml_declaration % settings.output_encoding)
if settings.doctype_declaration:
self.output.append(self.doctype)
self.output.append(self.generator % docutils.__version__)
# initialize XML parser
        self.the_handle = TestXml()
self.xmlparser.setContentHandler(self.the_handle)
# generic visit and depart methods
# --------------------------------
def default_visit(self, node):
"""Default node visit method."""
if not self.in_simple:
self.output.append(self.indent*self.level)
self.output.append(node.starttag(xml.sax.saxutils.quoteattr))
self.level += 1
if isinstance(node, nodes.TextElement):
self.in_simple += 1
if not self.in_simple:
self.output.append(self.newline)
def default_departure(self, node):
"""Default node depart method."""
self.level -= 1
if not self.in_simple:
self.output.append(self.indent*self.level)
self.output.append(node.endtag())
if isinstance(node, nodes.TextElement):
self.in_simple -= 1
if not self.in_simple:
self.output.append(self.newline)
# specific visit and depart methods
# ---------------------------------
def visit_Text(self, node):
text = xml.sax.saxutils.escape(node.astext())
self.output.append(text)
def depart_Text(self, node):
pass
def visit_raw(self, node):
if 'xml' not in node.get('format', '').split():
# skip other raw content?
# raise nodes.SkipNode
self.default_visit(node)
return
# wrap in <raw> element
self.default_visit(node) # or not?
xml_string = node.astext()
self.output.append(xml_string)
self.default_departure(node) # or not?
# Check validity of raw XML:
if isinstance(xml_string, unicode) and sys.version_info < (3,):
xml_string = xml_string.encode('utf8')
try:
self.xmlparser.parse(StringIO(xml_string))
except xml.sax._exceptions.SAXParseException, error:
col_num = self.the_handle.locator.getColumnNumber()
line_num = self.the_handle.locator.getLineNumber()
srcline = node.line
if not isinstance(node.parent, nodes.TextElement):
srcline += 2 # directive content start line
msg = 'Invalid raw XML in column %d, line offset %d:\n%s' % (
col_num, line_num, node.astext())
self.warn(msg, source=node.source, line=srcline+line_num-1)
raise nodes.SkipNode # content already processed
class TestXml(xml.sax.ContentHandler):
def setDocumentLocator(self, locator):
self.locator = locator
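# Illustrative usage sketch (added for clarity; not part of the original
# module). The writer is normally reached through docutils' publisher front
# end under the writer name 'xml'; the settings keys mirror the settings_spec
# above, and the reST source string is an arbitrary example.
if __name__ == '__main__':
    from docutils.core import publish_string
    xml_text = publish_string(source='Hello *world*.\n',
                              writer_name='xml',
                              settings_overrides={'indents': True,
                                                  'xml_declaration': True})
    sys.stdout.write(xml_text)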
| mit |
abenzbiria/clients_odoo | addons/sale_mrp/tests/__init__.py | 114 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_move_explode
checks = [
test_move_explode,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ibm-security-intelligence/api-samples | siem/02_HideOffense.py | 1 | 4990 | #!/usr/bin/env python3
# This sample demonstrates how to use the siem endpoint in the
# REST API.
# This sample is interactive.
# For this scenario to work there must already be offenses on the system the
# sample is being run against.
# THIS SAMPLE WILL MAKE CHANGES TO THE OFFENSE IT IS RUN AGAINST
# The scenario demonstrates the following actions:
# - How to get offenses with the status OPEN using the filter parameter
# - How to get a single offense given the ID
# - How to decode data received and access the information
# - How to hide an offense
# To view a list of the endpoints with the parameters they accept, you can view
# the REST API interactive help page on your deployment at
# https://<hostname>/api_doc. You can also retrieve a list of available
# endpoints with the REST API itself at the /api/help/endpoints endpoint.
import json
import os
import sys
import importlib
sys.path.append(os.path.realpath('../modules'))
client_module = importlib.import_module('RestApiClient')
SampleUtilities = importlib.import_module('SampleUtilities')
def main():
# First we have to create our client
client = client_module.RestApiClient(version='6.0')
# Send in the request to GET all the OPEN offenses, but only showing some
# of the fields, enough to distinguish the offenses.
SampleUtilities.pretty_print_request(
client, 'siem/offenses?fields=id,description,status,offense_type,' +
'offense_source&filter=status=OPEN', 'GET')
response = client.call_api(
'siem/offenses?fields=id,description,status,offense_type,' +
'offense_source&filter=status=OPEN', 'GET')
# Print out the result
SampleUtilities.pretty_print_response(response)
if (response.code != 200):
print('Call Failed')
sys.exit(1)
# Prompt the user for an ID
offense_ID = input(
'Select an offense to hide. Please type its ID or quit. ')
# Error checking because we want to make sure the user has selected an OPEN
# offense.
while True:
if (offense_ID == 'quit'):
exit(0)
# Make the request to 'GET' the offense chosen by the user
SampleUtilities.pretty_print_request(client, 'siem/offenses/' +
str(offense_ID), 'GET')
response = client.call_api('siem/offenses/' + str(offense_ID), 'GET')
# Save a copy of the data, decoding it into a string so that
# we can read it
response_text = response.read().decode('utf-8')
# Check response code to see if the offense exists
if (response.code == 200):
# Reformat the data string into a dictionary so that we
# easily access the information.
response_body = json.loads(response_text)
# Ensure the offense is OPEN
if (response_body['status'] != 'OPEN'):
offense_ID = input('The offense you selected is not OPEN. ' +
'Please try again or type quit. ')
else:
# Only breaks when the ID exists and is OPEN
break
else:
offense_ID = input('An offense by that ID does not exist. ' +
'Please try again or type quit. ')
# Prints the response, which has already been decoded.
# **Only works on responses that have been decoded**
print(json.dumps(response_body, indent=4))
while True:
# As this sample uses data on your system, ensure that the user wants
# to hide the offense.
confirmation = input(
'Are you sure you want to hide this offense? ' +
'This will affect the status of the offense. (YES/no)\n')
if (confirmation == 'YES'):
# Sends in the POST request to update the offense. Also using
# fields to trim down the data received by POST.
SampleUtilities.pretty_print_request(
client, 'siem/offenses/' + offense_ID +
'?status=HIDDEN&fields=id,description,status,' +
'offense_type,offense_source', 'POST')
response = client.call_api(
'siem/offenses/' + offense_ID + '?status=HIDDEN' +
'&fields=id,description,status,offense_type,offense_source',
'POST')
# Prints the data received by POST
SampleUtilities.pretty_print_response(response)
if (response.code != 200):
print('Call Failed')
SampleUtilities.pretty_print_response(response)
sys.exit(1)
print('Offense ' + offense_ID + ' hidden')
break
elif (confirmation == 'no'):
print('You have decided not to hide offense ' + offense_ID +
'. This sample will now end.')
break
else:
print(confirmation + ' is not a valid response.')
if __name__ == "__main__":
main()
| apache-2.0 |
OrkoHunter/networkx | networkx/generators/nonisomorphic_trees.py | 43 | 5554 | """
Implementation of the Wright, Richmond, Odlyzko and McKay (WROM)
algorithm for the enumeration of all non-isomorphic free trees of a
given order. Rooted trees are represented by level sequences, i.e.,
lists in which the i-th element specifies the distance of vertex i to
the root.
"""
# Copyright (C) 2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__author__ = "\n".join(["Aric Hagberg (hagberg@lanl.gov)",
"Mridul Seth (seth.mridul@gmail.com)"])
__all__ = ['nonisomorphic_trees',
'number_of_nonisomorphic_trees']
import networkx as nx
def nonisomorphic_trees(order, create="graph"):
"""Returns a list of nonisomporphic trees
Parameters
----------
order : int
order of the desired tree(s)
    create : graph or matrix (default="graph")
       If graph is selected a list of trees will be returned,
       if matrix is selected a list of adjacency matrices will
       be returned
Returns
-------
G : List of NetworkX Graphs
M : List of Adjacency matrices
References
----------
"""
if order < 2:
raise ValueError
# start at the path graph rooted at its center
layout = list(range(order // 2 + 1)) + list(range(1, (order + 1) // 2))
while layout is not None:
layout = _next_tree(layout)
if layout is not None:
if create == "graph":
yield _layout_to_graph(layout)
elif create == "matrix":
yield _layout_to_matrix(layout)
layout = _next_rooted_tree(layout)
def number_of_nonisomorphic_trees(order):
"""Returns the number of nonisomorphic trees
Parameters
----------
order : int
order of the desired tree(s)
Returns
-------
length : Number of nonisomorphic graphs for the given order
References
----------
"""
return sum(1 for _ in nonisomorphic_trees(order))
def _next_rooted_tree(predecessor, p=None):
"""One iteration of the Beyer-Hedetniemi algorithm."""
if p is None:
p = len(predecessor) - 1
while predecessor[p] == 1:
p -= 1
if p == 0:
return None
q = p - 1
while predecessor[q] != predecessor[p] - 1:
q -= 1
result = list(predecessor)
for i in range(p, len(result)):
result[i] = result[i - p + q]
return result
def _next_tree(candidate):
"""One iteration of the Wright, Richmond, Odlyzko and McKay
algorithm."""
# valid representation of a free tree if:
# there are at least two vertices at layer 1
# (this is always the case because we start at the path graph)
left, rest = _split_tree(candidate)
# and the left subtree of the root
# is less high than the tree with the left subtree removed
left_height = max(left)
rest_height = max(rest)
valid = rest_height >= left_height
if valid and rest_height == left_height:
# and, if left and rest are of the same height,
# if left does not encompass more vertices
if len(left) > len(rest):
valid = False
        # and, if they have the same number of vertices,
# if left does not come after rest lexicographically
elif len(left) == len(rest) and left > rest:
valid = False
if valid:
return candidate
else:
# jump to the next valid free tree
p = len(left)
new_candidate = _next_rooted_tree(candidate, p)
if candidate[p] > 2:
new_left, new_rest = _split_tree(new_candidate)
new_left_height = max(new_left)
suffix = range(1, new_left_height + 2)
new_candidate[-len(suffix):] = suffix
return new_candidate
def _split_tree(layout):
"""Return a tuple of two layouts, one containing the left
subtree of the root vertex, and one containing the original tree
with the left subtree removed."""
one_found = False
m = None
for i in range(len(layout)):
if layout[i] == 1:
if one_found:
m = i
break
else:
one_found = True
if m is None:
m = len(layout)
left = [layout[i] - 1 for i in range(1, m)]
rest = [0] + [layout[i] for i in range(m, len(layout))]
return (left, rest)
def _layout_to_matrix(layout):
"""Create the adjacency matrix for the tree specified by the
given layout (level sequence)."""
result = [[0] * len(layout) for i in range(len(layout))]
stack = []
for i in range(len(layout)):
i_level = layout[i]
if stack:
j = stack[-1]
j_level = layout[j]
while j_level >= i_level:
stack.pop()
j = stack[-1]
j_level = layout[j]
result[i][j] = result[j][i] = 1
stack.append(i)
return result
def _layout_to_graph(layout):
"""Create a NetworkX Graph for the tree specified by the
given layout(level sequence)"""
result = [[0] * len(layout) for i in range(len(layout))]
G = nx.Graph()
stack = []
for i in range(len(layout)):
i_level = layout[i]
if stack:
j = stack[-1]
j_level = layout[j]
while j_level >= i_level:
stack.pop()
j = stack[-1]
j_level = layout[j]
G.add_edge(i, j)
stack.append(i)
return G
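# Illustrative usage sketch (added for clarity; not part of the original
# module): enumerate the free trees on 5 vertices; the expected count of 3
# free trees agrees with the usual enumeration (OEIS A000055).
if __name__ == '__main__':
    trees = list(nonisomorphic_trees(5))
    print(len(trees))                          # 3
    print(number_of_nonisomorphic_trees(5))    # 3
    for adjacency in nonisomorphic_trees(5, create="matrix"):
        print(adjacency)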
| bsd-3-clause |
Slezhuk/ansible | lib/ansible/modules/cloud/amazon/efs.py | 15 | 21079 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: efs
short_description: create and maintain EFS file systems
description:
- Module allows create, search and destroy Amazon EFS file systems
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
- "Artem Kazakov (@akazakov)"
options:
state:
description:
- Allows to create, search and destroy Amazon EFS file system
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Creation Token of Amazon EFS file system. Required for create. Either name or ID required for delete.
required: false
default: None
id:
description:
- ID of Amazon EFS. Either name or ID required for delete.
required: false
default: None
performance_mode:
description:
- File system's performance mode to use. Only takes effect during creation.
required: false
default: 'general_purpose'
choices: ['general_purpose', 'max_io']
tags:
description:
- "List of tags of Amazon EFS. Should be defined as dictionary
In case of 'present' state with list of tags and existing EFS (matched by 'name'), tags of EFS will be replaced with provided data."
required: false
default: None
targets:
description:
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- subnet_id - Mandatory. The ID of the subnet to add the mount target in.
- ip_address - Optional. A valid IPv4 address within the address range of the specified subnet.
- security_groups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified
This data may be modified for existing EFS using state 'present' and new list of mount targets."
required: false
default: None
wait:
description:
- "In case of 'present' state should wait for EFS 'available' life cycle state (of course, if current state not 'deleting' or 'deleted')
In case of 'absent' state should wait for EFS 'deleted' life cycle state"
required: false
default: "no"
choices: ["yes", "no"]
wait_timeout:
description:
- How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary.
required: false
default: 0
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
# EFS provisioning
- efs:
state: present
name: myTestEFS
tags:
name: myTestNameTag
purpose: file-storage
targets:
- subnet_id: subnet-748c5d03
security_groups: [ "sg-1a2b3c4d" ]
# Modifying EFS data
- efs:
state: present
name: myTestEFS
tags:
name: myAnotherTestTag
targets:
- subnet_id: subnet-7654fdca
security_groups: [ "sg-4c5d6f7a" ]
# Deleting EFS
- efs:
state: absent
name: myTestEFS
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned:
type: datetime
sample: 2015-11-16 07:30:57-05:00
creation_token:
description: EFS creation token
returned:
type: UUID
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned:
type: unique ID
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned:
type: str
sample: creating, available, deleting, deleted
mount_point:
description: url of file system
returned:
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned:
type: list of dicts
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned:
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned:
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned:
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned:
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned:
type: str
sample: "generalPurpose"
tags:
description: tags on the efs instance
returned:
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
import sys
from time import sleep
from time import time as timestamp
from collections import defaultdict
try:
from botocore.exceptions import ClientError
import boto3
HAS_BOTO3 = True
except ImportError as e:
HAS_BOTO3 = False
class EFSConnection(object):
DEFAULT_WAIT_TIMEOUT_SECONDS = 0
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
except Exception as e:
module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
self.region = region
self.wait = module.params.get('wait')
self.wait_timeout = module.params.get('wait_timeout')
def get_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
items = iterate_all(
'FileSystems',
self.connection.describe_file_systems,
**kwargs
)
for item in items:
item['CreationTime'] = str(item['CreationTime'])
"""
Suffix of network path to be used as NFS device for mount. More detail here:
http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
if item['LifeCycleState'] == self.STATE_AVAILABLE:
item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
else:
item['Tags'] = {}
item['MountTargets'] = []
yield item
def get_tags(self, **kwargs):
"""
Returns tag list for selected instance of EFS
"""
tags = iterate_all(
'Tags',
self.connection.describe_tags,
**kwargs
)
return dict((tag['Key'], tag['Value']) for tag in tags)
def get_mount_targets(self, **kwargs):
"""
Returns mount targets for selected instance of EFS
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
**kwargs
)
for target in targets:
if target['LifeCycleState'] == self.STATE_AVAILABLE:
target['SecurityGroups'] = list(self.get_security_groups(
MountTargetId=target['MountTargetId']
))
else:
target['SecurityGroups'] = []
yield target
def get_security_groups(self, **kwargs):
"""
Returns security groups for selected instance of EFS
"""
return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
)
def get_file_system_id(self, name):
"""
Returns ID of instance by instance name
"""
info = first_or_default(iterate_all(
'FileSystems',
self.connection.describe_file_systems,
CreationToken=name
))
return info and info['FileSystemId'] or None
def get_file_system_state(self, name, file_system_id=None):
"""
Returns state of filesystem by EFS id/name
"""
info = first_or_default(iterate_all(
'FileSystems',
self.connection.describe_file_systems,
CreationToken=name,
FileSystemId=file_system_id
))
return info and info['LifeCycleState'] or self.STATE_DELETED
def get_mount_targets_in_state(self, file_system_id, states=None):
"""
Returns states of mount targets of selected EFS with selected state(s) (optional)
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
FileSystemId=file_system_id
)
if states:
if not isinstance(states, list):
states = [states]
targets = filter(lambda target: target['LifeCycleState'] in states, targets)
return list(targets)
def create_file_system(self, name, performance_mode):
"""
Creates new filesystem with selected name
"""
changed = False
state = self.get_file_system_state(name)
if state in [self.STATE_DELETING, self.STATE_DELETED]:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_DELETED
)
self.connection.create_file_system(CreationToken=name, PerformanceMode=performance_mode)
changed = True
# we always wait for the state to be available when creating.
# if we try to take any actions on the file system before it's available
# we'll throw errors
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE,
self.wait_timeout
)
return changed
def converge_file_system(self, name, tags, targets):
"""
Change attributes (mount targets and tags) of filesystem by name
"""
result = False
fs_id = self.get_file_system_id(name)
if tags is not None:
tags_to_create, _, tags_to_delete = dict_diff(self.get_tags(FileSystemId=fs_id), tags)
if tags_to_delete:
self.connection.delete_tags(
FileSystemId=fs_id,
TagKeys=[item[0] for item in tags_to_delete]
)
result = True
if tags_to_create:
self.connection.create_tags(
FileSystemId=fs_id,
Tags=[{'Key': item[0], 'Value': item[1]} for item in tags_to_create]
)
result = True
if targets is not None:
incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0
)
index_by_subnet_id = lambda items: dict((item['SubnetId'], item) for item in items)
current_targets = index_by_subnet_id(self.get_mount_targets(FileSystemId=fs_id))
targets = index_by_subnet_id(targets)
targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
targets, True)
""" To modify mount target it should be deleted and created again """
changed = filter(
lambda sid: not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
current_targets[sid], targets[sid]), intersection)
targets_to_delete = list(targets_to_delete) + changed
targets_to_create = list(targets_to_create) + changed
if targets_to_delete:
for sid in targets_to_delete:
self.connection.delete_mount_target(
MountTargetId=current_targets[sid]['MountTargetId']
)
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0
)
result = True
if targets_to_create:
for sid in targets_to_create:
self.connection.create_mount_target(
FileSystemId=fs_id,
**targets[sid]
)
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0,
self.wait_timeout
)
result = True
security_groups_to_update = filter(
lambda sid: 'SecurityGroups' in targets[sid] and
current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups'],
intersection
)
if security_groups_to_update:
for sid in security_groups_to_update:
self.connection.modify_mount_target_security_groups(
MountTargetId=current_targets[sid]['MountTargetId'],
SecurityGroups=targets[sid]['SecurityGroups']
)
result = True
return result
def delete_file_system(self, name, file_system_id=None):
"""
Removes EFS instance by id/name
"""
result = False
state = self.get_file_system_state(name, file_system_id)
if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE
)
if not file_system_id:
file_system_id = self.get_file_system_id(name)
self.delete_mount_targets(file_system_id)
self.connection.delete_file_system(FileSystemId=file_system_id)
result = True
if self.wait:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_DELETED,
self.wait_timeout
)
return result
def delete_mount_targets(self, file_system_id):
"""
Removes mount targets by EFS id
"""
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
0
)
targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
for target in targets:
self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
0
)
return len(targets) > 0
def iterate_all(attr, map_method, **kwargs):
"""
Method creates iterator from boto result set
"""
args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
wait = 1
while True:
try:
data = map_method(**args)
for elm in data[attr]:
yield elm
if 'NextMarker' in data:
                args['Marker'] = data['NextMarker']
continue
break
except ClientError as e:
if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
sleep(wait)
wait = wait * 2
continue
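# Illustrative usage sketch (not part of the original module): the
# EFSConnection methods above stream paginated describe_* results through
# this helper, e.g.
#
#     for fs in iterate_all('FileSystems', connection.describe_file_systems):
#         print(fs['FileSystemId'])
#
# where `connection` is a boto3 EFS client; the loop follows 'NextMarker'
# pagination and backs off on ThrottlingException as implemented above.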
def targets_equal(keys, a, b):
"""
Method compare two mount targets by specified attributes
"""
for key in keys:
if key in b and a[key] != b[key]:
return False
return True
def dict_diff(dict1, dict2, by_key=False):
"""
Helper method to calculate difference of two dictionaries
"""
keys1 = set(dict1.keys() if by_key else dict1.items())
keys2 = set(dict2.keys() if by_key else dict2.items())
intersection = keys1 & keys2
return keys2 ^ intersection, intersection, keys1 ^ intersection
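# Worked example (illustrative; not part of the original module): with
#     dict1 = {'a': 1, 'b': 2}    and    dict2 = {'b': 2, 'c': 3}
# dict_diff(dict1, dict2) returns
#     ({('c', 3)}, {('b', 2)}, {('a', 1)})
# i.e. (items only in dict2, items in both, items only in dict1). With
# by_key=True the comparison uses the keys alone: ({'c'}, {'b'}, {'a'}).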
def first_or_default(items, default=None):
"""
Helper method to fetch first element of list (if exists)
"""
for item in items:
return item
return default
def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
"""
Helper method to wait for desired value returned by callback method
"""
wait_start = timestamp()
while True:
if callback() != value:
if timeout != 0 and (timestamp() - wait_start > timeout):
raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
else:
sleep(5)
continue
break
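# Illustrative usage sketch (not part of the original module), mirroring how
# EFSConnection uses this helper:
#
#     wait_for(lambda: self.get_file_system_state(name),
#              self.STATE_AVAILABLE, self.wait_timeout)
#
# polls the callback every 5 seconds until it returns the desired value, and
# raises RuntimeError once a non-zero timeout (in seconds) is exceeded.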
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
id=dict(required=False, type='str', default=None),
name=dict(required=False, type='str', default=None),
tags=dict(required=False, type="dict", default={}),
targets=dict(required=False, type="list", default=[]),
performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
wait=dict(required=False, type="bool", default=False),
wait_timeout=dict(required=False, type="int", default=0)
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
target_translations = {
'ip_address': 'IpAddress',
'security_groups': 'SecurityGroups',
'subnet_id': 'SubnetId'
}
targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
performance_mode_translations = {
'general_purpose': 'generalPurpose',
'max_io': 'maxIO'
}
performance_mode = performance_mode_translations[module.params.get('performance_mode')]
changed = False
state = str(module.params.get('state')).lower()
if state == 'present':
if not name:
module.fail_json(msg='Name parameter is required for create')
changed = connection.create_file_system(name, performance_mode)
changed = connection.converge_file_system(name=name, tags=tags, targets=targets) or changed
result = first_or_default(connection.get_file_systems(CreationToken=name))
elif state == 'absent':
if not name and not fs_id:
module.fail_json(msg='Either name or id parameter is required for delete')
changed = connection.delete_file_system(name, fs_id)
result = None
if result:
result = camel_dict_to_snake_dict(result)
module.exit_json(changed=changed, efs=result)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
jungle90/Openstack-Swift-I-O-throttler | build/lib.linux-x86_64-2.7/swift/common/ring/ring.py | 10 | 17490 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import cPickle as pickle
import inspect
from collections import defaultdict
from gzip import GzipFile
from os.path import getmtime
import struct
from time import time
import os
from io import BufferedReader
from hashlib import md5
from itertools import chain
from tempfile import NamedTemporaryFile
from swift.common.utils import hash_path, validate_configuration, json
from swift.common.ring.utils import tiers_for_dev
class RingData(object):
"""Partitioned consistent hashing ring data (used for serialization)."""
def __init__(self, replica2part2dev_id, devs, part_shift):
self.devs = devs
self._replica2part2dev_id = replica2part2dev_id
self._part_shift = part_shift
for dev in self.devs:
if dev is not None:
dev.setdefault("region", 1)
@classmethod
def deserialize_v1(cls, gz_file):
json_len, = struct.unpack('!I', gz_file.read(4))
ring_dict = json.loads(gz_file.read(json_len))
ring_dict['replica2part2dev_id'] = []
partition_count = 1 << (32 - ring_dict['part_shift'])
for x in xrange(ring_dict['replica_count']):
ring_dict['replica2part2dev_id'].append(
array.array('H', gz_file.read(2 * partition_count)))
return ring_dict
@classmethod
def load(cls, filename):
"""
Load ring data from a file.
:param filename: Path to a file serialized by the save() method.
:returns: A RingData instance containing the loaded data.
"""
gz_file = GzipFile(filename, 'rb')
# Python 2.6 GzipFile doesn't support BufferedIO
if hasattr(gz_file, '_checkReadable'):
gz_file = BufferedReader(gz_file)
# See if the file is in the new format
magic = gz_file.read(4)
if magic == 'R1NG':
version, = struct.unpack('!H', gz_file.read(2))
if version == 1:
ring_data = cls.deserialize_v1(gz_file)
else:
raise Exception('Unknown ring format version %d' % version)
else:
# Assume old-style pickled ring
gz_file.seek(0)
ring_data = pickle.load(gz_file)
if not hasattr(ring_data, 'devs'):
ring_data = RingData(ring_data['replica2part2dev_id'],
ring_data['devs'], ring_data['part_shift'])
return ring_data
def serialize_v1(self, file_obj):
# Write out new-style serialization magic and version:
file_obj.write(struct.pack('!4sH', 'R1NG', 1))
ring = self.to_dict()
json_encoder = json.JSONEncoder(sort_keys=True)
json_text = json_encoder.encode(
{'devs': ring['devs'], 'part_shift': ring['part_shift'],
'replica_count': len(ring['replica2part2dev_id'])})
json_len = len(json_text)
file_obj.write(struct.pack('!I', json_len))
file_obj.write(json_text)
for part2dev_id in ring['replica2part2dev_id']:
file_obj.write(part2dev_id.tostring())
def save(self, filename, mtime=1300507380.0):
"""
Serialize this RingData instance to disk.
:param filename: File into which this instance should be serialized.
:param mtime: time used to override mtime for gzip, default or None
if the caller wants to include time
"""
# Override the timestamp so that the same ring data creates
# the same bytes on disk. This makes a checksum comparison a
# good way to see if two rings are identical.
#
# This only works on Python 2.7; on 2.6, we always get the
# current time in the gzip output.
tempf = NamedTemporaryFile(dir=".", prefix=filename, delete=False)
if 'mtime' in inspect.getargspec(GzipFile.__init__).args:
gz_file = GzipFile(filename, mode='wb', fileobj=tempf,
mtime=mtime)
else:
gz_file = GzipFile(filename, mode='wb', fileobj=tempf)
self.serialize_v1(gz_file)
gz_file.close()
tempf.flush()
os.fsync(tempf.fileno())
tempf.close()
os.chmod(tempf.name, 0o644)
os.rename(tempf.name, filename)
def to_dict(self):
return {'devs': self.devs,
'replica2part2dev_id': self._replica2part2dev_id,
'part_shift': self._part_shift}
class Ring(object):
"""
Partitioned consistent hashing ring.
:param serialized_path: path to serialized RingData instance
:param reload_time: time interval in seconds to check for a ring change
"""
def __init__(self, serialized_path, reload_time=15, ring_name=None):
# can't use the ring unless HASH_PATH_SUFFIX is set
validate_configuration()
if ring_name:
self.serialized_path = os.path.join(serialized_path,
ring_name + '.ring.gz')
else:
self.serialized_path = os.path.join(serialized_path)
self.reload_time = reload_time
self._reload(force=True)
def _reload(self, force=False):
self._rtime = time() + self.reload_time
if force or self.has_changed():
ring_data = RingData.load(self.serialized_path)
self._mtime = getmtime(self.serialized_path)
self._devs = ring_data.devs
# NOTE(akscram): Replication parameters like replication_ip
# and replication_port are required for
# replication process. An old replication
# ring doesn't contain this parameters into
# device. Old-style pickled rings won't have
# region information.
for dev in self._devs:
if dev:
dev.setdefault('region', 1)
if 'ip' in dev:
dev.setdefault('replication_ip', dev['ip'])
if 'port' in dev:
dev.setdefault('replication_port', dev['port'])
self._replica2part2dev_id = ring_data._replica2part2dev_id
self._part_shift = ring_data._part_shift
self._rebuild_tier_data()
# Do this now, when we know the data has changed, rather than
# doing it on every call to get_more_nodes().
regions = set()
zones = set()
ip_ports = set()
self._num_devs = 0
for dev in self._devs:
if dev:
regions.add(dev['region'])
zones.add((dev['region'], dev['zone']))
ip_ports.add((dev['region'], dev['zone'],
dev['ip'], dev['port']))
self._num_devs += 1
self._num_regions = len(regions)
self._num_zones = len(zones)
self._num_ip_ports = len(ip_ports)
def _rebuild_tier_data(self):
self.tier2devs = defaultdict(list)
for dev in self._devs:
if not dev:
continue
for tier in tiers_for_dev(dev):
self.tier2devs[tier].append(dev)
tiers_by_length = defaultdict(list)
for tier in self.tier2devs:
tiers_by_length[len(tier)].append(tier)
self.tiers_by_length = sorted(tiers_by_length.values(),
key=lambda x: len(x[0]))
for tiers in self.tiers_by_length:
tiers.sort()
@property
def replica_count(self):
"""Number of replicas (full or partial) used in the ring."""
return len(self._replica2part2dev_id)
@property
def partition_count(self):
"""Number of partitions in the ring."""
return len(self._replica2part2dev_id[0])
@property
def devs(self):
"""devices in the ring"""
if time() > self._rtime:
self._reload()
return self._devs
def has_changed(self):
"""
Check to see if the ring on disk is different than the current one in
memory.
:returns: True if the ring on disk has changed, False otherwise
"""
return getmtime(self.serialized_path) != self._mtime
def _get_part_nodes(self, part):
part_nodes = []
seen_ids = set()
for r2p2d in self._replica2part2dev_id:
if part < len(r2p2d):
dev_id = r2p2d[part]
if dev_id not in seen_ids:
part_nodes.append(self.devs[dev_id])
seen_ids.add(dev_id)
return [dict(node, index=i) for i, node in enumerate(part_nodes)]
def get_part(self, account, container=None, obj=None):
"""
Get the partition for an account/container/object.
:param account: account name
:param container: container name
:param obj: object name
:returns: the partition number
"""
key = hash_path(account, container, obj, raw_digest=True)
if time() > self._rtime:
self._reload()
part = struct.unpack_from('>I', key)[0] >> self._part_shift
return part
def get_part_nodes(self, part):
"""
Get the nodes that are responsible for the partition. If one
node is responsible for more than one replica of the same
partition, it will only appear in the output once.
:param part: partition to get nodes for
:returns: list of node dicts
See :func:`get_nodes` for a description of the node dicts.
"""
if time() > self._rtime:
self._reload()
return self._get_part_nodes(part)
def get_nodes(self, account, container=None, obj=None):
"""
Get the partition and nodes for an account/container/object.
If a node is responsible for more than one replica, it will
only appear in the output once.
:param account: account name
:param container: container name
:param obj: object name
:returns: a tuple of (partition, list of node dicts)
Each node dict will have at least the following keys:
====== ===============================================================
id unique integer identifier amongst devices
index offset into the primary node list for the partition
weight a float of the relative weight of this device as compared to
others; this indicates how many partitions the builder will try
to assign to this device
zone integer indicating which zone the device is in; a given
partition will not be assigned to multiple devices within the
same zone
ip the ip address of the device
port the tcp port of the device
device the device's name on disk (sdb1, for example)
meta general use 'extra' field; for example: the online date, the
hardware description
====== ===============================================================
"""
part = self.get_part(account, container, obj)
return part, self._get_part_nodes(part)
def get_more_nodes(self, part):
"""
Generator to get extra nodes for a partition for hinted handoff.
The handoff nodes will try to be in zones other than the
primary zones, will take into account the device weights, and
will usually keep the same sequences of handoffs even with
ring changes.
:param part: partition to get handoff nodes for
:returns: generator of node dicts
See :func:`get_nodes` for a description of the node dicts.
"""
if time() > self._rtime:
self._reload()
primary_nodes = self._get_part_nodes(part)
used = set(d['id'] for d in primary_nodes)
same_regions = set(d['region'] for d in primary_nodes)
same_zones = set((d['region'], d['zone']) for d in primary_nodes)
same_ip_ports = set((d['region'], d['zone'], d['ip'], d['port'])
for d in primary_nodes)
parts = len(self._replica2part2dev_id[0])
start = struct.unpack_from(
'>I', md5(str(part)).digest())[0] >> self._part_shift
inc = int(parts / 65536) or 1
# Multiple loops for execution speed; the checks and bookkeeping get
# simpler as you go along
hit_all_regions = len(same_regions) == self._num_regions
for handoff_part in chain(xrange(start, parts, inc),
xrange(inc - ((parts - start) % inc),
start, inc)):
if hit_all_regions:
# At this point, there are no regions left untouched, so we
# can stop looking.
break
for part2dev_id in self._replica2part2dev_id:
if handoff_part < len(part2dev_id):
dev_id = part2dev_id[handoff_part]
dev = self._devs[dev_id]
region = dev['region']
if dev_id not in used and region not in same_regions:
yield dev
used.add(dev_id)
same_regions.add(region)
zone = dev['zone']
ip_port = (region, zone, dev['ip'], dev['port'])
same_zones.add((region, zone))
same_ip_ports.add(ip_port)
if len(same_regions) == self._num_regions:
hit_all_regions = True
break
hit_all_zones = len(same_zones) == self._num_zones
for handoff_part in chain(xrange(start, parts, inc),
xrange(inc - ((parts - start) % inc),
start, inc)):
if hit_all_zones:
# Much like we stopped looking for fresh regions before, we
# can now stop looking for fresh zones; there are no more.
break
for part2dev_id in self._replica2part2dev_id:
if handoff_part < len(part2dev_id):
dev_id = part2dev_id[handoff_part]
dev = self._devs[dev_id]
zone = (dev['region'], dev['zone'])
if dev_id not in used and zone not in same_zones:
yield dev
used.add(dev_id)
same_zones.add(zone)
ip_port = zone + (dev['ip'], dev['port'])
same_ip_ports.add(ip_port)
if len(same_zones) == self._num_zones:
hit_all_zones = True
break
hit_all_ip_ports = len(same_ip_ports) == self._num_ip_ports
for handoff_part in chain(xrange(start, parts, inc),
xrange(inc - ((parts - start) % inc),
start, inc)):
if hit_all_ip_ports:
# We've exhausted the pool of unused backends, so stop
# looking.
break
for part2dev_id in self._replica2part2dev_id:
if handoff_part < len(part2dev_id):
dev_id = part2dev_id[handoff_part]
dev = self._devs[dev_id]
ip_port = (dev['region'], dev['zone'],
dev['ip'], dev['port'])
if dev_id not in used and ip_port not in same_ip_ports:
yield dev
used.add(dev_id)
same_ip_ports.add(ip_port)
if len(same_ip_ports) == self._num_ip_ports:
hit_all_ip_ports = True
break
hit_all_devs = len(used) == self._num_devs
for handoff_part in chain(xrange(start, parts, inc),
xrange(inc - ((parts - start) % inc),
start, inc)):
if hit_all_devs:
# We've used every device we have, so let's stop looking for
# unused devices now.
break
for part2dev_id in self._replica2part2dev_id:
if handoff_part < len(part2dev_id):
dev_id = part2dev_id[handoff_part]
if dev_id not in used:
yield self._devs[dev_id]
used.add(dev_id)
if len(used) == self._num_devs:
hit_all_devs = True
break
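# Illustrative usage sketch (added for clarity; not part of the original
# module). Assumes a serialized object ring at /etc/swift/object.ring.gz and
# a configured hash path suffix/prefix (validate_configuration() requires
# one); the account, container and object names are arbitrary examples.
if __name__ == '__main__':
    ring = Ring('/etc/swift', ring_name='object')
    part, nodes = ring.get_nodes('AUTH_test', 'photos', 'cat.jpg')
    print('partition %d has %d distinct primary nodes' % (part, len(nodes)))
    for handoff in ring.get_more_nodes(part):
        print('first handoff: %(ip)s:%(port)s/%(device)s' % handoff)
        break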
| apache-2.0 |
Philippe12/external_chromium_org | chrome/common/extensions/docs/server2/cron_servlet.py | 23 | 11814 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import (
GetAppVersion, IsDeadlineExceededError, logservice)
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from data_source_registry import CreateDataSources
from environment import IsDevServer
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from file_system_util import CreateURLsFromPaths
from future import Gettable, Future
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from timer import Timer, TimerClosure
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
def __init__(self, server_instance):
self._server_instance = server_instance
def CreateServerInstance(self):
return self._server_instance
class _CronLogger(object):
'''Wraps the logging.* methods to prefix them with 'cron' and flush
immediately. The flushing is important because often these cron runs time
out and we lose the logs.
'''
def info(self, msg, *args): self._log(logging.info, msg, args)
def warning(self, msg, *args): self._log(logging.warning, msg, args)
def error(self, msg, *args): self._log(logging.error, msg, args)
def _log(self, logfn, msg, args):
try:
logfn('cron: %s' % msg, *args)
finally:
logservice.flush()
_cronlog = _CronLogger()
def _RequestEachItem(title, items, request_callback):
'''Runs a task |request_callback| named |title| for each item in |items|.
|request_callback| must take an item and return a servlet response.
Returns true if every item was successfully run, false if any return a
non-200 response or raise an exception.
'''
_cronlog.info('%s: starting', title)
success_count, failure_count = 0, 0
timer = Timer()
try:
for i, item in enumerate(items):
def error_message(detail):
return '%s: error rendering %s (%s of %s): %s' % (
title, item, i + 1, len(items), detail)
try:
response = request_callback(item)
if response.status == 200:
success_count += 1
else:
_cronlog.error(error_message('response status %s' % response.status))
failure_count += 1
except Exception as e:
_cronlog.error(error_message(traceback.format_exc()))
failure_count += 1
if IsDeadlineExceededError(e): raise
finally:
_cronlog.info('%s: rendered %s of %s with %s failures in %s',
title, success_count, len(items), failure_count,
timer.Stop().FormatElapsed())
return success_count == len(items)
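# Illustrative usage sketch (not part of the original file): _GetImpl below
# feeds this helper through request_files_in_dir, conceptually
#
#   _RequestEachItem('public templates', public_template_paths, render)
#
# where |render| issues a RenderServlet request for a single path and the
# boolean result reports whether every path rendered with HTTP 200.
# |public_template_paths| stands in for the file list built from the trunk
# file system and is an assumption here.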
class CronServlet(Servlet):
'''Servlet which runs a cron job.
'''
def __init__(self, request, delegate_for_test=None):
Servlet.__init__(self, request)
self._delegate = delegate_for_test or CronServlet.Delegate()
class Delegate(object):
'''CronServlet's runtime dependencies. Override for testing.
'''
def CreateBranchUtility(self, object_store_creator):
return BranchUtility.Create(object_store_creator)
def CreateHostFileSystemProvider(self,
object_store_creator,
max_trunk_revision=None):
return HostFileSystemProvider(object_store_creator,
max_trunk_revision=max_trunk_revision)
def CreateGithubFileSystemProvider(self, object_store_creator):
return GithubFileSystemProvider(object_store_creator)
def GetAppVersion(self):
return GetAppVersion()
def Get(self):
# Crons often time out, and if they do we need to make sure to flush the
# logs before the process gets killed (Python gives us a couple of
# seconds).
#
# So, manually flush logs at the end of the cron run. However, sometimes
# even that isn't enough, which is why in this file we use _cronlog and
# make it flush the log every time its used.
logservice.AUTOFLUSH_ENABLED = False
try:
return self._GetImpl()
except BaseException:
_cronlog.error('Caught top-level exception! %s', traceback.format_exc())
finally:
logservice.flush()
def _GetImpl(self):
# Cron strategy:
#
# Find all public template files and static files, and render them. Most of
# the time these won't have changed since the last cron run, so it's a
# little wasteful, but hopefully rendering is really fast (if it isn't we
# have a problem).
_cronlog.info('starting')
# This is returned every time RenderServlet wants to create a new
# ServerInstance.
#
# TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
# everything. Need retry logic at the fetcher level.
server_instance = self._GetSafeServerInstance()
trunk_fs = server_instance.host_file_system_provider.GetTrunk()
def render(path):
request = Request(path, self._request.host, self._request.headers)
delegate = _SingletonRenderServletDelegate(server_instance)
return RenderServlet(request, delegate).Get()
def request_files_in_dir(path, prefix=''):
'''Requests every file found under |path| in this host file system, with
a request prefix of |prefix|.
'''
files = [name for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)]
return _RequestEachItem(path, files, render)
results = []
try:
# Start running the hand-written Cron methods first; they can be run in
# parallel. They are resolved at the end.
def run_cron_for_future(target):
title = target.__class__.__name__
future, init_timer = TimerClosure(target.Cron)
assert isinstance(future, Future), (
'%s.Cron() did not return a Future' % title)
def resolve():
resolve_timer = Timer()
try:
future.Get()
except Exception as e:
_cronlog.error('%s: error %s' % (title, traceback.format_exc()))
results.append(False)
if IsDeadlineExceededError(e): raise
finally:
resolve_timer.Stop()
_cronlog.info('%s took %s: %s to initialize and %s to resolve' %
(title,
init_timer.With(resolve_timer).FormatElapsed(),
init_timer.FormatElapsed(),
resolve_timer.FormatElapsed()))
return Future(delegate=Gettable(resolve))
targets = (CreateDataSources(server_instance).values() +
[server_instance.content_providers])
title = 'initializing %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
cron_futures = [run_cron_for_future(target) for target in targets]
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
# Rendering the public templates will also pull in all of the private
# templates.
results.append(request_files_in_dir(PUBLIC_TEMPLATES))
# Rendering the public templates will have pulled in the .js and
# manifest.json files (for listing examples on the API reference pages),
# but there are still images, CSS, etc.
results.append(request_files_in_dir(STATIC_DOCS, prefix='static'))
# Samples are too expensive to run on the dev server, where there is no
# parallel fetch.
if not IsDevServer():
# Fetch each individual sample file.
results.append(request_files_in_dir(EXAMPLES,
prefix='extensions/examples'))
# Fetch the zip file of each example (contains all the individual
# files).
example_zips = []
for root, _, files in trunk_fs.Walk(EXAMPLES):
example_zips.extend(
root + '.zip' for name in files if name == 'manifest.json')
results.append(_RequestEachItem(
'example zips',
example_zips,
lambda path: render('extensions/examples/' + path)))
# Resolve the hand-written Cron method futures.
title = 'resolving %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
for future in cron_futures:
future.Get()
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
except:
results.append(False)
# This should never actually happen (each cron step does its own
# conservative error checking), so re-raise no matter what it is.
_cronlog.error('uncaught error: %s' % traceback.format_exc())
raise
finally:
success = all(results)
_cronlog.info('finished (%s)', 'success' if success else 'FAILED')
return (Response.Ok('Success') if success else
Response.InternalError('Failure'))
def _GetSafeServerInstance(self):
'''Returns a ServerInstance with a host file system at a safe revision,
meaning the last revision that the current running version of the server
existed.
'''
delegate = self._delegate
# IMPORTANT: Get a ServerInstance pinned to the most recent revision, not
# HEAD. These cron jobs take a while and run very frequently such that
# there is usually one running at any given time, and eventually a file
# that we're dealing with will change underneath it, putting the server in
# an undefined state.
server_instance_near_head = self._CreateServerInstance(
self._GetMostRecentRevision())
app_yaml_handler = AppYamlHelper(
server_instance_near_head.object_store_creator,
server_instance_near_head.host_file_system_provider)
if app_yaml_handler.IsUpToDate(delegate.GetAppVersion()):
return server_instance_near_head
# The version in app.yaml is greater than the currently running app's.
# The safe version is the one before it changed.
safe_revision = app_yaml_handler.GetFirstRevisionGreaterThan(
delegate.GetAppVersion()) - 1
_cronlog.info('app version %s is out of date, safe is %s',
delegate.GetAppVersion(), safe_revision)
return self._CreateServerInstance(safe_revision)
def _GetMostRecentRevision(self):
'''Gets the revision of the most recent patch submitted to the host file
system. This is similar to HEAD but it's a concrete revision so won't
change as the cron runs.
'''
head_fs = (
self._CreateServerInstance(None).host_file_system_provider.GetTrunk())
return head_fs.Stat('').version
def _CreateServerInstance(self, revision):
'''Creates a ServerInstance pinned to |revision|, or HEAD if None.
NOTE: If passed None it's likely that during the cron run patches will be
submitted at HEAD, which may change data underneath the cron run.
'''
object_store_creator = ObjectStoreCreator(start_empty=True)
branch_utility = self._delegate.CreateBranchUtility(object_store_creator)
host_file_system_provider = self._delegate.CreateHostFileSystemProvider(
object_store_creator, max_trunk_revision=revision)
github_file_system_provider = self._delegate.CreateGithubFileSystemProvider(
object_store_creator)
return ServerInstance(object_store_creator,
CompiledFileSystem.Factory(object_store_creator),
branch_utility,
host_file_system_provider,
github_file_system_provider)
| bsd-3-clause |
florian-dacosta/OpenUpgrade | addons/fleet/fleet.py | 33 | 47691 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import time
import datetime
from openerp import tools
from openerp.osv.orm import except_orm
from openerp.tools.translate import _
from dateutil.relativedelta import relativedelta
def str_to_datetime(strdate):
return datetime.datetime.strptime(strdate, tools.DEFAULT_SERVER_DATE_FORMAT)
class fleet_vehicle_cost(osv.Model):
_name = 'fleet.vehicle.cost'
_description = 'Cost related to a vehicle'
_order = 'date desc, vehicle_id asc'
def _get_odometer(self, cr, uid, ids, odometer_id, arg, context):
res = dict.fromkeys(ids, False)
for record in self.browse(cr,uid,ids,context=context):
if record.odometer_id:
res[record.id] = record.odometer_id.value
return res
def _set_odometer(self, cr, uid, id, name, value, args=None, context=None):
if not value:
raise except_orm(_('Operation not allowed!'), _('Emptying the odometer value of a vehicle is not allowed.'))
date = self.browse(cr, uid, id, context=context).date
if not(date):
date = fields.date.context_today(self, cr, uid, context=context)
vehicle_id = self.browse(cr, uid, id, context=context).vehicle_id
data = {'value': value, 'date': date, 'vehicle_id': vehicle_id.id}
odometer_id = self.pool.get('fleet.vehicle.odometer').create(cr, uid, data, context=context)
return self.write(cr, uid, id, {'odometer_id': odometer_id}, context=context)
def _year_get_fnc(self, cr, uid, ids, name, unknow_none, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
if (record.date):
res[record.id] = str(time.strptime(record.date, tools.DEFAULT_SERVER_DATE_FORMAT).tm_year)
else:
res[record.id] = _('Unknown')
return res
_columns = {
'name': fields.related('vehicle_id', 'name', type="char", string='Name', store=True),
'vehicle_id': fields.many2one('fleet.vehicle', 'Vehicle', required=True, help='Vehicle concerned by this log'),
'cost_subtype_id': fields.many2one('fleet.service.type', 'Type', help='Cost type purchased with this cost'),
'amount': fields.float('Total Price'),
'cost_type': fields.selection([('contract', 'Contract'), ('services','Services'), ('fuel','Fuel'), ('other','Other')], 'Category of the cost', help='For internal purpose only', required=True),
'parent_id': fields.many2one('fleet.vehicle.cost', 'Parent', help='Parent cost to this current cost'),
'cost_ids': fields.one2many('fleet.vehicle.cost', 'parent_id', 'Included Services'),
'odometer_id': fields.many2one('fleet.vehicle.odometer', 'Odometer', help='Odometer measure of the vehicle at the moment of this log'),
'odometer': fields.function(_get_odometer, fnct_inv=_set_odometer, type='float', string='Odometer Value', help='Odometer measure of the vehicle at the moment of this log'),
'odometer_unit': fields.related('vehicle_id', 'odometer_unit', type="char", string="Unit", readonly=True),
'date' :fields.date('Date',help='Date when the cost has been executed'),
'contract_id': fields.many2one('fleet.vehicle.log.contract', 'Contract', help='Contract attached to this cost'),
'auto_generated': fields.boolean('Automatically Generated', readonly=True, required=True),
'year': fields.function(_year_get_fnc, type="char", string='Year', store=True),
}
_defaults ={
'cost_type': 'other',
}
def create(self, cr, uid, data, context=None):
#make sure that the data are consistent with values of parent and contract records given
if 'parent_id' in data and data['parent_id']:
parent = self.browse(cr, uid, data['parent_id'], context=context)
data['vehicle_id'] = parent.vehicle_id.id
data['date'] = parent.date
data['cost_type'] = parent.cost_type
if 'contract_id' in data and data['contract_id']:
contract = self.pool.get('fleet.vehicle.log.contract').browse(cr, uid, data['contract_id'], context=context)
data['vehicle_id'] = contract.vehicle_id.id
data['cost_subtype_id'] = contract.cost_subtype_id.id
data['cost_type'] = contract.cost_type
if 'odometer' in data and not data['odometer']:
#if received value for odometer is 0, then remove it from the data as it would result to the creation of a
#odometer log with 0, which is to be avoided
del(data['odometer'])
return super(fleet_vehicle_cost, self).create(cr, uid, data, context=context)
class fleet_vehicle_tag(osv.Model):
_name = 'fleet.vehicle.tag'
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
class fleet_vehicle_state(osv.Model):
_name = 'fleet.vehicle.state'
_order = 'sequence asc'
_columns = {
'name': fields.char('Name', required=True),
        'sequence': fields.integer('Sequence', help="Used to order the vehicle states")
}
_sql_constraints = [('fleet_state_name_unique','unique(name)', 'State name already exists')]
class fleet_vehicle_model(osv.Model):
def _model_name_get_fnc(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
name = record.modelname
if record.brand_id.name:
name = record.brand_id.name + ' / ' + name
res[record.id] = name
return res
def on_change_brand(self, cr, uid, ids, model_id, context=None):
if not model_id:
return {'value': {'image_medium': False}}
brand = self.pool.get('fleet.vehicle.model.brand').browse(cr, uid, model_id, context=context)
return {
'value': {
'image_medium': brand.image,
}
}
_name = 'fleet.vehicle.model'
_description = 'Model of a vehicle'
_order = 'name asc'
_columns = {
'name': fields.function(_model_name_get_fnc, type="char", string='Name', store=True),
'modelname': fields.char('Model name', size=32, required=True),
'brand_id': fields.many2one('fleet.vehicle.model.brand', 'Model Brand', required=True, help='Brand of the vehicle'),
'vendors': fields.many2many('res.partner', 'fleet_vehicle_model_vendors', 'model_id', 'partner_id', string='Vendors'),
'image': fields.related('brand_id', 'image', type="binary", string="Logo"),
'image_medium': fields.related('brand_id', 'image_medium', type="binary", string="Logo (medium)"),
'image_small': fields.related('brand_id', 'image_small', type="binary", string="Logo (small)"),
}
class fleet_vehicle_model_brand(osv.Model):
_name = 'fleet.vehicle.model.brand'
_description = 'Brand model of the vehicle'
_order = 'name asc'
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Brand Name', size=64, required=True),
'image': fields.binary("Logo",
help="This field holds the image used as logo for the brand, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store = {
'fleet.vehicle.model.brand': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized logo of the brand. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Smal-sized photo", type="binary", multi="_get_image",
store = {
'fleet.vehicle.model.brand': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the brand. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
}
class fleet_vehicle(osv.Model):
_inherit = 'mail.thread'
def _vehicle_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = record.model_id.brand_id.name + '/' + record.model_id.modelname + ' / ' + record.license_plate
return res
def return_action_to_open(self, cr, uid, ids, context=None):
""" This opens the xml view specified in xml_id for the current vehicle """
if context is None:
context = {}
if context.get('xml_id'):
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid ,'fleet', context['xml_id'], context=context)
res['context'] = context
res['context'].update({'default_vehicle_id': ids[0]})
res['domain'] = [('vehicle_id','=', ids[0])]
return res
return False
def act_show_log_cost(self, cr, uid, ids, context=None):
""" This opens log view to view and add new log for this vehicle, groupby default to only show effective costs
@return: the costs log view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid ,'fleet','fleet_vehicle_costs_act', context=context)
res['context'] = context
res['context'].update({
'default_vehicle_id': ids[0],
'search_default_parent_false': True
})
res['domain'] = [('vehicle_id','=', ids[0])]
return res
def _get_odometer(self, cr, uid, ids, odometer_id, arg, context):
res = dict.fromkeys(ids, 0)
for record in self.browse(cr,uid,ids,context=context):
ids = self.pool.get('fleet.vehicle.odometer').search(cr, uid, [('vehicle_id', '=', record.id)], limit=1, order='value desc')
if len(ids) > 0:
res[record.id] = self.pool.get('fleet.vehicle.odometer').browse(cr, uid, ids[0], context=context).value
return res
def _set_odometer(self, cr, uid, id, name, value, args=None, context=None):
if value:
date = fields.date.context_today(self, cr, uid, context=context)
data = {'value': value, 'date': date, 'vehicle_id': id}
return self.pool.get('fleet.vehicle.odometer').create(cr, uid, data, context=context)
def _search_get_overdue_contract_reminder(self, cr, uid, obj, name, args, context):
res = []
for field, operator, value in args:
assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
if (operator == '=' and value == True) or (operator in ('<>', '!=') and value == False):
search_operator = 'in'
else:
search_operator = 'not in'
today = fields.date.context_today(self, cr, uid, context=context)
cr.execute('select cost.vehicle_id, count(contract.id) as contract_number FROM fleet_vehicle_cost cost left join fleet_vehicle_log_contract contract on contract.cost_id = cost.id WHERE contract.expiration_date is not null AND contract.expiration_date < %s AND contract.state IN (\'open\', \'toclose\') GROUP BY cost.vehicle_id', (today,))
res_ids = [x[0] for x in cr.fetchall()]
res.append(('id', search_operator, res_ids))
return res
def _search_contract_renewal_due_soon(self, cr, uid, obj, name, args, context):
res = []
for field, operator, value in args:
assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
if (operator == '=' and value == True) or (operator in ('<>', '!=') and value == False):
search_operator = 'in'
else:
search_operator = 'not in'
today = fields.date.context_today(self, cr, uid, context=context)
datetime_today = datetime.datetime.strptime(today, tools.DEFAULT_SERVER_DATE_FORMAT)
limit_date = str((datetime_today + relativedelta(days=+15)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT))
cr.execute('select cost.vehicle_id, count(contract.id) as contract_number FROM fleet_vehicle_cost cost left join fleet_vehicle_log_contract contract on contract.cost_id = cost.id WHERE contract.expiration_date is not null AND contract.expiration_date > %s AND contract.expiration_date < %s AND contract.state IN (\'open\', \'toclose\') GROUP BY cost.vehicle_id', (today, limit_date))
res_ids = [x[0] for x in cr.fetchall()]
res.append(('id', search_operator, res_ids))
return res
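    # The two fnct_search helpers above make the computed booleans declared further
    # down in _columns usable in an ordinary search domain; a sketch of the intended
    # usage (the domain value is illustrative):
    #
    #   vehicle_obj = self.pool.get('fleet.vehicle')
    #   overdue_ids = vehicle_obj.search(cr, uid,
    #       [('contract_renewal_overdue', '=', True)], context=context)
    #
    # The domain is rewritten into the ('id', 'in', [...]) clause returned above.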
def _get_contract_reminder_fnc(self, cr, uid, ids, field_names, unknow_none, context=None):
res= {}
for record in self.browse(cr, uid, ids, context=context):
overdue = False
due_soon = False
total = 0
name = ''
for element in record.log_contracts:
if element.state in ('open', 'toclose') and element.expiration_date:
current_date_str = fields.date.context_today(self, cr, uid, context=context)
due_time_str = element.expiration_date
current_date = str_to_datetime(current_date_str)
due_time = str_to_datetime(due_time_str)
diff_time = (due_time-current_date).days
if diff_time < 0:
overdue = True
total += 1
if diff_time < 15 and diff_time >= 0:
                        due_soon = True
total += 1
if overdue or due_soon:
ids = self.pool.get('fleet.vehicle.log.contract').search(cr,uid,[('vehicle_id', '=', record.id), ('state', 'in', ('open', 'toclose'))], limit=1, order='expiration_date asc')
if len(ids) > 0:
#we display only the name of the oldest overdue/due soon contract
name=(self.pool.get('fleet.vehicle.log.contract').browse(cr, uid, ids[0], context=context).cost_subtype_id.name)
res[record.id] = {
'contract_renewal_overdue': overdue,
'contract_renewal_due_soon': due_soon,
'contract_renewal_total': (total - 1), #we remove 1 from the real total for display purposes
'contract_renewal_name': name,
}
return res
def _get_default_state(self, cr, uid, context):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'vehicle_state_active')
except ValueError:
model_id = False
return model_id
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
Odometer = self.pool['fleet.vehicle.odometer']
LogFuel = self.pool['fleet.vehicle.log.fuel']
LogService = self.pool['fleet.vehicle.log.services']
LogContract = self.pool['fleet.vehicle.log.contract']
Cost = self.pool['fleet.vehicle.cost']
return {
vehicle_id: {
'odometer_count': Odometer.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'fuel_logs_count': LogFuel.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'service_count': LogService.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'contract_count': LogContract.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'cost_count': Cost.search_count(cr, uid, [('vehicle_id', '=', vehicle_id), ('parent_id', '=', False)], context=context)
}
for vehicle_id in ids
}
_name = 'fleet.vehicle'
_description = 'Information on a vehicle'
_order= 'license_plate asc'
_columns = {
'name': fields.function(_vehicle_name_get_fnc, type="char", string='Name', store=True),
'company_id': fields.many2one('res.company', 'Company'),
'license_plate': fields.char('License Plate', size=32, required=True, help='License plate number of the vehicle (ie: plate number for a car)'),
'vin_sn': fields.char('Chassis Number', size=32, help='Unique number written on the vehicle motor (VIN/SN number)'),
'driver_id': fields.many2one('res.partner', 'Driver', help='Driver of the vehicle'),
'model_id': fields.many2one('fleet.vehicle.model', 'Model', required=True, help='Model of the vehicle'),
'log_fuel': fields.one2many('fleet.vehicle.log.fuel', 'vehicle_id', 'Fuel Logs'),
'log_services': fields.one2many('fleet.vehicle.log.services', 'vehicle_id', 'Services Logs'),
'log_contracts': fields.one2many('fleet.vehicle.log.contract', 'vehicle_id', 'Contracts'),
'cost_count': fields.function(_count_all, type='integer', string="Costs" , multi=True),
'contract_count': fields.function(_count_all, type='integer', string='Contracts', multi=True),
'service_count': fields.function(_count_all, type='integer', string='Services', multi=True),
'fuel_logs_count': fields.function(_count_all, type='integer', string='Fuel Logs', multi=True),
'odometer_count': fields.function(_count_all, type='integer', string='Odometer', multi=True),
'acquisition_date': fields.date('Acquisition Date', required=False, help='Date when the vehicle has been bought'),
'color': fields.char('Color', size=32, help='Color of the vehicle'),
'state_id': fields.many2one('fleet.vehicle.state', 'State', help='Current state of the vehicle', ondelete="set null"),
'location': fields.char('Location', size=128, help='Location of the vehicle (garage, ...)'),
'seats': fields.integer('Seats Number', help='Number of seats of the vehicle'),
'doors': fields.integer('Doors Number', help='Number of doors of the vehicle'),
'tag_ids' :fields.many2many('fleet.vehicle.tag', 'fleet_vehicle_vehicle_tag_rel', 'vehicle_tag_id','tag_id', 'Tags'),
'odometer': fields.function(_get_odometer, fnct_inv=_set_odometer, type='float', string='Last Odometer', help='Odometer measure of the vehicle at the moment of this log'),
'odometer_unit': fields.selection([('kilometers', 'Kilometers'),('miles','Miles')], 'Odometer Unit', help='Unit of the odometer ',required=True),
'transmission': fields.selection([('manual', 'Manual'), ('automatic', 'Automatic')], 'Transmission', help='Transmission Used by the vehicle'),
'fuel_type': fields.selection([('gasoline', 'Gasoline'), ('diesel', 'Diesel'), ('electric', 'Electric'), ('hybrid', 'Hybrid')], 'Fuel Type', help='Fuel Used by the vehicle'),
'horsepower': fields.integer('Horsepower'),
'horsepower_tax': fields.float('Horsepower Taxation'),
'power': fields.integer('Power', help='Power in kW of the vehicle'),
'co2': fields.float('CO2 Emissions', help='CO2 emissions of the vehicle'),
'image': fields.related('model_id', 'image', type="binary", string="Logo"),
'image_medium': fields.related('model_id', 'image_medium', type="binary", string="Logo (medium)"),
'image_small': fields.related('model_id', 'image_small', type="binary", string="Logo (small)"),
'contract_renewal_due_soon': fields.function(_get_contract_reminder_fnc, fnct_search=_search_contract_renewal_due_soon, type="boolean", string='Has Contracts to renew', multi='contract_info'),
        'contract_renewal_overdue': fields.function(_get_contract_reminder_fnc, fnct_search=_search_get_overdue_contract_reminder, type="boolean", string='Has Overdue Contracts', multi='contract_info'),
'contract_renewal_name': fields.function(_get_contract_reminder_fnc, type="text", string='Name of contract to renew soon', multi='contract_info'),
'contract_renewal_total': fields.function(_get_contract_reminder_fnc, type="integer", string='Total of contracts due or overdue minus one', multi='contract_info'),
'car_value': fields.float('Car Value', help='Value of the bought vehicle'),
}
_defaults = {
'doors': 5,
'odometer_unit': 'kilometers',
'state_id': _get_default_state,
}
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'log_fuel':[],
'log_contracts':[],
'log_services':[],
'tag_ids':[],
'vin_sn':'',
})
return super(fleet_vehicle, self).copy(cr, uid, id, default, context=context)
def on_change_model(self, cr, uid, ids, model_id, context=None):
if not model_id:
return {}
model = self.pool.get('fleet.vehicle.model').browse(cr, uid, model_id, context=context)
return {
'value': {
'image_medium': model.image,
}
}
def create(self, cr, uid, data, context=None):
if not context:
context = {}
context.update({'mail_create_nolog': True})
vehicle_id = super(fleet_vehicle, self).create(cr, uid, data, context=context)
vehicle = self.browse(cr, uid, vehicle_id, context=context)
self.message_post(cr, uid, [vehicle_id], body=_('%s %s has been added to the fleet!') % (vehicle.model_id.name,vehicle.license_plate), context=context)
return vehicle_id
def write(self, cr, uid, ids, vals, context=None):
"""
This function write an entry in the openchatter whenever we change important information
on the vehicle like the model, the drive, the state of the vehicle or its license plate
"""
for vehicle in self.browse(cr, uid, ids, context):
changes = []
if 'model_id' in vals and vehicle.model_id.id != vals['model_id']:
value = self.pool.get('fleet.vehicle.model').browse(cr,uid,vals['model_id'],context=context).name
oldmodel = vehicle.model_id.name or _('None')
changes.append(_("Model: from '%s' to '%s'") %(oldmodel, value))
if 'driver_id' in vals and vehicle.driver_id.id != vals['driver_id']:
value = self.pool.get('res.partner').browse(cr,uid,vals['driver_id'],context=context).name
olddriver = (vehicle.driver_id.name) or _('None')
changes.append(_("Driver: from '%s' to '%s'") %(olddriver, value))
if 'state_id' in vals and vehicle.state_id.id != vals['state_id']:
value = self.pool.get('fleet.vehicle.state').browse(cr,uid,vals['state_id'],context=context).name
oldstate = vehicle.state_id.name or _('None')
changes.append(_("State: from '%s' to '%s'") %(oldstate, value))
if 'license_plate' in vals and vehicle.license_plate != vals['license_plate']:
old_license_plate = vehicle.license_plate or _('None')
changes.append(_("License Plate: from '%s' to '%s'") %(old_license_plate, vals['license_plate']))
if len(changes) > 0:
self.message_post(cr, uid, [vehicle.id], body=", ".join(changes), context=context)
vehicle_id = super(fleet_vehicle,self).write(cr, uid, ids, vals, context)
return True
class fleet_vehicle_odometer(osv.Model):
_name='fleet.vehicle.odometer'
_description='Odometer log for a vehicle'
_order='date desc'
def _vehicle_log_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
name = record.vehicle_id.name
if record.date:
name = name+ ' / '+ str(record.date)
res[record.id] = name
return res
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
odometer_unit = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context).odometer_unit
return {
'value': {
'unit': odometer_unit,
}
}
_columns = {
'name': fields.function(_vehicle_log_name_get_fnc, type="char", string='Name', store=True),
'date': fields.date('Date'),
'value': fields.float('Odometer Value', group_operator="max"),
'vehicle_id': fields.many2one('fleet.vehicle', 'Vehicle', required=True),
'unit': fields.related('vehicle_id', 'odometer_unit', type="char", string="Unit", readonly=True),
}
_defaults = {
'date': fields.date.context_today,
}
class fleet_vehicle_log_fuel(osv.Model):
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
vehicle = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context)
odometer_unit = vehicle.odometer_unit
driver = vehicle.driver_id.id
return {
'value': {
'odometer_unit': odometer_unit,
'purchaser_id': driver,
}
}
def on_change_liter(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
        #need to cast to float because the value received from the web client may be an integer (Javascript and JSON do not
        #make any difference between 3.0 and 3). This causes a problem if you encode, for example, 2 liters at 1.5 per
        #liter => the total is computed as 3.0, which then triggers an onchange that recomputes price_per_liter as 3/2=1
        #(instead of 3.0/2=1.5)
        #If there is no change in the result, we return an empty dict to prevent an infinite loop due to the 3 intertwined
        #onchanges. And in order to verify that there is no change in the result, we have to limit the precision of the
        #computation to 2 decimals
liter = float(liter)
price_per_liter = float(price_per_liter)
amount = float(amount)
if liter > 0 and price_per_liter > 0 and round(liter*price_per_liter,2) != amount:
return {'value' : {'amount' : round(liter * price_per_liter,2),}}
elif amount > 0 and liter > 0 and round(amount/liter,2) != price_per_liter:
return {'value' : {'price_per_liter' : round(amount / liter,2),}}
elif amount > 0 and price_per_liter > 0 and round(amount/price_per_liter,2) != liter:
return {'value' : {'liter' : round(amount / price_per_liter,2),}}
else :
return {}
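    # A worked example of the rounding guard above, with hypothetical values:
    # encoding 2 liters at 1.5 per liter sets amount to 3.0; the web client may send
    # it back as the integer 3, and round(3 / 2.0, 2) == 1.5 already equals
    # price_per_liter, so the method returns {} and the chain of onchanges stops
    # instead of looping.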
def on_change_price_per_liter(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
        #need to cast to float because the value received from the web client may be an integer (Javascript and JSON do not
        #make any difference between 3.0 and 3). This causes a problem if you encode, for example, 2 liters at 1.5 per
        #liter => the total is computed as 3.0, which then triggers an onchange that recomputes price_per_liter as 3/2=1
        #(instead of 3.0/2=1.5)
        #If there is no change in the result, we return an empty dict to prevent an infinite loop due to the 3 intertwined
        #onchanges. And in order to verify that there is no change in the result, we have to limit the precision of the
        #computation to 2 decimals
liter = float(liter)
price_per_liter = float(price_per_liter)
amount = float(amount)
if liter > 0 and price_per_liter > 0 and round(liter*price_per_liter,2) != amount:
return {'value' : {'amount' : round(liter * price_per_liter,2),}}
elif amount > 0 and price_per_liter > 0 and round(amount/price_per_liter,2) != liter:
return {'value' : {'liter' : round(amount / price_per_liter,2),}}
elif amount > 0 and liter > 0 and round(amount/liter,2) != price_per_liter:
return {'value' : {'price_per_liter' : round(amount / liter,2),}}
else :
return {}
def on_change_amount(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
        #need to cast to float because the value received from the web client may be an integer (Javascript and JSON do not
        #make any difference between 3.0 and 3). This causes a problem if you encode, for example, 2 liters at 1.5 per
        #liter => the total is computed as 3.0, which then triggers an onchange that recomputes price_per_liter as 3/2=1
        #(instead of 3.0/2=1.5)
        #If there is no change in the result, we return an empty dict to prevent an infinite loop due to the 3 intertwined
        #onchanges. And in order to verify that there is no change in the result, we have to limit the precision of the
        #computation to 2 decimals
liter = float(liter)
price_per_liter = float(price_per_liter)
amount = float(amount)
if amount > 0 and liter > 0 and round(amount/liter,2) != price_per_liter:
return {'value': {'price_per_liter': round(amount / liter,2),}}
elif amount > 0 and price_per_liter > 0 and round(amount/price_per_liter,2) != liter:
return {'value': {'liter': round(amount / price_per_liter,2),}}
elif liter > 0 and price_per_liter > 0 and round(liter*price_per_liter,2) != amount:
return {'value': {'amount': round(liter * price_per_liter,2),}}
else :
return {}
def _get_default_service_type(self, cr, uid, context):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_service_refueling')
except ValueError:
model_id = False
return model_id
_name = 'fleet.vehicle.log.fuel'
_description = 'Fuel log for vehicles'
_inherits = {'fleet.vehicle.cost': 'cost_id'}
_columns = {
'liter': fields.float('Liter'),
'price_per_liter': fields.float('Price Per Liter'),
'purchaser_id': fields.many2one('res.partner', 'Purchaser', domain="['|',('customer','=',True),('employee','=',True)]"),
'inv_ref': fields.char('Invoice Reference', size=64),
'vendor_id': fields.many2one('res.partner', 'Supplier', domain="[('supplier','=',True)]"),
'notes': fields.text('Notes'),
'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
        'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related field with store=True because the graph view doesn't support (1) addressing fields from an inherited table and (2) fields that aren't stored in the database
}
_defaults = {
'date': fields.date.context_today,
'cost_subtype_id': _get_default_service_type,
'cost_type': 'fuel',
}
class fleet_vehicle_log_services(osv.Model):
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
vehicle = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context)
odometer_unit = vehicle.odometer_unit
driver = vehicle.driver_id.id
return {
'value': {
'odometer_unit': odometer_unit,
'purchaser_id': driver,
}
}
def _get_default_service_type(self, cr, uid, context):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_service_service_8')
except ValueError:
model_id = False
return model_id
_inherits = {'fleet.vehicle.cost': 'cost_id'}
_name = 'fleet.vehicle.log.services'
_description = 'Services for vehicles'
_columns = {
'purchaser_id': fields.many2one('res.partner', 'Purchaser', domain="['|',('customer','=',True),('employee','=',True)]"),
'inv_ref': fields.char('Invoice Reference', size=64),
'vendor_id': fields.many2one('res.partner', 'Supplier', domain="[('supplier','=',True)]"),
        'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related field with store=True because the graph view doesn't support (1) addressing fields from an inherited table and (2) fields that aren't stored in the database
'notes': fields.text('Notes'),
'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
}
_defaults = {
'date': fields.date.context_today,
'cost_subtype_id': _get_default_service_type,
'cost_type': 'services'
}
class fleet_service_type(osv.Model):
_name = 'fleet.service.type'
_description = 'Type of services available on a vehicle'
_columns = {
'name': fields.char('Name', required=True, translate=True),
        'category': fields.selection([('contract', 'Contract'), ('service', 'Service'), ('both', 'Both')], 'Category', required=True, help='Choose whether the service refers to contracts, vehicle services or both'),
}
class fleet_vehicle_log_contract(osv.Model):
def scheduler_manage_auto_costs(self, cr, uid, context=None):
        #This method is called by a cron task
        #It creates costs for contracts having the "recurring cost" field set, depending on their frequency
        #For example, if a contract has a recurring cost of 200 with a weekly frequency, this method creates a cost of 200 on the first day of each week, from the date of the last recurring cost in the database to today
        #If the contract does not yet have any recurring costs in the database, the method generates the recurring costs from the start_date to today
        #The created costs are associated with a contract through the many2one field contract_id
        #If the contract has no start_date, no cost will be created, even if the contract has recurring costs
vehicle_cost_obj = self.pool.get('fleet.vehicle.cost')
d = datetime.datetime.strptime(fields.date.context_today(self, cr, uid, context=context), tools.DEFAULT_SERVER_DATE_FORMAT).date()
contract_ids = self.pool.get('fleet.vehicle.log.contract').search(cr, uid, [('state','!=','closed')], offset=0, limit=None, order=None,context=None, count=False)
deltas = {'yearly': relativedelta(years=+1), 'monthly': relativedelta(months=+1), 'weekly': relativedelta(weeks=+1), 'daily': relativedelta(days=+1)}
for contract in self.pool.get('fleet.vehicle.log.contract').browse(cr, uid, contract_ids, context=context):
if not contract.start_date or contract.cost_frequency == 'no':
continue
found = False
last_cost_date = contract.start_date
if contract.generated_cost_ids:
last_autogenerated_cost_id = vehicle_cost_obj.search(cr, uid, ['&', ('contract_id','=',contract.id), ('auto_generated','=',True)], offset=0, limit=1, order='date desc',context=context, count=False)
if last_autogenerated_cost_id:
found = True
last_cost_date = vehicle_cost_obj.browse(cr, uid, last_autogenerated_cost_id[0], context=context).date
startdate = datetime.datetime.strptime(last_cost_date, tools.DEFAULT_SERVER_DATE_FORMAT).date()
if found:
startdate += deltas.get(contract.cost_frequency)
while (startdate <= d) & (startdate <= datetime.datetime.strptime(contract.expiration_date, tools.DEFAULT_SERVER_DATE_FORMAT).date()):
data = {
'amount': contract.cost_generated,
'date': startdate.strftime(tools.DEFAULT_SERVER_DATE_FORMAT),
'vehicle_id': contract.vehicle_id.id,
'cost_subtype_id': contract.cost_subtype_id.id,
'contract_id': contract.id,
'auto_generated': True
}
cost_id = self.pool.get('fleet.vehicle.cost').create(cr, uid, data, context=context)
startdate += deltas.get(contract.cost_frequency)
return True
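    # A worked example of the scheduler above, with hypothetical dates: a contract
    # with cost_frequency 'weekly', start_date 2013-01-01 and a last auto-generated
    # cost dated 2013-01-15 will, when the cron runs on 2013-02-01, create costs
    # dated 2013-01-22 and 2013-01-29, stopping at today or at the contract
    # expiration date, whichever comes first.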
def scheduler_manage_contract_expiration(self, cr, uid, context=None):
#This method is called by a cron task
#It manages the state of a contract, possibly by posting a message on the vehicle concerned and updating its status
datetime_today = datetime.datetime.strptime(fields.date.context_today(self, cr, uid, context=context), tools.DEFAULT_SERVER_DATE_FORMAT)
limit_date = (datetime_today + relativedelta(days=+15)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
ids = self.search(cr, uid, ['&', ('state', '=', 'open'), ('expiration_date', '<', limit_date)], offset=0, limit=None, order=None, context=context, count=False)
res = {}
for contract in self.browse(cr, uid, ids, context=context):
if contract.vehicle_id.id in res:
res[contract.vehicle_id.id] += 1
else:
res[contract.vehicle_id.id] = 1
for vehicle, value in res.items():
self.pool.get('fleet.vehicle').message_post(cr, uid, vehicle, body=_('%s contract(s) need(s) to be renewed and/or closed!') % (str(value)), context=context)
return self.write(cr, uid, ids, {'state': 'toclose'}, context=context)
def run_scheduler(self, cr, uid, context=None):
self.scheduler_manage_auto_costs(cr, uid, context=context)
self.scheduler_manage_contract_expiration(cr, uid, context=context)
return True
def _vehicle_contract_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
name = record.vehicle_id.name
if record.cost_subtype_id.name:
name += ' / '+ record.cost_subtype_id.name
if record.date:
name += ' / '+ record.date
res[record.id] = name
return res
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
odometer_unit = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context).odometer_unit
return {
'value': {
'odometer_unit': odometer_unit,
}
}
def compute_next_year_date(self, strdate):
oneyear = datetime.timedelta(days=365)
curdate = str_to_datetime(strdate)
return datetime.datetime.strftime(curdate + oneyear, tools.DEFAULT_SERVER_DATE_FORMAT)
def on_change_start_date(self, cr, uid, ids, strdate, enddate, context=None):
if (strdate):
return {'value': {'expiration_date': self.compute_next_year_date(strdate),}}
return {}
def get_days_left(self, cr, uid, ids, prop, unknow_none, context=None):
"""return a dict with as value for each contract an integer
if contract is in an open state and is overdue, return 0
if contract is in a closed state, return -1
otherwise return the number of days before the contract expires
"""
res = {}
for record in self.browse(cr, uid, ids, context=context):
if (record.expiration_date and (record.state == 'open' or record.state == 'toclose')):
today = str_to_datetime(time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT))
renew_date = str_to_datetime(record.expiration_date)
diff_time = (renew_date-today).days
res[record.id] = diff_time > 0 and diff_time or 0
else:
res[record.id] = -1
return res
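    # Examples of the convention above: an open contract expiring in 10 days yields
    # 10, an open contract already past its expiration date yields 0, and a closed
    # contract yields -1.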
def act_renew_contract(self, cr, uid, ids, context=None):
        assert len(ids) == 1, "This operation should only be done for 1 single contract at a time, as it is supposed to open a window as a result"
for element in self.browse(cr, uid, ids, context=context):
#compute end date
startdate = str_to_datetime(element.start_date)
enddate = str_to_datetime(element.expiration_date)
diffdate = (enddate - startdate)
default = {
'date': fields.date.context_today(self, cr, uid, context=context),
'start_date': datetime.datetime.strftime(str_to_datetime(element.expiration_date) + datetime.timedelta(days=1), tools.DEFAULT_SERVER_DATE_FORMAT),
'expiration_date': datetime.datetime.strftime(enddate + diffdate, tools.DEFAULT_SERVER_DATE_FORMAT),
}
newid = super(fleet_vehicle_log_contract, self).copy(cr, uid, element.id, default, context=context)
mod, modid = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'fleet_vehicle_log_contract_form')
return {
'name':_("Renew Contract"),
'view_mode': 'form',
'view_id': modid,
'view_type': 'tree,form',
'res_model': 'fleet.vehicle.log.contract',
'type': 'ir.actions.act_window',
'nodestroy': True,
'domain': '[]',
'res_id': newid,
'context': {'active_id':newid},
}
def _get_default_contract_type(self, cr, uid, context=None):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_contract_leasing')
except ValueError:
model_id = False
return model_id
def on_change_indic_cost(self, cr, uid, ids, cost_ids, context=None):
totalsum = 0.0
for element in cost_ids:
if element and len(element) == 3 and element[2] is not False:
totalsum += element[2].get('amount', 0.0)
return {
'value': {
'sum_cost': totalsum,
}
}
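    # Note: cost_ids arrives from the web client as one2many command triplets,
    # typically (0, 0, {values}) for unsaved lines, which is why only element[2]
    # is inspected; for example (the values are illustrative):
    #
    #   cost_ids = [(0, 0, {'amount': 100.0}), (0, 0, {'amount': 50.0})]
    #   # on_change_indic_cost(...) -> {'value': {'sum_cost': 150.0}}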
def _get_sum_cost(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for contract in self.browse(cr, uid, ids, context=context):
totalsum = 0
for cost in contract.cost_ids:
totalsum += cost.amount
res[contract.id] = totalsum
return res
_inherits = {'fleet.vehicle.cost': 'cost_id'}
_name = 'fleet.vehicle.log.contract'
_description = 'Contract information on a vehicle'
_order='state desc,expiration_date'
_columns = {
'name': fields.function(_vehicle_contract_name_get_fnc, type="text", string='Name', store=True),
'start_date': fields.date('Contract Start Date', help='Date when the coverage of the contract begins'),
        'expiration_date': fields.date('Contract Expiration Date', help='Date when the coverage of the contract expires (by default, one year after the start date)'),
'days_left': fields.function(get_days_left, type='integer', string='Warning Date'),
'insurer_id' :fields.many2one('res.partner', 'Supplier'),
'purchaser_id': fields.many2one('res.partner', 'Contractor', help='Person to which the contract is signed for'),
'ins_ref': fields.char('Contract Reference', size=64),
        'state': fields.selection([('open', 'In Progress'), ('toclose','To Close'), ('closed', 'Terminated')], 'Status', readonly=True, help='Choose whether the contract is still valid or not'),
        'notes': fields.text('Terms and Conditions', help='Write here all supplementary information related to this contract'),
'cost_generated': fields.float('Recurring Cost Amount', help="Costs paid at regular intervals, depending on the cost frequency. If the cost frequency is set to unique, the cost will be logged at the start date"),
        'cost_frequency': fields.selection([('no','No'), ('daily', 'Daily'), ('weekly','Weekly'), ('monthly','Monthly'), ('yearly','Yearly')], 'Recurring Cost Frequency', help='Frequency of the recurring cost', required=True),
'generated_cost_ids': fields.one2many('fleet.vehicle.cost', 'contract_id', 'Generated Costs', ondelete='cascade'),
'sum_cost': fields.function(_get_sum_cost, type='float', string='Indicative Costs Total'),
'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
        'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related field with store=True because the graph view doesn't support (1) addressing fields from an inherited table and (2) fields that aren't stored in the database
}
_defaults = {
'purchaser_id': lambda self, cr, uid, ctx: self.pool.get('res.users').browse(cr, uid, uid, context=ctx).partner_id.id or False,
'date': fields.date.context_today,
'start_date': fields.date.context_today,
'state':'open',
'expiration_date': lambda self, cr, uid, ctx: self.compute_next_year_date(fields.date.context_today(self, cr, uid, context=ctx)),
'cost_frequency': 'no',
'cost_subtype_id': _get_default_contract_type,
'cost_type': 'contract',
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
today = fields.date.context_today(self, cr, uid, context=context)
default['date'] = today
default['start_date'] = today
default['expiration_date'] = self.compute_next_year_date(today)
default['ins_ref'] = ''
default['state'] = 'open'
default['notes'] = ''
return super(fleet_vehicle_log_contract, self).copy(cr, uid, id, default, context=context)
def contract_close(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'closed'}, context=context)
def contract_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
class fleet_contract_state(osv.Model):
_name = 'fleet.contract.state'
_description = 'Contains the different possible status of a leasing contract'
_columns = {
'name':fields.char('Contract Status', size=64, required=True),
}
| agpl-3.0 |
Southpaw-TACTIC/TACTIC | src/pyasm/application/common/interpreter/tactic_client_lib/tactic_server_stub.py | 2 | 126967 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
# This is a stub for accessing the TACTIC server. It simplifies the access for
# scripts using the client api. Thin wrapper to the client API.
# These are meant to be copied to client directories.
import datetime
import re
import xmlrpclib, os, getpass, shutil, httplib, sys, urllib, types, hashlib
class TacticApiException(Exception):
pass
''' Class: TacticServerStub
    It allows a client to send commands to and receive information from the TACTIC
server.'''
class TacticServerStub(object):
'''
Constructor: TacticServerStub
'''
def __init__(self, login=None, setup=True, protocol=None, server=None, project=None, ticket=None, user=None, password=""):
'''Function: __init__(login=None, setup=True, protocol=None, server=None, project=None, ticket=None, user=None, password="")
Initialize the TacticServerStub
@keyparam:
login - login_code
setup - if set to True, it runs the protocol set-up
protocol - xmlrpc or local. it defaults to xmlrpc
server - tactic server
project - targeted project
ticket - login ticket key
user - tactic login_code that overrides the login
password - password for login'''
# initialize some variables
if user:
login = user
self.login = login
self.project_code = None
self.server = None
self.has_server = False
self.server_name = None
self.ticket = None # the ticket sent to the server
self.login_ticket = None
self.transaction_ticket = None
# autodetect protocol
if not protocol:
protocol = 'xmlrpc'
try:
import tactic
from pyasm.web import WebContainer
web = WebContainer.get_web()
if web:
server_name = web.get_http_host()
if server_name:
protocol = 'local'
except ImportError:
pass
self.protocol = protocol
# if all of the necessary parameters are set, then
if server and (ticket or login) and project:
self.set_server(server)
self.set_project(project)
if ticket:
self.set_ticket(ticket)
elif login:
# else try with no password (api_require_password)
ticket = self.get_ticket(login, password)
self.set_ticket(ticket)
elif setup:
self._setup(protocol)
# cached handoff dir
self.handoff_dir = None
'''if the function does not exist, call this and make an attempt
'''
def _call_missing_method(self, *args):
# convert from tuple to sequence
args = [x for x in args]
args.insert(0, self.ticket)
return self.server.missing_method(self.missing_method_name, args)
''' DISABLING for now
def __getattr__(self, attr):
self.missing_method_name = attr
return self._call_missing_method
'''
def test_error(self):
return self.server.test_error(self.ticket)
def get_protocol(self):
'''Function: get_protocol()
@return:
string - local or xmlrpc'''
return self.protocol
def set_protocol(self, protocol):
'''Function: get_protocol()
@params
string - local or xmlrpc'''
self.protocol = protocol
def set_ticket(self, ticket):
'''set the login ticket'''
self.set_login_ticket(ticket)
def set_login_ticket(self, ticket):
'''Function: set_login_ticket(ticket)
Set the login ticket with the ticket key'''
self.login_ticket = ticket
self.set_transaction_ticket(ticket)
def set_transaction_ticket(self, ticket):
if self.project_code:
#self.ticket = "%s:%s" % (self.project_code, ticket)
self.ticket = {
'ticket': ticket,
'project': self.project_code,
'language': 'python'
}
else:
raise TacticApiException("No project has been set. Please set a project using method TacticServerStub.set_project()")
#self.ticket = ticket
self.transaction_ticket = ticket
def get_transaction_ticket(self):
return self.transaction_ticket
def get_login_ticket(self):
return self.login_ticket
def get_login(self):
return self.login
def set_server(self, server_name):
'''Function: set_server(server_name)
Set the server name for this XML-RPC server'''
self.server_name = server_name
if self.protocol == "local":
from pyasm.prod.service import ApiXMLRPC
self.server = ApiXMLRPC()
self.server.set_protocol('local')
self.has_server = True
return
if self.server_name.startswith("http://") or self.server_name.startswith("https://"):
url = "%s/tactic/default/Api/" % self.server_name
else:
url = "http://%s/tactic/default/Api/" % self.server_name
#url = "http://localhost:8081/"
        # TODO: Not implemented: This is needed for isolation of transactions
#if self.transaction_ticket:
# url = '%s%s' % (url, self.transaction_ticket)
self.server = xmlrpclib.Server(url, allow_none=True)
try:
pass
#print(self.server.test(self.ticket))
except httplib.InvalidURL:
raise TacticApiException("You have supplied an invalid server name [%s]" % \
self.server_name)
self.has_server = True
# WARNING: this is changing code in the xmlrpclib library. This
# library is not sending a proper user agent. Hacking it in
# so that at least the OS is sent
if os.name == "nt":
user_agent = 'xmlrpclib.py (Windows)'
else:
user_agent = 'xmlrpclib.py (Linux)'
xmlrpclib.Transport.user_agent = user_agent
def get_server_name(self):
return self.server_name
def get_server(self):
return self.server
def set_project(self, project_code):
'''Function: set_project(project_code)
Set the project code'''
self.project_code = project_code
#self.set_project_state(project_code)
# switch the project code on the ticket
self.set_transaction_ticket(self.transaction_ticket)
def get_project(self):
return self.project_code
#-----------------------------------
# API FUNCTIONS
#
#
#
    # Building search type functions
#
def build_search_type(self, search_type, project_code=None):
'''API Function: build_search_type(search_type, project_code=None)
Convenience method to build a search type from its components. It is
a simple method that build the proper format for project scoped search
types. A full search type has the form:
prod/asset?project=bar.
It uniquely defines a type of sobject in a project.
@param:
search_type - the unique identifier of a search type: ie prod/asset
project_code (optional) - an optional project code. If this is not
included, the project from get_ticket() is added.
@return:
search type string
@example
[code]
search_type = "prod/asset"
full_search_type = server.build_search_type(search_type)
[/code]
'''
# do not append project for sthpw/* search_type
if search_type.startswith('sthpw/'):
return search_type
if not project_code:
project_code = self.project_code
assert project_code
return "%s?project=%s" % (search_type, project_code)
def build_search_key(self, search_type, code, project_code=None, column='code'):
'''API Function: build_search_key(search_type, code, project_code=None, column='code')
Convenience method to build a search key from its components. A
        search_key uniquely identifies a specific sobject. This string
        that is returned is heavily used as an argument in the API to
        identify an sobject to operate on.
A search key has the form: "prod/shot?project=bar&code=XG001"
where search_type = "prod/shot", project_code = "bar" and code = "XG001"
@param:
search_type - the unique identifier of a search type: ie prod/asset
code - the unique code of the sobject
@keyparam:
project_code - an optional project code. If this is not
included, the project from get_ticket() is added.
@return:
string - search key
@example:
[code]
search_type = "prod/asset"
code = "chr001"
search_key = server.build_search_key(search_type, code)
        e.g. search_key = prod/asset?project=sample3d&code=chr001
[/code]
[code]
search_type = "sthpw/login"
code = "admin"
search_key = server.build_search_key(search_type, code, column='code')
e.g. search_key = sthpw/login?code=admin
[/code]
'''
if not project_code:
if not search_type.startswith("sthpw/"):
project_code = self.project_code
assert project_code
if search_type.find('?') == -1:
if search_type.startswith('sthpw/'):
search_key = "%s?%s=%s" %(search_type, column, code)
else:
search_key = "%s?project=%s&%s=%s" % (search_type, project_code, column, code)
else:
search_key = "%s&%s=%s" %(search_type, column, code)
return search_key
def split_search_key(self, search_key):
'''API Function: split_search_key(search_key)
        Convenience method to split a search_key into its search_type and search_code/id components. Note: only accepts the new form prod/asset?project=sample3d&code=chr001
@param:
search_key - the unique identifier of a sobject
@return:
tuple - search type, search code/id
'''
if search_key.find('&') != -1:
search_type, code = search_key.split('&')
else:
# non project-based search_key
search_type, code = search_key.split('?')
codes = code.split('=')
        assert len(codes) == 2
return search_type, codes[1]
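    # A small usage sketch (the search key is illustrative):
    #
    #   search_type, code = server.split_search_key("prod/asset?project=sample3d&code=chr001")
    #   # search_type == "prod/asset?project=sample3d", code == "chr001"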
def get_home_dir(self):
'''API Function: get_home_dir()
OS independent method to Get the home directory of the current user.
@return:
string - home directory
'''
if os.name == "nt":
dir = "%s%s" % (os.environ.get('HOMEDRIVE'), os.environ.get('HOMEPATH'))
if os.path.exists(dir):
return dir
return os.path.expanduser('~')
def create_resource_path(self, login=None):
'''DEPRECATED: use create_resource_paths() or get_resource_path()
Create the resource path'''
# get the current user
if not login:
login = getpass.getuser()
filename = "%s.tacticrc" % login
# first check home directory
dir = self.get_home_dir()
is_dir_writeable = os.access(dir, os.W_OK) and os.path.isdir(dir)
# if the home directory is not existent or writable,
# use the temp directory
if not os.path.exists(dir) or not is_dir_writeable:
if os.name == "nt":
dir = "C:/sthpw/etc"
else:
dir = "/tmp/sthpw/etc"
if not os.path.exists(dir):
os.makedirs(dir)
else:
dir = "%s/.tactic/etc" % dir
if not os.path.exists(dir):
os.makedirs(dir)
# if an old resource path does exist, then remove it
if os.name == "nt":
old_dir = "C:/sthpw/etc"
else:
old_dir = "/tmp/sthpw/etc"
old_path = "%s/%s" % (old_dir, filename)
if os.path.exists(old_path):
os.unlink(old_path)
print("Removing deprectated resource file [%s]" % old_path)
path = "%s/%s" % (dir,filename)
return path
def create_resource_paths(self, login=None):
        '''Get the 1 or possibly 2 resource paths for creation'''
# get the current user
os_login = getpass.getuser()
if not login:
login = os_login
filename = "%s.tacticrc" % login
filename2 = "%s.tacticrc" % os_login
# first check home directory
dir = self.get_home_dir()
is_dir_writeable = os.access(dir, os.W_OK) and os.path.isdir(dir)
# if the home directory is not existent or writable,
# use the temp directory
if not os.path.exists(dir) or not is_dir_writeable:
if os.name == "nt":
dir = "C:/sthpw/etc"
else:
dir = "/tmp/sthpw/etc"
if not os.path.exists(dir):
os.makedirs(dir)
else:
dir = "%s/.tactic/etc" % dir
if not os.path.exists(dir):
os.makedirs(dir)
# if an old resource path does exist, then remove it
if os.name == "nt":
old_dir = "C:/sthpw/etc"
else:
old_dir = "/tmp/sthpw/etc"
old_path = "%s/%s" % (old_dir, filename)
if os.path.exists(old_path):
os.unlink(old_path)
print("Removing deprectated resource file [%s]" % old_path)
path = "%s/%s" % (dir,filename)
path2 = "%s/%s" % (dir,filename2)
paths = [path]
if path2 != path:
paths.append(path2)
return paths
def get_resource_path(self, login=None):
'''API Function: get_resource_path(login=None)
Get the resource path of the current user. It differs from
        create_resource_paths(), which actually creates the directories. The resource path
identifies the location of the file which is used to cache connection information.
        An example of the contents is shown below:
[code]
login=admin
server=localhost
ticket=30818057bf561429f97af59243e6ef21
project=unittest
[/code]
The contents in the resource file represent the defaults to use
when connection to the TACTIC server, but may be overriden by the
API methods: set_ticket(), set_server(), set_project() or the
environment variables: TACTIC_TICKET, TACTIC_SERVER, and TACTIC_PROJECT
Typically this method is not explicitly called by API developers and
        is used automatically by the API server stub. It attempts to get from the
        home dir first and then from the temp dir if that fails.
@param:
login (optional) - login code. If not provided, it gets the current system user
@return:
string - resource file path
'''
# get the current user
if not login:
login = getpass.getuser()
filename = "%s.tacticrc" % login
# first check home directory
dir = self.get_home_dir()
is_dir_writeable = os.access(dir, os.W_OK) and os.path.isdir(dir)
path = "%s/.tactic/etc/%s" % (dir,filename)
# if the home directory path does not exist, check the temp directory
if not is_dir_writeable or not os.path.exists(path):
if os.name == "nt":
dir = "C:/sthpw/etc"
else:
dir = "/tmp/sthpw/etc"
else:
dir = "%s/.tactic/etc" % dir
path = "%s/%s" % (dir,filename)
return path
def get_ticket(self, login, password):
'''API Function: get_ticket(login, password)
Get an authentication ticket based on a login and password.
        This function first authenticates the user and then issues a ticket.
The returned ticket is used on subsequent calls to the client api
@param:
login - the user that is used for authentications
password - the password of that user
@return:
string - ticket key
'''
return self.server.get_ticket(login, password)
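    # A typical bootstrap using this call; the server name, project, login and
    # password below are placeholders:
    #
    #   server = TacticServerStub(setup=False)
    #   server.set_server("localhost")
    #   server.set_project("unittest")
    #   ticket = server.get_ticket("admin", "admin_password")
    #   server.set_ticket(ticket)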
def get_info_from_user(self, force=False):
'''API Function: get_info_from_user(force=False)
        Get input from the user about the user's environment. Questions
asked pertain to the location of the tactic server, the project worked
on and the user's login and password. This information is stored in
an .<login>.tacticrc file.
@keyparam:
        force - if set to True, it will always ask for new information from the
command prompt again
'''
if self.protocol == "local":
return
old_server_name = self.server_name
old_project_code = self.project_code
old_ticket = self.login_ticket
old_login = self.login
default_login = getpass.getuser()
if not force and old_server_name and old_project_code:
return
print("\n")
print("TACTIC requires the following connection information:")
print("\n")
server_name = raw_input("Enter name of TACTIC server (%s): " % old_server_name)
if not server_name:
server_name = old_server_name
print("\n")
login = raw_input("Enter user name (%s): " % default_login)
if not login:
login = default_login
print("\n")
if login == old_login and old_ticket:
password = getpass.getpass("Enter password (or use previous ticket): ")
else:
password = getpass.getpass("Enter password: ")
print("\n")
project_code = raw_input("Project (%s): " % old_project_code)
if not project_code:
project_code = old_project_code
self.set_server(server_name)
# do the actual work
if login != old_login or password:
ticket = self.get_ticket(login, password)
print("Got ticket [%s] for [%s]" % (ticket, login))
else:
ticket = old_ticket
# commit info to a file
paths = self.create_resource_paths(login)
# this is needed when running get_ticket.py
self.login = login
for path in paths:
file = open(path, 'w')
file.write("login=%s\n" % login)
file.write("server=%s\n" % server_name)
file.write("ticket=%s\n" % ticket)
if project_code:
file.write("project=%s\n" % project_code)
file.close()
print("Saved to [%s]" % path)
# set up the server with the new information
self._setup(self.protocol)
#
# Simple Ping Test
#
def ping(self):
return self.server.ping(self.ticket)
def fast_ping(self):
return self.server.fast_ping(self.ticket)
def fast_query(self, search_type, filters=[], limit=None):
results = self.server.fast_query(self.ticket, search_type, filters, limit)
return eval(results)
def test_speed(self):
return self.server.test_speed(self.ticket)
def get_connection_info(self):
'''simple test to get connection info'''
return self.server.get_connection_info(self.ticket)
#
# Logging facilities
#
def log(self, level, message, category="default"):
'''API Function: log(level, message, category="default")
Log a message in the logging queue. It is often difficult to see output
of a trigger unless you are running the server in debug mode. In
production mode, the server sends the output to log files which
        are generally buffered, so it cannot be predicted exactly when
this output will be dumped to a file.
This method will make a request to the server and store the message
in the database in the debug log table.
@param:
level - debug level
message - freeform string describing the entry
@keyparam:
category - a label for the type of message being logged.
It defaults to "default"
'''
return self.server.log(self.ticket, level,message, category)
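    # For example, a trigger script could record a warning under its own category
    # (the message and category label here are illustrative):
    #
    #   server.log("warning", "texture path not found, using default", category="checkin")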
#
# Transaction methods
#
def set_state(self, name, value):
'''Set a state for this transaction
@params
name: name of state variable
value: value of state variable
'''
return self.server.set_state(self.ticket, name, value)
def set_project_state(self, project):
        '''Convenience function to set the project state
@params
project: code of the project to set the state to
'''
return self.set_state("project", project)
def start(self, title='', description=''):
'''API Function: start(title, description='')
Start a transaction. All commands using the client API are bound
in a transaction. The combination of start(), finish() and abort()
makes it possible to group a series of API commands in a single
transaction. The start/finish commands are not necessary for
query operations (like query(...), get_snapshot(...), etc).
@param:
title - the title of the command to be executed. This will show up on
transaction log
@keyparam:
description - the description of the command. This is more detailed.
@example:
A full transaction inserting 10 shots. If an error occurs, all 10
inserts will be aborted.
[code]
server.start('Start adding shots')
try:
for i in range(0,10):
server.insert("prod/shot", { 'code': 'XG%0.3d'%i } )
except:
server.abort()
else:
server.finish("10 shots added")
[/code]
'''
self.get_info_from_user()
if not self.has_server:
raise TacticApiException("No server connected. If running a command line script, please execute get_ticket.py")
ticket = self.server.start(self.login_ticket, self.project_code, \
title, description)
self.set_transaction_ticket(ticket)
#client_version = self.get_client_version()
#server_version = self.get_server_version()
# Switch to using api versions
client_api_version = self.get_client_api_version()
server_api_version = self.get_server_api_version()
if client_api_version != server_api_version:
raise TacticApiException("Server version [%s] does not match client api version [%s]" % (server_api_version, client_api_version) )
self.set_server(self.server_name)
# clear the handoff dir
self.handoff_dir = None
return ticket
def finish(self, description=''):
'''API Function: finish()
        End the current transaction and clean it up
@params:
description: this will be recorded in the transaction log as the
            description of the transaction
@example:
A full transaction inserting 10 shots. If an error occurs, all 10
inserts will be aborted.
[code]
server.start('Start adding shots')
try:
for i in range(0,10):
server.insert("prod/shot", { 'code': 'XG%0.3d'%i } )
except:
server.abort()
else:
server.finish("10 shots added")
[/code]
'''
if self.protocol == "local":
return
result = self.server.finish(self.ticket, description)
self.set_login_ticket(self.login_ticket)
#self.ticket = None
#self.transaction_ticket = None
return result
def abort(self, ignore_files=False):
'''API Function: abort(ignore_files=False)
        Abort the transaction. This undoes all commands that occurred
        from the beginning of the transaction
@keyparam:
ignore_files: (boolean) - determines if any files moved into the
repository are left as is. This is useful for very long processes
            where it is desirable to keep the files in the repository
even on abort.
@example:
A full transaction inserting 10 shots. If an error occurs, all 10
inserts will be aborted.
[code]
server.start('Start adding shots')
try:
for i in range(0,10):
server.insert("prod/shot", { 'code': 'XG%0.3d'%i } )
except:
server.abort()
else:
server.finish("10 shots added")
[/code]
'''
if self.protocol == "local":
return
result = self.server.abort(self.ticket, ignore_files)
self.ticket = None
self.transaction_ticket = None
return result
# FIXME: have to fix these because these are post transaction!!
def undo(self, transaction_ticket=None, transaction_id=None, ignore_files=False):
'''API Function: undo(transaction_ticket=None, transaction_id=None, ignore_files=False)
undo an operation. If no transaction id is given, then the last
operation of this user on this project is undone
@keyparam:
transaction_ticket - explicitly undo a specific transaction
transaction_id - explicitly undo a specific transaction by id
ignore_files - flag which determines whether the files should
            also be undone. Useful for large preallocated checkins.
'''
if self.protocol == "local":
return
return self.server.undo(self.ticket, transaction_ticket, transaction_id, ignore_files)
def redo(self, transaction_ticket=None, transaction_id=None):
'''API Function: redo(transaction_ticket=None, transaction_id=None)
Redo an operation. If no transaction id is given, then the last
undone operation of this user on this project is redone
@keyparam:
transaction_ticket - explicitly redo a specific transaction
transaction_id - explicitly redo a specific transaction by id
'''
if self.protocol == "local":
return
return self.server.redo(self.ticket, transaction_ticket, transaction_id)
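    # A minimal sketch: undo the last transaction of the current user on this
    # project, then re-apply it.
    #
    #   server.undo()
    #   server.redo()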
#
# Low Level Database methods
#
def get_column_info(self, search_type):
'''API Function: get_column_info(search_type)
Get column information of the table given a search type
@param:
search_type - the key identifying a type of sobject as registered in
the search_type table.
@return - a dictionary of info for each column
'''
results = self.server.get_column_info(self.ticket, search_type)
return results
def get_related_types(self, search_type):
'''API Function: get_related_types(search_type)
Get related search types given a search type
@param:
search_type - the key identifying a type of sobject as registered in
the search_type table.
@return - list of search_types
'''
results = self.server.get_related_types(self.ticket, search_type)
return results
def query(self, search_type, filters=[], columns=[], order_bys=[], show_retired=False, limit=None, offset=None, single=False, distinct=None, return_sobjects=False):
'''API Function: query(search_type, filters=[], columns=[], order_bys=[], show_retired=False, limit=None, offset=None, single=False, distinct=None, return_sobjects=False)
General query for sobject information
@param:
search_type - the key identifying a type of sobject as registered in
the search_type table.
@keyparam:
filters - an array of filters to alter the search
columns - an array of columns whose values should be
retrieved
order_bys - an array of order_by to alter the search
show_retired - sets whether retired sobjects are also
returned
limit - sets the maximum number of results returned
single - returns only a single object
distinct - specify a distinct column
return_sobjects - return sobjects instead of dictionary. This
works only when using the API on the server.
@return:
list of dictionary/sobjects - Each array item represents an sobject
and is a dictionary of name/value pairs
@example:
[code]
filters = []
filters.append( ("code", "XG002") )
order_bys = ['timestamp desc']
columns = ['code']
server.query(ticket, "prod/shot", filters, columns, order_bys)
[/code]
The arguments "filters", "columns", and "order_bys" are optional
The "filters" argument is a list. Each list item represents an
individual filter. The forms are as follows:
[code]
(column, value) -> where column = value
(column, (value1,value2)) -> where column in (value1, value2)
(column, op, value) -> where column op value
where op is ('like', '<=', '>=', '>', '<', 'is', '~', '!~', '~*', '!~*')
(value) -> where value
[/code]
'''
#return self.server.query(self.ticket, search_type, filters, columns, order_bys, show_retired, limit, offset, single, return_sobjects)
results = self.server.query(self.ticket, search_type, filters, columns, order_bys, show_retired, limit, offset, single, distinct, return_sobjects)
if not return_sobjects:
results = eval(results)
return results
def insert(self, search_type, data, metadata={}, parent_key=None, info={}, use_id=False, triggers=True):
'''API Function: insert(search_type, data, metadata={}, parent_key=None, info={}, use_id=False, triggers=True)
General insert for creating a new sobject
@param:
search_type - the key identifying a type of sobject as registered in
the search_type table
data - a dictionary of name/value pairs used to populate the new sobject
parent_key - set the parent key for this sobject
@keyparam:
metadata - a dictionary of values that will be stored in the metadata attribute
if available
info - a dictionary of info to pass to the ApiClientCmd
use_id - use id in the returned search key
triggers - boolean to fire trigger on insert
@return:
dictionary - represent the sobject with it's current data
@example:
insert a new asset
[code]
search_type = "prod/asset"
data = {
'code': 'chr001',
'description': 'Main Character'
}
server.insert( search_type, data )
[/code]
insert a new note with a shot parent
[code]
# get shot key
shot_key = server.build_search_key(search_type='prod/shot',code='XG001')
data = {
'context': 'model',
'note': 'This is a modelling note',
'login': server.get_login()
}
server.insert( search_type, data, parent_key=shot_key)
[/code]
insert a note without firing triggers
[code]
search_type = "sthpw/note"
data = {
'process': 'roto',
'context': 'roto',
'note': 'The keys look good.',
'project_code': 'art'
}
server.insert( search_type, data, triggers=False )
[/code]
'''
return self.server.insert(self.ticket, search_type, data, metadata, parent_key, info, use_id, triggers)
def update(self, search_key, data={}, metadata={}, parent_key=None, info={}, use_id=False, triggers=True):
'''API Function: update(search_key, data={}, metadata={}, parent_key=None, info={}, use_id=False, triggers=True)
General update for updating sobject
@param:
search_key - a unique identifier key representing an sobject.
Note: this can also be an array, in which case, the data will
be updated to each sobject represented by this search key
@keyparam:
data - a dictionary of name/value pairs which will be used to update
the sobject defined by the search_key
Note: this can also be an array. Each data dictionary element in
the array will be applied to the corresponding search key
parent_key - set the parent key for this sobject
info - a dictionary of info to pass to the ApiClientCmd
metadata - a dictionary of values that will be stored in the metadata attribute if available
use_id - use id in the returned search key
triggers - boolean to fire trigger on update
@return:
dictionary - represent the sobject with its current data.
If search_key is an array, This will be an array of dictionaries
'''
return self.server.update(self.ticket, search_key, data, metadata, parent_key, info, use_id, triggers)
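# A minimal usage sketch, assuming "server" is a connected TacticServerStub
# and the shot XG001 exists in the current project:
#
# search_key = server.build_search_key(search_type='prod/shot', code='XG001')
# server.update(search_key, {'description': 'Approved for lighting'})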
def update_multiple(self, data, triggers=True):
'''API Function: update_multiple(data, triggers=True)
Update for several sobjects in one function call. The
data structure contains all the info needed to update and is
formatted as follows:
data = {
search_key1: { column1: value1, column2: value2 }
search_key2: { column1: value1, column2: value2 }
}
@params:
ticket - authentication ticket
data - data structure containing update information for all
sobjects
@keyparam:
triggers - boolean to fire triggers on update
@return:
None
'''
return self.server.update_multiple(self.ticket, data, triggers)
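# A minimal usage sketch of the data structure described above, assuming
# "server" is a connected TacticServerStub and both search keys resolve to
# existing shots in the sample3d project:
#
# data = {
#     'prod/shot?project=sample3d&code=XG001': {'status': 'approved'},
#     'prod/shot?project=sample3d&code=XG002': {'status': 'in_progress'},
# }
# server.update_multiple(data)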
def insert_update(self, search_key, data, metadata={}, parent_key=None, info={}, use_id=False, triggers=True):
'''API Function: insert_update(search_key, data, metadata={}, parent_key=None, info={}, use_id=False, triggers=True)
Insert if the entry does not exist, update otherwise
@param:
search_key - a unique identifier key representing an sobject.
data - a dictionary of name/value pairs which will be used to update
the sobject defined by the search_key
@keyparam:
metadata - a dictionary of values that will be stored in the metadata attribute if available
parent_key - set the parent key for this sobject
info - a dictionary of info to pass to the ApiClientCmd
use_id - use id in the returned search key
triggers - boolean to fire trigger on insert
@return:
dictionary - represent the sobject with its current data.
'''
return self.server.insert_update(self.ticket, search_key, data, metadata, parent_key, info, use_id, triggers)
def get_unique_sobject(self, search_type, data={}):
'''API Function: get_unique_sobject(search_type, data={})
This is a special convenience function which will query for an
sobject and if it doesn't exist, create it. It assumes that this
object should exist and spares the developer the logic of having to
query for the sobject, test if it doesn't exist and then create it.
@param:
search_type - the type of the sobject
data - a dictionary of name/value pairs that uniquely identify this
sobject
@return:
sobject - unique sobject matching the criteria in data
'''
results = self.server.get_unique_sobject(self.ticket, search_type, data)
return results
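# A minimal usage sketch, assuming "server" is a connected TacticServerStub:
# fetch the shot XG001, creating it first if it does not exist.
#
# shot = server.get_unique_sobject('prod/shot', {'code': 'XG001'})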
def get_column_names(self, search_type):
'''API Function: get_column_names(search_type)
This method will get all of the column names associated with a search
type
@param:
search_type - the search type used to query the columns for
@return
list of column names
'''
return self.server.get_column_names(self.ticket, search_type)
#
# Expression methods
#
def eval(self, expression, search_keys=[], mode=None, single=False, vars={}, show_retired=False):
'''API Function: eval(expression, search_keys=[], mode=None, single=False, vars={}, show_retired=False)
Evaluate the expression. This expression uses the TACTIC expression
language to retrieve results. For more information, refer to the
expression language documentation.
@param:
expression - string expression
@keyparam:
search_keys - the starting point for the expression.
mode - string|expression - determines the starting mode of the expression
single - True|False - True value forces a single return value
vars - user defined variable
show_retired - defaults to False to not return retired items
@return:
results of the expression. The results depend on the exact nature
of the expression.
@example:
#1. Search for snapshots with context beginning with 'model' for the asset with the search key 'prod/asset?project=sample3d&id=96'
[code]
server = TacticServerStub.get()
exp = "@SOBJECT(sthpw/snapshot['context','EQ','^model'])"
result = server.eval(exp, search_keys=['prod/asset?project=sample3d&id=96'])
[/code]
Please refer to the expression language documentation for numerous
examples on how to use the expression language.
'''
#return self.server.eval(self.ticket, expression, search_keys, mode, single, vars)
results = self.server.eval(self.ticket, expression, search_keys, mode, single, vars, show_retired)
try:
return eval(results)
except:
return results
#
# Higher Level Object methods
#
def create_search_type(self, search_type, title, description="", has_pipeline=False):
'''API Function: create_search_type(search_type, title, description="", has_pipeline=False)
Create a new search type
@param:
search_type - Newly defined search_type
title - readable title to display this search type as
@keyparam:
description - a brief description of this search type
has_pipeline - determines whether this search type goes through a
pipeline. Simply puts a pipeline_code column in the table.
@return
string - the newly created search type
'''
return self.server.create_search_type(self.ticket, search_type, title, description, has_pipeline)
def add_column_to_search_type(self, search_type, column_name, column_type):
'''Adds a new column to the search type
@params
search_type - the search type that the new column will be added to
column_name - the name of the column to add to the database
column_type - the type of the column to add to the database
@return
True if column was created, False if column exists
'''
return self.server.add_column_to_search_type(self.ticket, search_type, column_name, column_type)
def get_by_search_key(self, search_key):
'''API Function: get_by_search_key(search_key)
Get the info on an sobject based on search key
@param:
ticket - authentication ticket
search_key - a unique identifier key representing an sobject
@return:
list of dictionary - sobjects that represent values of the sobject in the
form of name:value pairs
'''
return self.server.get_by_search_key(self.ticket, search_key)
def delete_sobject(self, search_key):
'''API Function: delete_sobject(search_key)
Invoke the delete method. Note: this function may fail due
to dependencies. Tactic will not cascade delete. This function
should be used with extreme caution because, if successful, it will
permanently remove the existence of an sobject
@param:
search_key - a unique identifier key representing an sobject.
Note: this can also be an array.
@return:
dictionary - a sobject that represents values of the sobject in the
form name:value pairs
'''
return self.server.delete_sobject(self.ticket, search_key)
def retire_sobject(self, search_key):
'''API Function: retire_sobject(search_key)
Invoke the retire method. This is preferred over delete_sobject if
you are not sure whether other sobjects have dependencies on it.
@param:
search_key - the unique key identifying the sobject.
@return:
dictionary - sobject that represents values of the sobject in the
form name:value pairs
'''
return self.server.retire_sobject(self.ticket, search_key)
def reactivate_sobject(self, search_key):
'''API Function: reactivate_sobject(search_key)
Invoke the reactivate method.
@param:
search_key - the unique key identifying the sobject.
@return:
dictionary - sobject that represents values of the sobject in the
form name:value pairs
'''
return self.server.reactivate_sobject(self.ticket, search_key)
def get_parent(self, search_key, columns=[]):
'''API Function: get_parent(search_key, columns=[])
Get the parent of an sobject.
@param:
search_key - a unique identifier key representing an sobject
@keyparam:
columns - the columns that will be returned in the sobject
@return:
dictionary - the parent sobject
'''
return self.server.get_parent(self.ticket, search_key, columns)
def get_all_children(self, search_key, child_type, filters=[], columns=[]):
'''API Function: get_all_children(search_key, child_type, filters=[], columns=[])
Get all children of a particular child type of an sobject
@param:
ticket - authentication ticket
search_key - a unique identifier key representing an sobject
child_type - the search_type of the children to search for
@keyparam:
filters - extra filters on the query : see query method for examples
columns - list of column names to be included in the returned dictionary
@return:
list of dictionary - a list of sobjects dictionaries
'''
#filters = []
return self.server.get_all_children(self.ticket, search_key, child_type, filters, columns)
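# A minimal usage sketch, assuming "server" is a connected TacticServerStub
# and that tasks have been created for the asset below:
#
# asset_key = server.build_search_key(search_type='prod/asset', code='chr001')
# tasks = server.get_all_children(asset_key, 'sthpw/task', columns=['process', 'status'])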
def get_parent_type(self, search_key):
'''API Function: get_parent_type(search_key)
Get the parent search type
@param:
search_key - a unique identifier key representing an sobject
@return:
string - the parent search type
'''
return self.server.get_parent_type(self.ticket, search_key)
def get_child_types(self, search_key):
'''API Function: get_child_types(search_key)
Get all the child search types
@param:
search_key - a unique identifier key representing an sobject
@return:
list - the child search types
'''
return self.server.get_child_types(self.ticket, search_key)
def get_types_from_instance(self, instance_type):
'''API Function: get_types_from_instance(instance_type)
Get the connector types from an instance type
@param:
instance_type - the search type of the instance
@return:
tuple - (from_type, parent_type)
a tuple with the from_type and the parent_type. The from_type is
the connector type and the parent type is the search type of the
parent of the instance
'''
return self.server.get_types_from_instance(self.ticket, instance_type)
def connect_sobjects(self, src_sobject, dst_sobject, context='default'):
'''connect two sobjects together
@params
src_sobject: the original sobject from which the connection starts
dst_sobject: the sobject to which the connection connects to
context: an arbitrary parameter which defines the type of connection
@return
the connection sobject
'''
return self.server.connect_sobjects(self.ticket, src_sobject, dst_sobject, context)
#
# upload/download methods
#
def download(self, url, to_dir=".", filename='', md5_checksum=""):
'''API Function: download(self, url, to_dir=".", filename='', md5_checksum="")
Download a file from a given url
@param:
url - the url source location of the file
@keyparam:
to_dir - the directory to download to
filename - the filename to download to, defaults to original filename
md5_checksum - an md5 checksum to match the file against
@return:
string - path of the downloaded file
'''
# use url filename by default
if not filename:
filename = os.path.basename(url)
# download to temp_dir
#if not to_dir:
# to_dir = self.get_tmpdir()
# make sure the directory exists
if not os.path.exists(to_dir):
os.makedirs(to_dir)
to_path = "%s/%s" % (to_dir, filename)
# check if this file is already downloaded. if so, skip
if os.path.exists(to_path):
# if it exists, check the MD5 checksum
if md5_checksum:
if self._md5_check(to_path, md5_checksum):
print("skipping '%s', already exists" % to_path)
return to_path
else:
# always download if no md5_checksum available
pass
f = urllib.urlopen(url)
file = open(to_path, "wb")
file.write( f.read() )
file.close()
f.close()
# check for downloaded file
# COMMENTED OUT for now since it does not work well with icons
#if md5_checksum and not self._md5_check(to_path, md5_checksum):
# raise TacticException('Downloaded file [%s] in local repo failed md5 check. This file may be missing on the server or corrupted.'%to_path)
"""
print("starting download")
try:
import urllib2
file = open(to_path, "wb")
req = urllib2.urlopen(url)
try:
while True:
buffer = req.read(1024*100)
print("read: ", len(buffer))
if not buffer:
break
file.write( buffer )
finally:
print("closing ....")
req.close()
file.close()
except urllib2.URLError as e:
raise Exception('%s - %s' % (e,url))
print("... done download")
"""
return to_path
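# A minimal usage sketch, assuming "server" is a connected TacticServerStub;
# the url below is a placeholder for a file served by the TACTIC web server:
#
# path = server.download("http://my-tactic-server/assets/sample3d/shot/XG001/plate.jpg",
#                        to_dir="./cache")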
def upload_file(self, path):
'''API Function: upload_file(path)
Use http protocol to upload a file through http
@param:
path - the name of the file that will be uploaded
'''
from pyasm.application.common import UploadMultipart
upload = UploadMultipart()
upload.set_ticket(self.transaction_ticket)
if self.server_name.startswith("http://") or self.server_name.startswith("https://"):
upload_server_url = "%s/tactic/default/UploadServer/" % self.server_name
else:
upload_server_url = "http://%s/tactic/default/UploadServer/" % self.server_name
upload.set_upload_server(upload_server_url)
upload.execute(path)
# upload a file
#filename = os.path.basename(path)
#file = open(path, 'rb')
#data = xmlrpclib.Binary( file.read() )
#file.close()
#return self.server.upload_file(self.transaction_ticket, filename, data)
def upload_group(self, path, file_range):
'''uses http protocol to upload a sequence of files through HTTP
@params
path - the name of the file that will be uploaded
file_range - string describing range of frames in the form '1-5/1'
'''
start, end = file_range.split("-")
start = int(start)
end = int(end)
if path.find("####") != -1:
path = path.replace("####", "%0.4d")
# TODO: add full range functionality here
for frame in range(start, end+1):
full_path = path % frame
self.upload_file(full_path)
# file group functions
def _get_file_range(self, file_range):
'''get the file_range'''
frame_by = 1
if file_range.find("/") != -1:
file_range, frame_by = file_range.split("/")
frame_by = int(frame_by)
frame_start, frame_end = file_range.split("-")
frame_start = int(frame_start)
frame_end = int(frame_end)
return frame_start, frame_end, frame_by
def _expand_paths(self, file_path, file_range):
'''expands the file paths, replacing # as specified by the file_range
@param - file_path with #### or %0.4d notation
@param - file_range - a string of the form 'start-end/by', e.g. '1-5/1' '''
file_paths = []
frame_start, frame_end, frame_by = self._get_file_range(file_range)
# support %0.4d notation
if file_path.find('#') == -1:
for i in range(frame_start, frame_end+1, frame_by):
expanded = file_path % i
file_paths.append( expanded )
else:
# find out the number of #'s in the path
padding = len( file_path[file_path.index('#'):file_path.rindex('#')] )+1
for i in range(frame_start, frame_end+1, frame_by):
expanded = file_path.replace( '#'*padding, str(i).zfill(padding) )
file_paths.append(expanded)
return file_paths
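# A sketch of how the two internal helpers above expand a frame range
# (values shown are what the logic above produces, not server calls):
#
# self._get_file_range("1-5/2")                 # -> (1, 5, 2)
# self._expand_paths("beauty.####.exr", "1-3")  # -> ['beauty.0001.exr',
#                                               #     'beauty.0002.exr',
#                                               #     'beauty.0003.exr']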
#
# Checkin/out methods
#
def create_snapshot(self, search_key, context, snapshot_type="file",
description="No description", is_current=True, level_key=None,
is_revision=False ):
'''API Function: create_snapshot(search_key, context, snapshot_type="file", description="No description", is_current=True, level_key=None, is_revision=False )
Create an empty snapshot
@param:
search_key - a unique identifier key representing an sobject
context - the context of the checkin
@keyparam:
snapshot_type - [optional] describes what kind of a snapshot this is.
More information about a snapshot type can be found in the
prod/snapshot_type sobject
description - [optional] optional description for this checkin
is_current - flag to determine if this checkin is to be set as current
is_revision - flag to set this as a revision instead of a version
level_key - the unique identifier of the level that this
is to be checked into
@return:
dictionary - representation of the snapshot created for this checkin
'''
return self.server.create_snapshot(self.ticket, search_key, context, snapshot_type, description, is_current, level_key, is_revision)
def simple_checkin(self, search_key, context, file_path,
snapshot_type="file", description="No description",
use_handoff_dir=False, file_type="main", is_current=True,
level_key=None, breadcrumb=False, metadata={}, mode='upload',
is_revision=False, info={} , keep_file_name=False, create_icon=True,
checkin_cls='pyasm.checkin.FileCheckin', context_index_padding=None, checkin_type="strict", source_path=None):
'''API Function: simple_checkin( search_key, context, file_path, snapshot_type="file", description="No description", use_handoff_dir=False, file_type="main", is_current=True, level_key=None, breadcrumb=False, metadata={}, mode=None, is_revision=False, info={}, keep_file_name=False, create_icon=True, checkin_cls='pyasm.checkin.FileCheckin', context_index_padding=None, checkin_type="strict", source_path=None )
Simple method that checks in a file.
@param:
search_key - a unique identifier key representing an sobject
context - the context of the checkin
file_path - path of the file that was previously uploaded
@keyparam:
snapshot_type - [optional] describes what kind of a snapshot this is.
More information about a snapshot type can be found in the
prod/snapshot_type sobject
description - [optional] optional description for this checkin
file_type - [optional] the file type to check the file in as (defaults to 'main')
is_current - flag to determine if this checkin is to be set as current
level_key - the unique identifier of the level that this
is to be checked into
breadcrumb - flag to leave a .snapshot breadcrumb file containing
information about what happened to a checked in file
metadata - a dictionary of values that will be stored as metadata
on the snapshot
mode - upload, copy, move, local or inplace
is_revision - flag to set this as a revision instead of a version
create_icon - flag to create an icon on checkin
info - dict of info to pass to the ApiClientCmd
keep_file_name - keep the original file name
checkin_cls - checkin class
context_index_padding - determines the padding used for context
indexing: ie: design/0001
checkin_type - auto or strict which controls whether to auto create versionless
source_path - explicitly give the source path
@return:
dictionary - representation of the snapshot created for this checkin
'''
mode_options = ['upload', 'copy', 'move', 'local','inplace']
if mode:
if mode not in mode_options:
raise TacticApiException('Mode must be in %s' % mode_options)
if mode == 'upload':
self.upload_file(file_path)
elif mode in ['copy', 'move']:
handoff_dir = self.get_handoff_dir()
use_handoff_dir = True
# make sure that handoff dir is empty
try:
shutil.rmtree(handoff_dir)
os.makedirs(handoff_dir)
os.chmod(handoff_dir, 0777)
except OSError as e:
sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s" % (handoff_dir, e.__str__()))
# copy or move the tree
basename = os.path.basename(file_path)
if mode == 'move':
shutil.move(file_path, "%s/%s" % (handoff_dir, basename))
elif mode == 'copy':
shutil.copy(file_path, "%s/%s" % (handoff_dir, basename))
elif mode in ['local']:
# do nothing
pass
# check in the file to the server
snapshot = self.server.simple_checkin(self.ticket, search_key, context, file_path, snapshot_type, description, use_handoff_dir, file_type, is_current, level_key, metadata, mode, is_revision, info, keep_file_name, create_icon, checkin_cls, context_index_padding, checkin_type, source_path)
if mode == 'local':
# get the naming conventions and move the file to the local repo
files = self.server.eval(self.ticket, "@SOBJECT(sthpw/file)", snapshot)
# FIXME: this only works on the python implementation
files = eval(files)
# TODO: maybe cache this??
base_dirs = self.server.get_base_dirs(self.ticket)
if os.name == 'nt':
client_repo_dir = base_dirs.get("win32_local_repo_dir")
# DEPRECATED
if not client_repo_dir:
client_repo_dir = base_dirs.get("win32_local_base_dir")
client_repo_dir = "%s/repo" % client_repo_dir
else:
client_repo_dir = base_dirs.get("linux_local_repo_dir")
if not client_repo_dir:
client_repo_dir = base_dirs.get("linux_local_base_dir")
client_repo_dir = "%s/repo" % client_repo_dir
for file in files:
rel_path = "%s/%s" %( file.get('relative_dir'), file.get('file_name'))
repo_path = "%s/%s" % (client_repo_dir, rel_path)
repo_dir = os.path.dirname(repo_path)
if not os.path.exists(repo_dir):
os.makedirs(repo_dir)
shutil.copy(file_path, repo_path)
# leave a breadcrumb
if breadcrumb:
snapshot_code = snapshot.get('code')
full_snapshot_xml = self.get_full_snapshot_xml(snapshot_code)
snapshot_path = "%s.snapshot" % file_path
file = open(snapshot_path, 'wb')
file.write(full_snapshot_xml)
file.close()
return snapshot
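# A minimal usage sketch, assuming "server" is a connected TacticServerStub
# and './my_model.ma' exists locally: a basic upload-mode checkin.
#
# search_key = server.build_search_key(search_type='prod/asset', code='chr001')
# snapshot = server.simple_checkin(search_key, 'model', './my_model.ma',
#                                  description='First model pass', mode='upload')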
def group_checkin(self, search_key, context, file_path, file_range, snapshot_type="sequence", description="", file_type='main', metadata={}, mode=None, is_revision=False , info={} ):
'''API Function: group_checkin(search_key, context, file_path, file_range, snapshot_type="sequence", description="", file_type='main', metadata={}, mode=None, is_revision=False, info={} )
Check in a range of files. A range of files is defined as any group
of files that have some sequence of numbers grouping them together.
An example of this is a range of frames that have been rendered.
Although it is possible to add each frame in a range using add_file,
adding them as a sequence is lightweight, often significantly reducing
the number of database entries required. Also, it is understood that
these files form a range of related files, so that other optimizations
and manipulations can be operated on these files accordingly.
@param:
search_key - a unique identifier key representing an sobject
file_path - expression for file range: ./blah.####.jpg
file_type - the type of file this is checked in as. Default = 'main'
file_range - string describing range of frames in the form '1-5/1'
@keyparam:
snapshot_type - type of snapshot this checkin will have
description - description related to this checkin
file_type - the type of file that will be associated with this group
metadata - add metadata to snapshot
mode - determines whether the files passed in should be copied, moved
or uploaded. By default, this is a manual process (for backwards
compatibility)
is_revision - flag to set this as a revision instead of a version
info - dict of info to pass to the ApiClientCmd
@return:
dictionary - snapshot
'''
mode_options = ['upload', 'copy', 'move', 'inplace']
if mode:
if mode not in mode_options:
raise TacticApiException('Mode must be in %s' % mode_options)
# brute force method
if mode == 'move':
handoff_dir = self.get_handoff_dir()
expanded_paths = self._expand_paths(file_path, file_range)
for path in expanded_paths:
basename = os.path.basename(path)
shutil.move(path, '%s/%s' %(handoff_dir, basename))
use_handoff_dir = True
elif mode == 'copy':
handoff_dir = self.get_handoff_dir()
expanded_paths = self._expand_paths(file_path, file_range)
for path in expanded_paths:
basename = os.path.basename(path)
shutil.copy(path, '%s/%s' %(handoff_dir, basename))
use_handoff_dir = True
elif mode == 'upload':
expanded_paths = self._expand_paths(file_path, file_range)
for path in expanded_paths:
self.upload_file(path)
use_handoff_dir = False
elif mode == 'inplace':
use_handoff_dir = False
# get the absolute path
file_path = os.path.abspath(file_path)
return self.server.group_checkin(self.ticket, search_key, context, file_path, file_range, snapshot_type, description, file_type, metadata, mode, is_revision, info )
def directory_checkin(self, search_key, context, dir, snapshot_type="directory", description="No description", file_type='main', is_current=True, level_key=None, metadata={}, mode="copy", is_revision=False):
'''API Function: directory_checkin(search_key, context, dir, snapshot_type="directory", description="No description", file_type='main', is_current=True, level_key=None, metadata={}, mode="copy", is_revision=False)
Check in a directory of files. This informs TACTIC to treat the
entire directory as a single entity without regard to the structure
of the contents. TACTIC will not know about the individual files
and the directory hierarchy within the base directory; it is left
up to an external program to interpret and understand this.
This is often used when logic on the exact file structure exists in
some external source outside of TACTIC and it is deemed too complicated
to map this into TACTIC's snapshot definition.
@param:
search_key - a unique identifier key representing an sobject
dir - the directory that needs to be checked in
@keyparam:
snapshot_type - type of snapshot this checkin will have
description - description related to this checkin
file_type - the type of file that will be associated with this group
is_current - makes this snapshot current
level_key - the search key of the level if used
metadata - add metadata to snapshot
mode - determines whether the files passed in should be copied, moved
or uploaded. By default, this is 'copy'
is_revision - flag to set this as a revision instead of a version
@return:
dictionary - snapshot
'''
if mode not in ['copy', 'move', 'inplace', 'local']:
raise TacticApiException('mode must be one of [copy, move, inplace, local]')
handoff_dir = self.get_handoff_dir()
# make sure that handoff dir is empty
try:
shutil.rmtree(handoff_dir)
os.makedirs(handoff_dir)
os.chmod(handoff_dir, 0777)
except OSError as e:
sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s" % (handoff_dir, e.__str__()))
# strip the trailing / or \ if any
m = re.match(r'(.*)([/|\\]$)', dir)
if m:
dir = m.groups()[0]
# copy or move the tree to the handoff directory
basename = os.path.basename(dir)
if mode == 'move':
shutil.move(dir, "%s/%s" % (handoff_dir, basename))
elif mode == 'copy':
shutil.copytree(dir, "%s/%s" % (handoff_dir, basename))
use_handoff_dir = True
snapshot = self.server.simple_checkin(self.ticket, search_key, context,
dir, snapshot_type, description, use_handoff_dir, file_type,
is_current, level_key, metadata, mode, is_revision)
if mode == 'local':
# get the naming conventions and move the file to the local repo
files = self.server.eval(self.ticket, "@SOBJECT(sthpw/file)", snapshot)
# FIXME: this only works on the python implementation
files = eval(files)
for file in files:
rel_path = "%s/%s" %( file.get('relative_dir'), file.get('file_name'))
base_dirs = self.server.get_base_dirs(self.ticket)
if os.name == 'nt':
client_repo_dir = base_dirs.get("win32_local_base_dir")
else:
client_repo_dir = base_dirs.get("linux_local_base_dir")
repo_path = "%s/%s" % (client_repo_dir, rel_path)
repo_dir = os.path.dirname(repo_path)
if not os.path.exists(repo_dir):
os.makedirs(repo_dir)
shutil.copytree(dir,repo_path)
return snapshot
def add_dependency(self, snapshot_code, file_path, type='ref', tag='main'):
'''API Function: add_dependency(snapshot_code, file_path, type='ref', tag='main')
This method will append a dependency reference to an existing checkin.
All files are uniquely contained by a particular snapshot. Presently,
this method does a reverse lookup by file name. This assumes that
the filename is unique within the system, so it is not recommended
unless it is known that naming conventions will produce a unique
file name for this particular file. If this is not the
case, it is recommended that add_dependency_by_code() is used.
@param:
snapshot_code - the unique code identifier of a snapshot
file_path - the path of the dependent file. This function is able
reverse map the file_path to the appropriate snapshot
@keyparam:
type - type of dependency. Values include 'ref' and 'input_ref'
ref = hierarchical reference: ie A contains B
input_ref = input reference: ie: A was used to create B
tag - a tagged keyword can be added to a dependency to categorize
the different dependencies that exist in a snapshot
@return:
dictionary - the resulting snapshot
'''
return self.server.add_dependency(self.ticket, snapshot_code, file_path, type, tag)
def add_dependency_by_code(self, to_snapshot_code, from_snapshot_code, type='ref', tag='main'):
'''API Function: add_dependency_by_code(to_snapshot_code, from_snapshot_code, type='ref', tag='main')
Append a dependency reference to an existing checkin. This dependency
is used to connect various checkins together creating a separate
dependency tree for each checkin.
@param:
to_snapshot_code: the snapshot code which the dependency will be
connected to
from_snapshot_code: the snapshot code which the dependency will be
connected from
type - type of dependency. Values include 'ref' and 'input_ref'
ref = hierarchical reference: ie A contains B
input_ref - input reference: ie: A was used to create B
tag - a tagged keyword can be added to a dependency to categorize
the different dependencies that exist in a snapshot
@return:
dictionary - the resulting snapshot
'''
return self.server.add_dependency_by_code(self.ticket, to_snapshot_code, from_snapshot_code, type, tag)
def add_file(self, snapshot_code, file_path, file_type='main', use_handoff_dir=False, mode=None, create_icon=False, dir_naming='', file_naming=''):
'''API Function: add_file(snapshot_code, file_path, file_type='main', use_handoff_dir=False, mode=None, create_icon=False)
Add a file to an already existing snapshot. This method is used in
piecewise checkins. A blank snapshot can be created using
create_snapshot(). This method can then be used to successively
add files to the snapshot.
In order to checkin the file, the server will need to have access
to these files. There are a number of ways of getting the files
to the server. When using copy or move mode, the files are either
copied or moved to the "handoff_dir". This directory
is an agreed upon directory in which to handoff the files to the
server. This mode is generally used for checking in user files.
For heavy bandwidth checkins, it is recommended to use preallocated
checkins.
@param:
snapshot_code - the unique code identifier of a snapshot
file_path - path of the file to add to the snapshot.
Optional: this can also be an array to add multiple files at once.
This has much faster performance than adding one file at a time.
Also, note that in this case, file_types must be an array
of equal size.
@keyparam:
file_type - type of the file to be added.
Optional: this can also be an array. See file_path argument
for more information.
use_handoff_dir - DEPRECATED: (use mode arg) use handoff dir to checkin
file. The handoff dir is an agreed upon directory between the
client and server to transfer files.
mode - upload|copy|move|manual|inplace - determines the protocol which delivers
the file to the server.
create_icon - (True|False) determine whether to create an icon for
this appended file. Only 1 icon should be created for each
snapshot.
@return:
dictionary - the resulting snapshot
@example:
This will create a blank model snapshot for character chr001 and
add a file
[code]
search_type = 'prod/asset'
code = 'chr001'
search_key = server.build_search_type(search_type, code)
context = 'model'
path = "./my_model.ma"
snapshot = server.create_snapshot(search_key, context)
server.add_file( snapshot.get('code'), path )
[/code]
Different files should be separated by file type. For example,
to check in both a maya and houdini file in the same snapshot:
[code]
maya_path = "./my_model.ma"
houdini_path = "./my_model.hip"
server.add_file( snapshot_code, maya_path, file_type='maya' )
server.add_file( snapshot_code, houdini_path, file_type='houdini' )
[/code]
To transfer files by uploading (using http protocol):
[code]
server.add_file( snapshot_code, maya_path, mode='upload' )
[/code]
To create an icon for this file
[code]
path = 'image.jpg'
server.add_file( snapshot_code, path, mode='upload', create_icon=True )
[/code]
To add multiple files at once
[code]
file_paths = [maya_path, houdini_path]
file_types = ['maya', 'houdini']
server.add_file( snapshot_code, file_paths, file_types=file_types, mode='upload')
[/code]
'''
if type(file_path) != types.ListType:
file_paths = [file_path]
else:
file_paths = file_path
if type(file_type) != types.ListType:
file_types = [file_type]
else:
file_types = file_type
mode_options = ['upload', 'copy', 'move', 'preallocate','inplace']
if mode:
if mode in ['copy', 'move']:
handoff_dir = self.get_handoff_dir()
use_handoff_dir = True
# make sure that handoff dir is empty
try:
shutil.rmtree(handoff_dir)
os.makedirs(handoff_dir)
except OSError as e:
sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s" % (handoff_dir, e.__str__()))
for i, file_path in enumerate(file_paths):
file_type = file_types[i]
if mode not in mode_options:
raise TacticApiException('Mode must be in %s' % mode_options)
if mode == 'upload':
self.upload_file(file_path)
use_handoff_dir = False
elif mode in ['copy', 'move']:
# copy or move the tree
basename = os.path.basename(file_path)
if mode == 'move':
shutil.move(file_path, "%s/%s" % (handoff_dir, basename))
elif mode == 'copy':
shutil.copy(file_path, "%s/%s" % (handoff_dir, basename))
return self.server.add_file(self.ticket, snapshot_code, file_paths, file_types, use_handoff_dir, mode, create_icon, dir_naming, file_naming)
def remove_file(self, snapshot_code, file_type):
return self.server.remove_file(self.ticket, snapshot_code, file_type)
def add_group(self, snapshot_code, file_path, file_type, file_range, use_handoff_dir=False, mode=None):
'''API Function: add_group(snapshot_code, file_path, file_type, file_range, use_handoff_dir=False, mode=None)
Add a file range to an already existing snapshot
@param:
snapshot_code - the unique code identifier of a snapshot
file_path - path of the file to add to the snapshot
file_type - type of the file to be added.
file_range - range with format s-e/b
@keyparam:
use_handoff_dir - use handoff dir to checkin file
mode - one of 'copy','move','preallocate'
@return:
dictionary - the resulting snapshot
'''
mode_options = ['upload', 'copy', 'move', 'preallocate']
if mode:
if mode not in mode_options:
raise TacticApiException('Mode must be in %s' % mode_options)
#dir = os.path.dirname(file_path)
handoff_dir = self.get_handoff_dir()
if mode == 'move':
expanded_paths = self._expand_paths(file_path, file_range)
for path in expanded_paths:
basename = os.path.basename(path)
shutil.move(path, '%s/%s' %(handoff_dir, basename))
use_handoff_dir = True
elif mode == 'copy':
expanded_paths = self._expand_paths(file_path, file_range)
for path in expanded_paths:
basename = os.path.basename(path)
shutil.copy(path, '%s/%s' %(handoff_dir, basename))
use_handoff_dir = True
elif mode == 'upload':
self.upload_group(file_path, file_range)
use_handoff_dir = False
elif mode == 'preallocate':
use_handoff_dir = True
return self.server.add_group(self.ticket, snapshot_code, file_path, file_type, file_range, use_handoff_dir, mode)
def add_directory(self, snapshot_code, dir, file_type='main', mode="copy"):
'''API Function: add_directory(snapshot_code, dir, file_type='main', mode="copy")
Add a full directory to an already existing checkin.
This informs TACTIC to treat the entire directory as a single entity
without regard to the structure of the contents. TACTIC will not
know about the individual files and the directory hierarchy within
the base directory; it is left up to an external program
to interpret and understand this.
This is often used when logic on the exact file structure exists in
some external source outside of TACTIC and it is deemed too complicated
to map this into TACTIC's snapshot definition.
@param:
snapshot_code - a unique identifier key representing an sobject
dir - the directory that needs to be checked in
@keyparam:
file_type - file type is used more as snapshot type here
mode - copy, move, preallocate, manual, inplace
@return:
dictionary - snapshot
@example:
This will create a new snapshot for a search_key and add a directory using manual mode
[code]
dir = 'C:/images'
handoff_dir = self.server.get_handoff_dir()
shutil.copytree('%s/subfolder' %dir, '%s/images/subfolder' %handoff_dir)
snapshot_dict = self.server.create_snapshot(search_key, context='render')
snapshot_code = snapshot_dict.get('code')
self.server.add_directory(snapshot_code, dir, file_type='dir', mode='manual')
[/code]
'''
if mode not in ['copy', 'move', 'preallocate', 'manual', 'inplace']:
raise TacticApiException('Mode must be one of [copy, move, preallocate, manual, inplace]')
if mode in ['copy', 'move']:
handoff_dir = self.get_handoff_dir()
# make sure that handoff dir is empty
try:
shutil.rmtree(handoff_dir)
os.makedirs(handoff_dir)
except OSError as e:
sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s" % (handoff_dir, e.__str__()))
# copy or move the tree
basename = os.path.basename(dir)
if mode == 'move':
shutil.move(dir, "%s/%s" % (handoff_dir, basename))
elif mode == 'copy':
shutil.copytree(dir, "%s/%s" % (handoff_dir, basename))
use_handoff_dir = True
return self.server.add_file(self.ticket, snapshot_code, dir, file_type, use_handoff_dir, mode )
def checkout(self, search_key, context, version=-1, file_type='main', to_dir=".", level_key=None, to_sandbox_dir=False, mode='copy'):
'''API Function: checkout(search_key, context, version=-1, file_type='main', to_dir='.', level_key=None, to_sandbox_dir=False, mode='copy')
Check out files defined in a snapshot from the repository. This
will copy files to a particular directory so that a user can work
on them.
@param:
search_key - a unique identifier key representing an sobject
context - context of the snapshot
@keyparam:
version - version of the snapshot
file_type - file type defaults to 'main'. If set to '*', all paths are checked out
level_key - the unique identifier of the level in the form of a search key
to_dir - destination directory defaults to '.'
to_sandbox_dir - (True|False) destination directory defaults to
sandbox_dir (overrides "to_dir" arg)
mode - (copy|download) - determines the protocol that will be used
to copy the files to the destination location
@return:
list - a list of paths that were checked out
'''
if not os.path.isdir(to_dir):
raise TacticApiException("[%s] does not exist or is not a directory" % to_dir)
to_dir = to_dir.replace("\\","/")
#repo_paths = self.server.checkout(self.ticket, search_key, context, version, file_type, level_key)
paths = self.server.checkout(self.ticket, search_key, context, version, file_type, level_key)
client_lib_paths = paths['client_lib_paths']
sandbox_paths = paths['sandbox_paths']
web_paths = paths['web_paths']
to_paths = []
for i, client_lib_path in enumerate(client_lib_paths):
if to_sandbox_dir:
to_path = sandbox_paths[i]
filename = os.path.basename(to_path)
else:
filename = os.path.basename(client_lib_path)
to_path = "%s/%s" % (to_dir, filename)
to_paths.append(to_path)
# copy the file from the repo
to_dir = os.path.dirname(to_path)
if not os.path.exists(to_dir):
os.makedirs(to_dir)
if mode == 'copy':
if os.path.exists(client_lib_path):
if os.path.isdir(client_lib_path):
shutil.copytree(client_lib_path, to_path)
else:
shutil.copy(client_lib_path, to_path)
else:
raise TacticApiException("Path [%s] does not exist" % client_lib_path)
elif mode == 'download':
web_path = web_paths[i]
self.download(web_path, to_dir=to_dir, filename=filename)
else:
raise TacticApiException("Checkout mode [%s] not supported" % mode)
return to_paths
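# A minimal usage sketch, assuming "server" is a connected TacticServerStub,
# a 'model' checkin exists for the asset, and './work' is an existing directory:
#
# search_key = server.build_search_key(search_type='prod/asset', code='chr001')
# paths = server.checkout(search_key, 'model', to_dir='./work', mode='copy')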
def lock_sobject(self, search_key, context):
'''Locks the context for checking in and out. Locking a context
prevents the ability to checkout or checkin to that context for a
particular sobject.
@params
search_key - the search key of the sobject
context - the context that will be blocked
@return
None
'''
return self.server.lock_sobject(self.ticket, search_key, context)
def unlock_sobject(self, search_key, context):
'''Unlocks the context for checking in and out. Locking a context
prevents the ability to checkout or checkin to that context for a
particular sobject.
@params
search_key - the search key of the sobject
context - the context that will be unblocked
@return
None
'''
return self.server.unlock_sobject(self.ticket, search_key, context)
def query_snapshots(self, filters=None, columns=None, order_bys=[], show_retired=False, limit=None, offset=None, single=False, include_paths=False, include_full_xml=False, include_paths_dict=False, include_parent=False, include_files=False):
'''API Function: query_snapshots(filters=None, columns=None, order_bys=[], show_retired=False, limit=None, offset=None, single=False, include_paths=False, include_full_xml=False, include_paths_dict=False, include_parent=False, include_files=False)
A thin wrapper around query(), specific to querying snapshots,
with some useful flags that are specific to snapshots
@params:
ticket - authentication ticket
filters - (optional) an array of filters to alter the search
columns - (optional) an array of columns whose values should be
retrieved
order_bys - (optional) an array of order_by to alter the search
show_retired - (optional) - sets whether retired sobjects are also
returned
limit - sets the maximum number of results returned
single - returns a single sobject that is not wrapped up in an array
include_paths - flag to specify whether to include a __paths__ property
containing a list of all paths in the dependent snapshots
include_paths_dict - flag to specify whether to include a
__paths_dict__ property containing a dict of all paths in the
dependent snapshots
include_full_xml - flag to return the full xml definition of a snapshot
include_parent - includes all of the parent attributes in a __parent__ dictionary
include_files - includes all of the file objects referenced in the
snapshots
@return:
list of snapshots
'''
return self.server.query_snapshots(self.ticket, filters, columns, order_bys, show_retired, limit, offset, single, include_paths, include_full_xml, include_paths_dict, include_parent, include_files)
def get_snapshot(self, search_key, context="publish", version='-1', revision=None, level_key=None, include_paths=False, include_full_xml=False, include_paths_dict=False, include_files=False, include_web_paths_dict=False, versionless=False):
'''API Function: get_snapshot(search_key, context="publish", version='-1', revision=None, level_key=None, include_paths=False, include_full_xml=False, include_paths_dict=False, include_files=False, include_web_paths_dict=False, versionless=False)
Method to retrieve an sobject's snapshot
Retrieve the latest snapshot
@param:
search_key - unique identifier of sobject whose snapshot we are
looking for
@keyparam:
context - the context of the snapshot
version - snapshot version
revision - snapshot revision
level_key - the unique identifier of the level in the form of a search key
include_paths - flag to include a list of paths to the files in this
snapshot.
include_full_xml - whether to include full xml in the return
include_paths_dict - flag to specify whether to include a
__paths_dict__ property containing a dict of all paths in the
dependent snapshots
include_files - includes all of the file objects referenced in the
snapshots
@return:
dictionary - the resulting snapshot
@example:
[code]
search_key = 'prod/asset?project=sample3d&code=chr001'
snapshot = server.get_snapshot(search_key, context='icon', include_files=True)
[/code]
'''
return self.server.get_snapshot(self.ticket, search_key, context, version, revision, level_key, include_paths, include_full_xml, include_paths_dict, include_files, include_web_paths_dict, versionless)
def get_full_snapshot_xml(self, snapshot_code):
'''API Function: get_full_snapshot_xml(snapshot_code)
Retrieve a full snapshot xml. This snapshot definition
contains all the information about a snapshot in xml
@param:
snapshot_code - unique code of snapshot
@return:
string - the resulting snapshot xml
'''
return self.server.get_full_snapshot_xml(self.ticket, snapshot_code)
def set_current_snapshot(self, snapshot_code):
'''API Function: set_current_snapshot(snapshot_code)
Set this snapshot as a "current" snapshot
@param:
snapshot_code - unique code of snapshot
@return:
string - the resulting snapshot xml
'''
return self.server.set_current_snapshot(self.ticket, snapshot_code)
def get_dependencies(self, snapshot_code, mode='explicit', tag='main', include_paths=False, include_paths_dict=False, include_files=False, repo_mode='client_repo', show_retired=False):
'''API Function: get_dependencies(snapshot_code, mode='explicit', tag='main', include_paths=False, include_paths_dict=False, include_files=False, repo_mode='client_repo', show_retired=False):
Return the dependent snapshots of a certain tag
@params:
snapshot_code - unique code of a snapshot
@keyparams:
mode - explicit (get version as defined in snapshot)
- latest
- current
tag - retrieve only dependencies that have this named tag
include_paths - flag to specify whether to include a __paths__ property
containing all of the paths in the dependent snapshots
include_paths_dict - flag to specify whether to include a
__paths_dict__ property containing a dict of all paths in the
dependent snapshots
include_files - includes all of the file objects referenced in the
snapshots
repo_mode - client_repo, web, lib, relative
show_retired - defaults to False so that it doesn't show retired dependencies
@return:
a list of snapshots
'''
return self.server.get_dependencies(self.ticket, snapshot_code, mode, tag, include_paths, include_paths_dict, include_files, repo_mode, show_retired)
def get_all_dependencies(self, snapshot_code, mode='explicit', type='ref', include_paths=False, include_paths_dict=False, include_files=False, repo_mode='client_repo'):
'''API Function: get_all_dependencies(snapshot_code, mode='explicit', type='ref', include_paths=False, include_paths_dict=False, include_files=False, repo_mode='client_repo')
Retrieve the latest dependent snapshots of the given snapshot
@param:
snapshot_code - unique code of a snapshot
@keyparam:
mode - explicit (get version as defined in snapshot)
- latest
- current
type - one of ref or input_ref
include_paths - flag to specify whether to include a __paths__ property
containing all of the paths in the dependent snapshots
include_paths_dict - flag to specify whether to include a
__paths_dict__ property containing a dict of all paths in the
dependent snapshots
include_files - includes all of the file objects referenced in the
snapshots
repo_mode - client_repo, web, lib, relative
@return:
list - snapshots
'''
return self.server.get_all_dependencies(self.ticket, snapshot_code, mode, type,\
include_paths, include_paths_dict, include_files, repo_mode)
#
# Task methods
#
def create_task(self, search_key, process="publish", subcontext=None, description=None, bid_start_date=None, bid_end_date=None, bid_duration=None, assigned=None):
'''API Function: create_task(search_key, process="publish", subcontext=None, description=None, bid_start_date=None, bid_end_date=None, bid_duration=None, assigned=None)
Create a task for a particular sobject
@param:
search_key - the key identifying a type of sobject as registered in
the search_type table.
@keyparam:
process - process that this task belongs to
subcontext - the subcontext of the process (context = process/subcontext)
description - detailed description of the task
bid_start_date - the expected start date for this task
bid_end_date - the expected end date for this task
bid_duration - the expected duration for this task
assigned - the user assigned to this task
@return:
dictionary - task that was created
'''
return self.server.create_task(self.ticket, search_key, process, subcontext, description, bid_start_date, bid_end_date, bid_duration, assigned)
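# A minimal usage sketch, assuming "server" is a connected TacticServerStub;
# the dates and assignee below are placeholder values:
#
# asset_key = server.build_search_key(search_type='prod/asset', code='chr001')
# task = server.create_task(asset_key, process='model',
#                           description='Block out the model',
#                           bid_start_date='2012-01-02',
#                           bid_end_date='2012-01-09',
#                           assigned='joe')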
def add_initial_tasks(self, search_key, pipeline_code=None, processes=[]):
'''API Function: add_initial_tasks(search_key, pipeline_code=None, processes=[])
Add initial tasks to an sobject
@param:
search_key - the key identifying a type of sobject as registered in
the search_type table.
@keyparam:
pipeline_code - override the sobject's pipeline and use this one instead
processes - create tasks for the given list of processes
@return:
list - tasks created
'''
return self.server.add_initial_tasks(self.ticket, search_key, pipeline_code, processes)
#
# Pipeline methods
#
def get_pipeline_xml(self, search_key):
'''API Function: get_pipeline_xml(search_key)
DEPRECATED: use get_pipeline_xml_info()
Retrieve the pipeline of a specific sobject. The pipeline
returned is an xml document and an optional dictionary of information.
@param:
search_key - a unique identifier key representing an sobject
@return:
dictionary - xml and the optional hierarchy info
'''
return self.server.get_pipeline_xml(self.ticket, search_key)
def get_pipeline_processes(self, search_key, recurse=False):
'''API Function: get_pipeline_processes(search_key, recurse=False)
DEPRECATED: use get_pipeline_processes_info()
Retrieve the pipeline processes information of a specific sobject.
@param:
search_key - a unique identifier key representing an sobject
@keyparams:
recurse - boolean to control whether to display sub pipeline processes
@return:
list - process names of the pipeline
'''
return self.server.get_pipeline_processes(self.ticket, search_key, recurse)
def get_pipeline_xml_info(self, search_key, include_hierarchy=False):
'''API Function: get_pipeline_xml_info(search_key, include_hierarchy=False)
Retrieve the pipeline of a specific sobject. The pipeline
returned is an xml document and an optional dictionary of information.
@param:
search_key - a unique identifier key representing an sobject
@keyparam:
include_hierarchy - include a list of dictionary with key info on each process of the pipeline
@return:
dictionary - xml and the optional hierarchy info
'''
return self.server.get_pipeline_xml_info(self.ticket, search_key, include_hierarchy)
def get_pipeline_processes_info(self, search_key, recurse=False, related_process=None):
'''API Function: get_pipeline_processes_info(search_key, recurse=False, related_process=None)
Retrieve the pipeline processes information of a specific sobject. It provides information from the perspective of a particular process if related_process is specified.
@param:
search_key - a unique identifier key representing an sobject
@keyparams:
recurse - boolean to control whether to display sub pipeline processes
related_process - given a process, it shows the input and output processes and contexts
@return:
dictionary - process names of the pipeline or a dictionary if related_process is specified
'''
return self.server.get_pipeline_processes_info(self.ticket, search_key, recurse, related_process)
def execute_pipeline(self, pipeline_xml, package):
'''API Function: execute_pipeline(pipeline_xml, package)
Spawn an execution of a pipeline as delivered from
'get_pipeline_xml()'. The pipeline is an xml document that describes
a set of processes and their handlers
@param:
pipeline_xml - an xml document describing a standard Tactic pipeline.
package - a dictionary of data delivered to the handlers
@return:
instance - a reference to the interpreter
'''
# execute the pipeline
from interpreter import PipelineInterpreter
interpreter = PipelineInterpreter(pipeline_xml)
interpreter.set_server(self)
interpreter.set_package(package)
interpreter.execute()
return interpreter
def commit_session(self, session_xml, pid):
'''Takes a session xml and commits it. Also handles transfer to old
style xml data. Generally, this is executed through the application
package: tactic_client_lib/application/common/introspect.py. However,
this can be done manually if the proper session xml is provided.
@params
ticket - authentication ticket
session_xml - an xml document representing the session. This document
format is described below
@return
session_content object
The session_xml takes the form:
<session>
<ref search_key="prod/shot?project=bar&code=joe" context="model" version="3" revision="2" tactic_node="tactic_joe"/>
</session>
'''
return self.server.commit_session(self.ticket, session_xml, pid)
#
# Directory methods
#
def get_paths(self, search_key, context="publish", version=-1, file_type='main', level_key=None, single=False):
'''API Function: get_paths( search_key, context="publish", version=-1, file_type='main', level_key=None)
Get paths from an sobject
@params:
ticket - authentication ticket
search_key - a unique identifier key representing an sobject
context - context of the snapshot
version - version of the snapshot
level_key - the unique identifier of the level that this
was checked into
@return
A dictionary of lists representing various paths. The paths returned
are as follows:
- client_lib_paths: all the paths to the repository relative to the client
- lib_paths: all the paths to the repository relative to the server
- sandbox_paths: all of the paths mapped to the sandbox
- web: all of the paths relative to the http server
'''
return self.server.get_paths(self.ticket, search_key, context, version, file_type, level_key, single)
def get_base_dirs(self):
'''get all of the base directories defined on the server'''
return self.server.get_base_dirs(self.ticket)
def get_handoff_dir(self):
'''API Function: get_handoff_dir()
Return a temporary path that files can be copied to
@return:
        string - the directory to copy a file to in order to hand it off to TACTIC
        without having to go through the http protocol
'''
if self.handoff_dir:
return self.handoff_dir
handoff_dir = self.server.get_handoff_dir(self.ticket)
if not os.path.exists(handoff_dir):
os.makedirs(handoff_dir)
self.handoff_dir = handoff_dir
return handoff_dir
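    # A hedged usage sketch (illustration only): copying a local file into the
    # handoff directory so that a later check-in can pick it up server side
    # without an http upload.  The file path below is hypothetical.
    #
    #     import shutil
    #     handoff_dir = server.get_handoff_dir()
    #     shutil.copy("C:/temp/model_v001.ma", handoff_dir)
    #     # a subsequent check-in call can then reference the copied path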
def clear_upload_dir(self):
'''API Function: clear_upload_dir()
Clears the upload directory to ensure clean checkins
@param:
None
@keyparam:
None
@return:
None
'''
return self.server.clear_upload_dir(self.ticket)
def get_client_dir(self, snapshot_code, file_type='main', mode='client_repo'):
'''API Function: get_client_dir(snapshot_code, file_type='main', mode='client_repo')
Get a dir segment from a snapshot
@param:
snapshot_code - the unique code of the snapshot
@keyparam:
file_type - each file in a snapshot is identified by a file type.
This parameter specifies which type. Defaults to 'main'
mode - Forces the type of folder path returned to use the value from the
appropriate tactic_<SERVER_OS>-conf.xml configuration file.
Values include 'lib', 'web', 'local_repo', 'sandbox', 'client_repo', 'relative'
lib = the NFS asset directory from the server point of view
web = the http asset directory from the client point of view
local_repo = the local sync of the TACTIC repository
sandbox = the local sandbox (work area) designated by TACTIC
client_repo (default) = the asset directory from the client point of view
If there is no value for win32_client_repo_dir or linux_client_repo_dir
in the config, then the value for asset_base_dir will be used instead.
            relative = the relative directory without any base
@return:
string - directory segment for a snapshot and file type
@example:
If the tactic_<SERVER_OS>-conf.xml configuration file contains the following:
[code]
<win32_client_repo_dir>T:/assets</win32_client_repo_dir>
[/code]
and if the call to the method is as follows:
[code]
snapshot = server.create_snapshot(search_key, context)
code = snapshot.get('code')
        server.get_client_dir(snapshot.get('code'))
[/code]
Then, on a Windows client, get_client_dir() will return:
[code]
T:/assets/sample3d/asset/chr/chr003/scenes
[/code]
'''
return self.server.get_client_dir(self.ticket, snapshot_code, file_type, mode)
def get_path_from_snapshot(self, snapshot_code, file_type='main', mode='client_repo'):
'''API Function: get_path_from_snapshot(snapshot_code, file_type='main')
Get a full path from a snapshot
@param:
snapshot_code - the unique code / search_key of the snapshot
@keyparam:
file_type - each file in a snapshot is identified by a file type.
This parameter specifies which type. Defaults to 'main'
mode - Forces the type of folder path returned to use the value from the
appropriate tactic_<SERVER_OS>-conf.xml configuration file.
Values include 'lib', 'web', 'local_repo', 'sandbox', 'client_repo', 'relative'
lib = the NFS asset directory from the server point of view
web = the http asset directory from the client point of view
local_repo = the local sync of the TACTIC repository
sandbox = the local sandbox (work area) designated by TACTIC
client_repo (default) = the asset directory from the client point of view
If there is no value for win32_client_repo_dir or linux_client_repo_dir
in the config, then the value for asset_base_dir will be used instead.
            relative = the relative directory without any base
@return:
        string - the full path to the snapshot file in the repository
@example:
If the tactic_<SERVER_OS>-conf.xml configuration file contains the following:
[code]
<win32_client_repo_dir>T:/assets</win32_client_repo_dir>
[/code]
and if the call to the method is as follows:
[code]
snapshot = server.create_snapshot(search_key, context)
code = snapshot.get('code')
server.get_path_from_snapshot(snapshot.get('code'))
# in a trigger
snapshot_key = self.get_input_value("search_key")
server.get_path_from_snapshot(snapshot_key)
[/code]
Then, on a Windows client, get_path_from_snapshot() will return:
[code]
T:/assets/sample3d/asset/chr/chr003/scenes/chr003_rig_v003.txt
[/code]
'''
return self.server.get_path_from_snapshot(self.ticket, snapshot_code, file_type, mode)
def get_expanded_paths_from_snapshot(self, snapshot_code, file_type='main'):
'''API Function: get_expanded_paths_from_snapshot(snapshot_code, file_type='main')
Return the expanded path of a snapshot (used for
ranges of files)
@param:
snapshot_code - the unique code of the snapshot
@keyparam:
file_type - each file in a snapshot is identified by a file type.
This parameter specifies which type. Defaults to 'main'
@return:
string - path
'''
return self.server.get_expanded_paths_from_snapshot(self.ticket, snapshot_code, file_type)
def get_all_paths_from_snapshot(self, snapshot_code, mode='client_repo', expand_paths=False, filename_mode=''):
'''API Function: get_all_paths_from_snapshot(snapshot_code, mode='client_repo', expand_paths=False, filename_mode='')
Get all paths from snapshot
@param:
snapshot_code - the unique code of the snapshot
@keyparam:
mode - forces the type of folder path returned to use the value from the
appropriate tactic_<SERVER_OS>-conf.xml configuration file.
Values include 'lib', 'web', 'local_repo', 'sandbox', 'client_repo', 'relative'
lib = the NFS asset directory from the server point of view
web = the http asset directory from the client point of view
local_repo = the local sync of the TACTIC repository
sandbox = the local sandbox (work area) designated by TACTIC
client_repo (default) = the asset directory from the client point of view
If there is no value for win32_client_repo_dir or linux_client_repo_dir
in the config, then the value for asset_base_dir will be used instead.
            relative = the relative directory without any base
        expand_paths - expand the paths of a sequence check-in; for a directory check-in, the contents of the directory are listed as well
filename_mode - source or '', where source reveals the source_path of the check-in
@return:
list - paths
'''
return self.server.get_all_paths_from_snapshot(self.ticket, snapshot_code, mode, expand_paths, filename_mode)
def get_preallocated_path(self, snapshot_code, file_type='main', file_name='', mkdir=True, protocol='client_repo', ext=''):
'''API Function: get_preallocated_path(snapshot_code, file_type='main', file_name='', mkdir=True, protocol='client_repo', ext='')
Get the preallocated path for this snapshot. It assumes that
this checkin actually exists in the repository and will create virtual
entities to simulate a checkin. This method can be used to determine
where a checkin will go. However, the snapshot must exist
using create_snapshot() or some other method. For a pure virtual naming
simulator, use get_virtual_snapshot_path().
@param:
snapshot_code - the code of a preallocated snapshot. This can be
            created by get_snapshot()
@keyparam:
file_type - the type of file that will be checked in. Some naming
conventions make use of this information to separate directories
for different file types
file_name - the desired file name of the preallocation. This information
may be ignored by the naming convention or it may use this as a
base for the final file name
mkdir - an option which determines whether the directory of the
preallocation should be created
protocol - It's either client_repo, sandbox, or None. It determines whether the
path is from a client or server perspective
ext - force the extension of the file name returned
@return:
string - the path where add_file() expects the file to be checked into
@example:
it saves time if you get the path and copy it to the final destination first.
[code]
snapshot = self.server.create_snapshot(search_key, context)
snapshot_code = snapshot.get('code')
file_name = 'input_file_name.txt'
orig_path = 'C:/input_file_name.txt'
path = self.server.get_preallocated_path(snapshot_code, file_type, file_name)
# the path where it is supposed to go is generated
new_dir = os.path.dirname(path)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
shutil.copy(orig_path, path)
self.server.add_file(snapshot_code, path, file_type, mode='preallocate')
[/code]
'''
return self.server.get_preallocated_path(self.ticket, snapshot_code, file_type, file_name, mkdir, protocol, ext)
def get_virtual_snapshot_path(self, search_key, context, snapshot_type="file", level_key=None, file_type='main', file_name='', mkdirs=False, protocol='client_repo', ext=''):
'''API Function: get_virtual_snapshot_path(search_key, context, snapshot_type="file", level_key=None, file_type='main', file_name='', mkdirs=False, protocol='client_repo', ext='')
        Create a virtual snapshot and return the path that this snapshot
        would generate through the naming conventions.  This is most useful
        for testing naming conventions.
@param:
snapshot creation:
-----------------
search_key - a unique identifier key representing an sobject
context - the context of the checkin
@keyparam:
        snapshot_type - [optional] describes what kind of a snapshot this is.
More information about a snapshot type can be found in the
prod/snapshot_type sobject
description - [optional] optional description for this checkin
level_key - the unique identifier of the level that this
is to be checked into
@keyparam:
path creation:
--------------
file_type - the type of file that will be checked in. Some naming
conventions make use of this information to separate directories
for different file types
file_name - the desired file name of the preallocation. This information
may be ignored by the naming convention or it may use this as a
base for the final file name
        mkdirs - an option which determines whether the directory of the
preallocation should be created
protocol - It's either client_repo, sandbox, or None. It determines whether the
path is from a client or server perspective
ext - force the extension of the file name returned
@return:
string - path as determined by the naming conventions
'''
return self.server.get_virtual_snapshot_path(self.ticket, search_key, context, snapshot_type, level_key, file_type, file_name, mkdirs, protocol, ext)
# NOTE: this is very specific to the Maya tools and can be considered
# deprecated
def get_md5_info(self, md5_list, new_paths, parent_code, texture_cls, file_group_dict, project_code, mode):
        '''API Function: get_md5_info(md5_list, new_paths, parent_code, texture_cls, file_group_dict, project_code, mode)
Get md5 info for a given list of texture paths, mainly returning if this md5 is a match or not
@param:
md5_list - md5_list
new_paths - list of file_paths
parent_code - parent code
texture_cls - Texture or ShotTexture
file_group_dict - file group dictionary storing all the file groups
project_code - project_code
mode - texture matching mode (md5, file_name)
@return:
dictionary - a dictionary of path and a subdictionary of is_match, repo_file_code, repo_path, repo_file_range
'''
return self.server.get_md5_info(self.ticket, md5_list, new_paths, parent_code, texture_cls, file_group_dict, project_code, mode )
#
# UI methods
#
def get_widget(self, class_name, args={}, values={}):
'''API Function: get_widget(class_name, args={}, values={})
Get a defined widget
@params:
class_name - the fully qualified class name of the widget
@keyparams:
args - keyword arguments required to create a specific widget
values - form values that are passed in from the interface
@return:
string - html form of the widget
@example:
class_name = 'TableLayoutWdg'
args = {
'view': 'manage',
'search_type': 'prod/asset',
}
        widget = server.get_widget(class_name, args)
'''
        # pass the form values through, as documented above
        return self.server.get_widget(self.ticket, class_name, args, values)
def class_exists(self, class_path):
'''determines if a class exists on the server
@params
class_path - fully qualified python class path
@return
boolean: true if class exists and can be seen
'''
return self.server.class_exists(self.ticket, class_path)
def execute_python_script(self, script_path):
        '''API Function: execute_python_script(script_path)
        Execute a server-side python script defined in the Script Editor
@param:
script_path - script path in Script Editor, e.g. test/eval_sobj
@return:
dictionary - returned data structure
'''
return self.server.execute_python_script(self.ticket, script_path)
def execute_cmd(self, class_name, args={}, values={}):
'''API Function: execute_cmd(class_name, args={}, values={})
Execute a command
@param:
class_name - the fully qualified class name of the widget
@keyparam:
args - keyword arguments required to create a specific widget
values - form values that are passed in from the interface
@return:
string - description of command
'''
return self.server.execute_cmd(self.ticket, class_name, args, values)
#
# Widget Config methods
#
def get_config_definition(self, search_type, view, element_name, personal=False):
'''API Function: get_config_definition(search_type, view, element_name)
Get the widget configuration definition for an element
@param:
search_type - search type that this config relates to
view - view to look for the element
element_name - name of the element
@keyparam:
personal - True if it is a personal definition
@return:
string - xml of the configuration
'''
return self.server.get_config_definition(self.ticket, search_type, view, element_name, personal)
def update_config(self, search_type, view, element_names):
'''API Function: update_config(search_type, view, element_names)
Update the widget configuration like ordering for a view
@param:
search_type - search type that this config relates to
view - view to look for the element
element_names - element names in a list
@return:
string - updated config xml snippet
'''
return self.server.update_config(self.ticket, search_type, view, element_names)
def add_config_element(self, search_type, view, name, class_name=None, display_options={}, action_class_name=None, action_options={}, element_attrs={},login=None, unique=True, auto_unique_name=False, auto_unique_view=False):
'''API Function: add_config_element(search_type, view, name, class_name=None, display_options={}, action_class_name=None, action_options={}, element_attrs={},login=None, unique=True, auto_unique_name=False, auto_unique_view=False)
This method adds an element into a config. It is used by various
        UI components to add a new widget element to a particular view.
@param:
search_type - the search type that this config belongs to
view - the specific view of the search type
name - the name of the element
@keyparam:
class_name - the fully qualified class of the display
action_class_name - the fully qualified class of the action
        display_options - keyword options in a dictionary to construct the specific display
        action_options - keyword options in a dictionary to construct the specific action
element_attrs - element attributes in a dictionary
login - login name if it is for a specific user
        unique - add a unique element if True; update the element if False.
auto_unique_name - auto generate a unique element and display view name
auto_unique_view - auto generate a unique display view name
@return:
boolean - True
@example:
This will add a new element to the "character" view for a 3D asset
[code]
search_type = 'prod/asset'
view = 'characters'
class_name = 'tactic.ui.common.SimpleElementWdg'
server.add_config_element(search_type, view, class_name)
[/code]
This will add a new element named "user" to the "definition" view. It contains detailed display and action nodes
[code]
data_dict = {} # some data here
search_type = 'prod/asset'
server.add_config_element(search_type, 'definition', 'user', class_name = data_dict['class_name'], display_options=data_dict['display_options'], element_attrs=data_dict['element_attrs'], unique=True, action_class_name=data_dict['action_class_name'], action_options=data_dict['action_options'])
[/code]
'''
return self.server.add_config_element(self.ticket, search_type, view, name,\
class_name, display_options, action_class_name, action_options, element_attrs, login, unique, auto_unique_name, auto_unique_view)
def _setup(self, protocol="xmlrpc"):
# if this is being run in the tactic server, have the option
# to use TACTIC code directly
if protocol == 'local':
# import some server libraries
from pyasm.biz import Project
from pyasm.common import Environment
from pyasm.prod.service import ApiXMLRPC
from pyasm.web import WebContainer
# set the ticket
security = Environment.get_security()
if not security:
raise TacticApiException("Security not initialized. This may be because you are running the client API in 'local' mode without run initializing Batch")
# set the project
project_code = Project.get_project_code()
self.set_project(project_code)
# set the ticket
ticket = security.get_ticket_key()
self.set_ticket(ticket)
# set the protocol to local for the api class
# note ticket has to be set first
self.server = ApiXMLRPC()
self.server.set_protocol(protocol)
# if server name has already been set, use that one
if self.server_name:
self.has_server = True
return
web = WebContainer.get_web()
if web:
self.server_name = web.get_http_host()
if self.server_name:
self.has_server = True
else:
# else guess that it is localhost
self.server_name = "localhost"
self.has_server = True
return
elif protocol =='xmlrpc':
# get the env variables
env_user = os.environ.get('TACTIC_USER')
env_password = os.environ.get('TACTIC_PASSWORD')
env_server = os.environ.get('TACTIC_SERVER')
env_ticket = os.environ.get('TACTIC_TICKET')
env_project = os.environ.get('TACTIC_PROJECT')
# if all three are set, then it is not necessary to look at
# the resource file
if not (env_server and (env_user or env_ticket) and env_project):
# need to scope by user
# this is dealt with in get_resource_path already
#if not self.login:
# self.login = getpass.getuser()
file_path = self.get_resource_path()
if not os.path.exists(file_path):
msg = "[%s] does not exist yet. There is not enough information to authenticate the server. Either set the appropriate environment variables or run get_ticket.py" %file_path
raise TacticApiException(msg)
# try to open the resource file
file = open(file_path)
lines = file.readlines()
file.close()
                rc_server = None
                rc_ticket = None
                rc_project = None
                rc_login = None
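                # the resource file is expected to contain simple name=value
                # lines (comments start with '#'), for example:
                #
                #     server=localhost
                #     ticket=XXXXXXXXXXXXXXXX
                #     project=admin
                #     login=joe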
                for line in lines:
                    line = line.strip()
                    # skip blank lines and comments
                    if not line or line.startswith("#"):
                        continue
                    # split on the first '=' only so that values may contain '='
                    name, value = line.split("=", 1)
if name == "server":
#self.set_server(value)
rc_server = value
elif name == "ticket":
#self.set_ticket(value)
rc_ticket = value
elif name == "project":
#self.set_project(value)
rc_project = value
elif name == "login":
                        # login name stored in the resource file
rc_login = value
# these have to be issued in the correct order
if rc_server:
self.set_server(rc_server)
if rc_project:
self.set_project(rc_project)
if rc_ticket:
# get the project
project = self.get_project()
# set a default if one does not exist
if not project:
self.set_project("admin")
self.set_ticket(rc_ticket)
if rc_login:
self.login = rc_login
# override with any environment variables that are set
if env_server:
self.set_server(env_server)
if env_project:
self.set_project(env_project)
if env_user:
# try to get a ticket with a set password
ticket = self.get_ticket(env_user, env_password)
self.set_ticket(ticket)
if env_ticket:
self.set_ticket(env_ticket)
#self.server.set_protocol(protocol)
#
# Doc methods
#
def get_doc_link(self, alias):
        return self.server.get_doc_link(self.ticket, alias)
#
# API/Server Version functions
#
def get_release_version(self):
# DEPRECATED
print("WARNING: Deprecated function 'get_release_version'")
return self.server.get_release_version(self.ticket)
def get_server_version(self):
'''API Function: get_server_version()
@return:
string - server version'''
return self.server.get_server_version(self.ticket)
def get_server_api_version(self):
'''API Function: get_server_api_version()
@return:
string - server API version'''
version = self.server.get_server_api_version(self.ticket)
return version
def get_client_version(self):
'''API Function: get_client_version()
@return:
string - Version of TACTIC that this client came from'''
# may use pkg_resources in 2.6
if '.zip' in __file__:
import zipfile
parts = __file__.split('.zip')
zip_name = '%s.zip'%parts[0]
if zipfile.is_zipfile(zip_name):
z = zipfile.ZipFile(zip_name)
version = z.read('pyasm/application/common/interpreter/tactic_client_lib/VERSION')
version = version.strip()
z.close()
else:
dir = os.path.dirname(__file__)
f = open('%s/VERSION' % dir, 'r')
version = f.readline().strip()
f.close()
return version
def get_client_api_version(self):
'''API Function: get_client_api_version()
@return:
string - client api version'''
# may use pkg_resources in 2.6
if '.zip' in __file__:
import zipfile
parts = __file__.split('.zip')
zip_name = '%s.zip'%parts[0]
if zipfile.is_zipfile(zip_name):
z = zipfile.ZipFile(zip_name)
version = z.read('pyasm/application/common/interpreter/tactic_client_lib/VERSION_API')
version = version.strip()
z.close()
else:
dir = os.path.dirname(__file__)
f = open('%s/VERSION_API' % dir, 'r')
version = f.readline().strip()
f.close()
return version
server = None
def get(cls, protocol=''):
'''get function which treats the server stub as a singleton'''
try:
from pyasm.common import Container
server = Container.get("TacticServerStub")
if not server:
from pyasm.common import Environment
app_server = Environment.get_app_server()
if protocol:
server = TacticServerStub(protocol=protocol)
elif app_server in ["batch", "xmlrpc"]:
server = TacticServerStub(protocol='local')
else:
server = TacticServerStub()
Container.put("TacticServerStub", server)
return server
except ImportError as e:
if not cls.server:
cls.server = TacticServerStub(protocol='xmlrpc')
return cls.server
get = classmethod(get)
def set(cls, server=None):
try:
from pyasm.common import Container
Container.put("TacticServerStub", server)
except ImportError:
cls.server = server
set = classmethod(set)
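# A hedged usage sketch (illustration only, not part of the original module):
# obtain the singleton stub and query the server version.  It assumes a valid
# resource file or the TACTIC_* environment variables are already set up, as
# handled by _setup() above.
def _example_get_server_version():  # hypothetical helper, illustration only
    server = TacticServerStub.get()
    return server.get_server_version()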
#
# Objects
#
class Command(object):
def get_description(self):
return "No description"
def execute_cmd(self):
self.server = TacticServerStub()
self.server.start(self.get_description())
try:
self.execute()
except Exception as e:
self.server.abort()
raise
else:
self.server.finish()
    def execute(self):
        # subclasses must override this with the actual work; the original
        # self-call here would recurse forever if left unimplemented
        raise NotImplementedError("Command.execute() must be overridden")
class Search(object):
pass
class SObject(dict):
def get_search_key(self):
return self['__search_key__']
| epl-1.0 |
40223141/w16b_test | static/Brython3.1.1-20150328-091302/Lib/abc.py | 765 | 8057 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
from _weakrefset import WeakSet
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractclassmethod(classmethod):
"""
A decorator indicating abstract classmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractclassmethod
def my_abstract_classmethod(cls, ...):
...
'abstractclassmethod' is deprecated. Use 'classmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractstaticmethod(staticmethod):
"""
A decorator indicating abstract staticmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractstaticmethod
def my_abstract_staticmethod(...):
...
'abstractstaticmethod' is deprecated. Use 'staticmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractproperty(property):
"""
A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C(metaclass=ABCMeta):
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
'abstractproperty' is deprecated. Use 'property' with 'abstractmethod'
instead.
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = {name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False)}
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC.
Returns the subclass, to allow usage as a class decorator.
"""
if not isinstance(subclass, type):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return subclass # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
return subclass
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print("%s: %r" % (name, value), file=file)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking
subclass = instance.__class__
if subclass in cls._abc_cache:
return True
subtype = type(instance)
if subtype is subclass:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subclass in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subclass)
return any(cls.__subclasscheck__(c) for c in {subclass, subtype})
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
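# A hedged usage sketch (illustration only, not part of the stdlib module):
# ABCMeta.register() makes an unrelated class a "virtual subclass", so
# isinstance()/issubclass() succeed without real inheritance.
def _example_virtual_subclass():
    class Sized(metaclass=ABCMeta):
        @abstractmethod
        def __len__(self):
            return 0
    class Box:
        def __len__(self):
            return 3
    Sized.register(Box)           # Box now counts as a subclass of Sized
    assert issubclass(Box, Sized)
    assert isinstance(Box(), Sized)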
| gpl-3.0 |
mattilyra/scikit-learn | sklearn/metrics/regression.py | 31 | 17366 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
from ..externals.six import string_types
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
        'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
    multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
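# A hedged illustration (not part of scikit-learn): _check_reg_targets reshapes
# 1d inputs into column vectors and passes a valid ``multioutput`` through, e.g.
#
#     >>> y_type, yt, yp, mo = _check_reg_targets([3, -0.5, 2], [2.5, 0.0, 2],
#     ...                                         'uniform_average')
#     >>> y_type, yt.shape, yp.shape, mo
#     ('continuous', (3, 1), (3, 1), 'uniform_average')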
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default value corresponds to 'variance_weighted', this behaviour is
deprecated since version 0.17 and will be changed to 'uniform_average'
starting from 0.19.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
    # y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value which is deprecated since "
"0.17, it will be changed to 'uniform_average' "
"starting from 0.19.",
DeprecationWarning)
multioutput = 'variance_weighted'
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
ivandevp/django | tests/postgres_tests/models.py | 231 | 3562 | from django.db import connection, models
from .fields import (
ArrayField, BigIntegerRangeField, DateRangeField, DateTimeRangeField,
FloatRangeField, HStoreField, IntegerRangeField, JSONField,
)
class PostgreSQLModel(models.Model):
class Meta:
abstract = True
required_db_vendor = 'postgresql'
class IntegerArrayModel(PostgreSQLModel):
field = ArrayField(models.IntegerField())
class NullableIntegerArrayModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), blank=True, null=True)
class CharArrayModel(PostgreSQLModel):
field = ArrayField(models.CharField(max_length=10))
class DateTimeArrayModel(PostgreSQLModel):
datetimes = ArrayField(models.DateTimeField())
dates = ArrayField(models.DateField())
times = ArrayField(models.TimeField())
class NestedIntegerArrayModel(PostgreSQLModel):
field = ArrayField(ArrayField(models.IntegerField()))
class OtherTypesArrayModel(PostgreSQLModel):
ips = ArrayField(models.GenericIPAddressField())
uuids = ArrayField(models.UUIDField())
decimals = ArrayField(models.DecimalField(max_digits=5, decimal_places=2))
class HStoreModel(PostgreSQLModel):
field = HStoreField(blank=True, null=True)
class CharFieldModel(models.Model):
field = models.CharField(max_length=16)
class TextFieldModel(models.Model):
field = models.TextField()
# Only create this model for postgres >= 9.2
if connection.vendor == 'postgresql' and connection.pg_version >= 90200:
class RangesModel(PostgreSQLModel):
ints = IntegerRangeField(blank=True, null=True)
bigints = BigIntegerRangeField(blank=True, null=True)
floats = FloatRangeField(blank=True, null=True)
timestamps = DateTimeRangeField(blank=True, null=True)
dates = DateRangeField(blank=True, null=True)
class RangeLookupsModel(PostgreSQLModel):
parent = models.ForeignKey(RangesModel, models.SET_NULL, blank=True, null=True)
integer = models.IntegerField(blank=True, null=True)
big_integer = models.BigIntegerField(blank=True, null=True)
float = models.FloatField(blank=True, null=True)
timestamp = models.DateTimeField(blank=True, null=True)
date = models.DateField(blank=True, null=True)
else:
# create an object with this name so we don't have failing imports
class RangesModel(object):
pass
class RangeLookupsModel(object):
pass
# Only create this model for postgres >= 9.4
if connection.vendor == 'postgresql' and connection.pg_version >= 90400:
class JSONModel(models.Model):
field = JSONField(blank=True, null=True)
else:
# create an object with this name so we don't have failing imports
class JSONModel(object):
pass
class ArrayFieldSubclass(ArrayField):
def __init__(self, *args, **kwargs):
super(ArrayFieldSubclass, self).__init__(models.IntegerField())
class AggregateTestModel(models.Model):
"""
To test postgres-specific general aggregation functions
"""
char_field = models.CharField(max_length=30, blank=True)
integer_field = models.IntegerField(null=True)
boolean_field = models.NullBooleanField()
class StatTestModel(models.Model):
"""
To test postgres-specific aggregation functions for statistics
"""
int1 = models.IntegerField()
int2 = models.IntegerField()
related_field = models.ForeignKey(AggregateTestModel, models.SET_NULL, null=True)
class NowTestModel(models.Model):
when = models.DateTimeField(null=True, default=None)
| bsd-3-clause |
keitaroyam/yamtbx | yamtbx/dataproc/myspotfinder/command_line/make_html_report.py | 1 | 20241 | import os
import re
import time
import datetime
import collections
import math  # used by normalize() in plot_circles below
import glob
import pysqlite2.dbapi2 as sqlite3
import cPickle as pickle
import numpy
import matplotlib
matplotlib.use('Agg') # Allow to work without X
import matplotlib.figure
import matplotlib.backends.backend_agg
from PIL import Image
import iotbx.phil
from yamtbx.util import rotate_file
from yamtbx.dataproc.myspotfinder import shikalog
from yamtbx.dataproc.myspotfinder.command_line.spot_finder_gui import Stat
from yamtbx.dataproc.dataset import re_pref_num_ext
from yamtbx.dataproc import bl_logfiles
master_params_str = """\
target_dir = None
.type = path
rotate = False
.type = bool
.help = backup (rotate) old files
mode = *normal zoo
.type = choice
plot = *grid circle
.type = choice
"""
def plot_heatmap(subplot, xs, ys, ds, scaninfo):
#import scipy.interpolate
xlim = min(xs), max(xs)
ylim = min(ys), max(ys)
if scaninfo is not None:
vs, hs = scaninfo.vstep*1000., scaninfo.hstep*1000.
if scaninfo.vpoints == 1: vs = 5
if scaninfo.hpoints == 1: hs = 5
else:
vs, hs = 5, 5
zi = numpy.zeros((int((ylim[1]-ylim[0])/vs+1.5),
int((xlim[1]-xlim[0])/hs+1.5)))
for x, y, d in zip(xs, ys, ds):
i,j = int((y-ylim[0])/vs+0.5), int((x-xlim[0])/hs+0.5)
zi[i,j] = d
p1 = subplot.imshow(zi, origin='lower',
extent=[min(xs)-hs/2, max(xs)+hs/2,
min(ys)-vs/2, max(ys)+vs/2],
interpolation='none', cmap="YlOrRd")#PuRd
return p1
# plot_heatmap()
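# A hedged illustration (not part of the original code): with hstep = 0.025 mm
# (hs = 25 um) and xlim[0] = -50 um, a spot at x = 0 um lands in column
# j = int((0 - (-50)) / 25 + 0.5) = int(2.5) = 2 of the zi grid; the rows are
# rounded the same way using vs.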
def plot_circles(subplot, xs, ys, ds, zero_xs, zero_ys):
def normalize(v, m=100., sd=60.):
vm = float(sum(v))/float(len(v))
vsd = math.sqrt(sum(map(lambda x:(x-vm)**2, v))/float(len(v)))
if vsd < 1e-12:
return [m for x in xrange(len(v))]
return map(lambda x:sd*(x-vm)/vsd+m, v)
# normalize()
def normalize_max(v, maximum=400.):
max_v = max(v)
f = maximum / max_v if max_v > 0 else 1.
return map(lambda x:f*x + 1., v) # add 1 to make zero-value pickable
# normalize_max()
p1 = subplot.scatter(xs, ys, s=normalize_max(ds), c=ds, alpha=0.5)
p2 = subplot.scatter(zero_xs, zero_ys, s=50, marker="x", c=[0]*len(zero_xs), alpha=0.5)
return p1, p2
# plot_circles()
def prepare_plot(plot_data, f, kind, wdir, rotate=False, plot_grid=True):
def normalize_max(v, maximum=400.):
max_v = max(v)
f = maximum / max_v if max_v > 0 else 1.
return map(lambda x:f*x + 1., v) # add 1 to make zero-value pickable # XXX when max_v is Inf?
# normalize_max()
scan_prefix = f[:f.index(" ")] if " (phi=" in f else f
pngout = os.path.join(wdir, "plot_%s%s.png" % (scan_prefix, kind))
if rotate:
rotate_file(pngout)
xs, ys, ds, imgfs = [], [], [], []
zero_xs, zero_ys = [], [] # For values of zero
for imgf, stat in plot_data[f]:
gc = stat.grid_coord
if gc is None:
continue
x, y = gc
x *= 1000.
y *= 1000.
d = stat.stats[("n_spots","total_integrated_signal","median_integrated_signal").index(kind)]
xs.append(x)
ys.append(y)
ds.append(d)
imgfs.append(imgf)
if d == 0:
zero_xs.append(x)
zero_ys.append(y)
if len(xs) == 0:
return "", ""
win = (max(xs)-min(xs)+1000)/1000*400/80*1.7 # ad-hoc scale
hin = (max(ys)-min(ys)+1000)/1000*400/80
fig = matplotlib.figure.Figure(figsize=(win,hin), dpi=80) # figsize in inches
ax = fig.add_subplot(111)
#p = ax.scatter(xs, ys, s=normalize_max(ds), c=ds, alpha=0.5) # s in points^2
scaninfo = plot_data[f][0][1].scan_info
if plot_grid:
p = plot_heatmap(ax, xs, ys, ds, scaninfo)
else:
p, _ = plot_circles(ax, xs, ys, ds, zero_xs, zero_ys)
if max(ds) - min(ds) > 1e-5:
fig.colorbar(p)
ax.scatter(zero_xs, zero_ys, s=50, marker="x", c=[0]*len(zero_xs), alpha=0.5)
ax.set_xlabel("horizontal [um]")
ax.set_ylabel("vertical [um]")
if scaninfo is not None:
vp, hp = scaninfo.vpoints, scaninfo.hpoints
vs, hs = scaninfo.vstep*1000., scaninfo.hstep*1000.
if 1 in (vp, hp) or len(plot_data[f]) <= hp:
ax.set_aspect("auto")
else:
ax.set_aspect("equal")
if vp == hp == 1:
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
elif vp == 1:
ax.set_xlim(min(xs) - hs, max(xs) + hs)
ax.set_ylim(-10, 10)
elif hp == 1:
ax.set_xlim(-10, 10)
ax.set_ylim(min(ys) - vs, max(ys) + vs)
else:
ax.set_xlim(min(xs) - hs, max(xs) + hs)
ax.set_ylim(min(ys) - vs, max(ys) + vs)
else:
# Should never reach here.. but should we set limit here?
vs, hs = 5, 5
canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
canvas.print_figure(pngout+".tmp", dpi=80, format="png")
img_width = fig.get_figwidth() * 80
img_height = fig.get_figheight() * 80
map_str = '<map name="%smap">\n' % scan_prefix
for x, y, imgf in zip(xs, ys, imgfs):
if plot_grid:
tx1, ty1 = ax.transData.transform((x-hs/2.,y-vs/2.))
tx2, ty2 = ax.transData.transform((x+hs/2.,y+vs/2.))
map_str += ' <area shape="rect" coords="%.2f,%.2f,%.2f,%.2f" title="%s" onClick=\'plotClick("%s", "%s")\'>\n' % (tx1, img_height-ty1, tx2, img_height-ty2, os.path.basename(imgf), scan_prefix, os.path.basename(imgf))
else:
tx, ty = ax.transData.transform((x,y))
map_str += ' <area shape="circle" coords="%.2f,%.2f,10" title="%s" onClick=\'plotClick("%s", "%s")\'>\n' % (tx, img_height-ty, os.path.basename(imgf), scan_prefix, os.path.basename(imgf))
map_str += "</map>"
return pngout, map_str
# prepare_plot()
def make_html_report(current_stats, wdir, htmlout, zoo_mode, rotate=False, plot_grid=True):
#plot_data = self.plotFrame.data
shikalog.info("Making HTML report for %s"%wdir)
startt = time.time()
plot_data = collections.OrderedDict()
for f, stat in current_stats.items():
if stat is None: continue
fpref = decide_fpref(f, stat.scan_info)
plot_data.setdefault(fpref, []).append((f, stat))
#if gui_params.mode == "zoo": htmlout = os.path.join(wdir, "report_zoo.html")
#else: htmlout = os.path.join(wdir, "report.html")
if rotate: rotate_file(htmlout)
if zoo_mode: assert len(plot_data) <= 1
kinds = ("total_integrated_signal", "median_integrated_signal", "n_spots")
plots=""
pngs = []
for f in plot_data:
scan_prefix = f[:f.index(" ")] if " (phi=" in f else f
info = plot_data[f][0][1].scan_info
if info is None: info = bl_logfiles.ScanInfo() # Empty info
plots += '<table border=0 style="margin-bottom:0px">\n <tr><td>\n'
if zoo_mode:
try:
im = Image.open(os.path.join(wdir, "../../../before.ppm"))
im.save(os.path.join(wdir, "loop_before.jpg"))
except:
import traceback
print "Can't convert loop image"
print traceback.format_exc()
plots += ' Loop image</td><td><img src="loop_before.jpg" /></td></tr>\n'
plots += ' <tr><td>\n'
plots += ' <table class="info"><tr><th>scan</th><td>%s</td></tr>\n' % scan_prefix
plots += ' <tr><th>date</th><td>%s</td></tr>\n' % (info.date.strftime("%Y/%m/%d %H:%M:%S") if info.date!=0 else "??")
if info.is_shutterless():
plots += ' <tr><th>fixed spindle</th><td>%.2f°</td></tr>\n' % info.fixed_spindle
plots += ' <tr><th>frame rate</th><td>%.2f [Hz]</td></tr>\n' % info.frame_rate
else:
plots += ' <tr><th>osc. start</th><td>%.2f°</td></tr>\n' % info.osc_start
plots += ' <tr><th>osc. step</th><td>%.2f°</td></tr>\n' % info.osc_step
plots += ' <tr><th>exp. time</th><td>%.2f [sec]</td></tr>\n' % info.exp_time
plots += ' <tr><th>beam size</th><td>h= %.1f, v= %.1f [μm]</td></tr>\n' % (info.beam_hsize, info.beam_vsize)
plots += ' <tr><th>attenuator</th><td>%s %.1f [μm]</td></tr>\n' % info.attenuator
plots += ' <tr><th>distance</th><td>%.2f [mm]</td></tr>\n' % info.distance
plots += ' <tr><th>wavelength</th><td>%.4f [Å]</td></tr>\n' % info.wavelength
plots += ' <tr><th>scan points</th><td>v=%d, h=%d</td></tr>\n' % (info.vpoints, info.hpoints)
plots += ' <tr><th>scan steps</th><td>v=%.2f, h=%.2f [μm]</td></tr>\n' % (info.vstep*1000., info.hstep*1000.)
plots += ' </table>\n'
for i, kind in enumerate(kinds):
pngout, mapstr = prepare_plot(plot_data, f, kind, wdir, rotate, plot_grid)
pngs.append(pngout) # rename later
adds = ""
if i == 0:
plots += ' <td><img name="%s" src="%s" usemap="#%smap" /><br />\n' % (scan_prefix, os.path.basename(pngout), scan_prefix)
plots += '<form>\n'
adds = ' checked="checked"'
plots += '<input type="radio" name="spot_mode" value="%s" onClick="changeplot(this, \'%s\')"%s />%s<br />\n' % (kind, scan_prefix, adds, kind)
plots += '</form>%s</td></tr></table><br>\n\n' % mapstr # The last mapstr is used. This is dirty way, though.
plots += '<table border=0 style="margin-bottom:20px">\n <tr><td>\n'
plots += '<td style="border:solid 1px #999"><canvas id="%scanvas" width=600 height=600></canvas>\n' % scan_prefix
plots += '<td id="%sinfo" valign="top"></tr></table>\n\n' % scan_prefix
result = current_stats.items()
if len(result) == 0:
shikalog.warning("No results found. Exiting. %s"% wdir)
return
dbfile = os.path.join(wdir, "shika.db")
con = sqlite3.connect(dbfile, timeout=10, isolation_level=None)
con.execute('pragma query_only = ON;')
print "Reading data from DB for making report html."
c = con.execute("select filename,spots from spots")
dbspots = dict(map(lambda x: (str(x[0]), pickle.loads(str(x[1]))), c.fetchall()))
spot_data = "var spot_data = {"
for i, (f, stat) in enumerate(result):
if stat is None: continue
bf = os.path.basename(f)
spots = dbspots[bf]["spots"]
thumb_posmag = dbspots[bf]["thumb_posmag"]
r = re.search("^(.*)_([0-9]+)\.[^0-9]+$", bf)
prefix, num = r.group(1), int(r.group(2))
spot_data += '"%s":[[' % bf
for y,x,snr,d in spots:
#x, y = spot.max_pxl_y(), spot.max_pxl_x()
pos = thumb_posmag[0:2]
mag = thumb_posmag[2]
x, y = (x - pos[0])*mag, (y - pos[1])*mag
spot_data += "[%d,%d]," % (x, y)
spot_data += "], %.1f, %.1f, %d, %d]," % (stat.stats[1], stat.stats[2], stat.stats[0], num)
spot_data += "};"
spot_data = spot_data.replace("inf,", "Infinity,").replace("nan,", "NaN,")
con.close()
# Determine img picture extension
img_ext = ".png" if os.path.exists(os.path.join(wdir, os.path.basename(result[0][0])+".png")) else ".jpg"
jpg_dirs = "var jpg_dirs = {"
flag_tiled_jpg = False
if glob.glob(os.path.join(wdir, "thumb_*")):
for res in result:
r = re.search("^(.*)_([0-9]+)\.[^0-9]+$", os.path.basename(res[0]))
prefix, num = r.group(1), int(r.group(2))
jd = os.path.join("thumb_%s_%.3d" % (prefix, num//1000))
if not os.path.exists(jd): flag_tiled_jpg = True # THIS MAY CAUSE A PROBLEM..
jpg_dirs += '"%s":"%s",' % (os.path.basename(res[0]), jd)
else:
for res in result:
jpg_dirs += '"%s":".",' % os.path.basename(res[0])
jpg_dirs += "};"
ofs = open(htmlout, "w")
ofs.write("""\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<title>SHIKA report</title>
<script type="text/javascript">
<!--
function changeplot(obj, name){
document.images[name].src = "plot_"+name+obj.value+".png";
}
%(spot_data)s
%(jpg_dirs)s
""" % dict(spot_data=spot_data,
jpg_dirs=jpg_dirs if not flag_tiled_jpg else ""))
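    # When thumbnails are stored as 10x10 tiled JPEGs (no per-frame thumb_* files
    # on disk), the click handler written below has to pick the 600x600 tile of
    # the mosaic that corresponds to the clicked frame before drawing it onto the canvas.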
if flag_tiled_jpg: # FOR TILED JPEG
ofs.write("""\
function plotClick(scanprefix, imgfile) {
var f = imgfile;
var data = spot_data[f];
var img = new Image();
var idx = Math.floor((data[4]-1)/100);
var n1 = idx*100+1;
var n2 = (idx+1)*100;
img.src = "thumb_" + scanprefix.slice(0,-1) + "/" + scanprefix + ("00000"+n1).slice(-6) + "-" + ("00000"+n2).slice(-6) + ".jpg"; // prefix ends with _
var idx2 = (data[4]-1)%%100;
var sx = idx2%%10;
var sy = Math.floor(idx2/10);
img.onload = (function(fn){
return function(){
var td = document.getElementById(scanprefix+"info");
td.innerHTML = "<table border=0><tr><td>File name: <td>" + imgfile + "<tr><td>total signal: <td>" + data[1] + "<tr><td>median signal: <td>" + data[2] + "<tr><td>N_spots: <td>" + data[3] + "</table>";
var t = data[0];
var canvas = document.getElementById(scanprefix+"canvas");
var ctx = canvas.getContext('2d');
ctx.clearRect(0,0,canvas.width,canvas.height);
ctx.drawImage(this, sx*600, sy*600, 600, 600, 0, 0, 600, 600);
""" % dict(img_ext=img_ext))
else: # FOR SINGLE JPEGs
ofs.write("""\
function plotClick(scanprefix, imgfile) {
var f = imgfile;
var data = spot_data[f];
var img = new Image();
img.src = jpg_dirs[f] + "/" + f + "%(img_ext)s";
img.onload = (function(fn){
return function(){
var td = document.getElementById(scanprefix+"info");
td.innerHTML = "<table border=0><tr><td>File name: <td>" + imgfile + "<tr><td>total signal: <td>" + data[1] + "<tr><td>median signal: <td>" + data[2] + "<tr><td>N_spots: <td>" + data[3] + "</table>";
var t = data[0];
var canvas = document.getElementById(scanprefix+"canvas");
var ctx = canvas.getContext('2d');
ctx.clearRect(0,0,canvas.width,canvas.height);
ctx.drawImage(this, 0, 0);
""" % dict(img_ext=img_ext))
# Common parts
ofs.write("""\
for (var i = 0; i < t.length; i++) {
ctx.rect(t[i][0]-6, t[i][1]-6, 12, 12);
}
ctx.strokeStyle = "red";
ctx.lineWidth = 1;
ctx.stroke();
var center = [300,300];
ctx.beginPath();
ctx.strokeStyle = "blue";
ctx.moveTo(center[0]-10, center[1]);
ctx.lineTo(center[0]+10, center[1]);
ctx.moveTo(center[0], center[1]-10);
ctx.lineTo(center[0], center[1]+10);
ctx.stroke();
}
}(f));
}
//-->
</script>
<style type="text/css">
<!--
table.info {
border-collapse: separate;
border-spacing: 7px;
}
table.info th {
text-align: left;
}
table.images {
border-collapse: collapse;
border: solid 1px #999;
}
table.images caption {
margin-top: 1em;
text-align: left;
}
table.images th,
table.images td {
border: solid 1px #999;
}
table.images th {
background: #E6E6E6;
text-align: center;
white-space: nowrap;
}
-->
</style>
</head>
<body>
<h1>SHIKA report</h1>
<div align="right">
Created on %(date)s<br>
Original directory: %(wdir)s
</div>
<hr style="height: 1px;border: none;border-top: 1px #000000 dotted;" />
%(plots)s
</body>
</html>
""" % dict(plots=plots,
date=datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S"),
wdir=wdir,
))
shikalog.debug("Renaming png files in %s" % wdir)
for png in pngs:
os.rename(png+".tmp", png)
delt = time.time() - startt
shikalog.info("HTML making Done (took %f s). Open? firefox %s"% (delt, htmlout))
# make_html_report()
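# load_results() below reads the spot-finding results stored in shika.db and
# pairs each image with the goniometer position and grid coordinate recorded in
# diffscan.log, returning an OrderedDict of per-image Stat objects.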
def load_results(target_dir):
current_stats = collections.OrderedDict()
dbfile = os.path.join(target_dir, "_spotfinder", "shika.db")
if not os.path.isfile(dbfile):
shikalog.error("%s not found." % dbfile)
return
scanlog = os.path.join(target_dir, "diffscan.log")
if not os.path.isfile(scanlog):
shikalog.error("diffscan.log not found in %s" % target_dir)
return
slog = bl_logfiles.BssDiffscanLog(scanlog)
slog.remove_overwritten_scans()
shikalog.info("Loading data: %s" % dbfile)
startt = time.time()
result = []
con = sqlite3.connect(dbfile, timeout=10, isolation_level=None)
shikalog.debug("Opening db with query_only = ON")
con.execute('pragma query_only = ON;')
cur = con.cursor()
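    # The queries below are retried for up to ~60 s: sqlite3 may raise
    # DatabaseError while shika.db is locked or still being written by the spot finder.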
for itrial in xrange(60):
try:
c = cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='status';")
if c.fetchone() is None:
shikalog.error("No 'status' in %s" % dbfile)
return
break
except sqlite3.DatabaseError:
shikalog.warning("DB failed. retrying (%d)" % itrial)
time.sleep(1)
continue
for itrial in xrange(60):
try:
c = con.execute("select filename,spots from spots")
results = dict(map(lambda x: (str(x[0]), pickle.loads(str(x[1]))), c.fetchall()))
break
except sqlite3.DatabaseError:
shikalog.warning("DB failed. retrying (%d)" % itrial)
time.sleep(1)
continue
print "DEBUG:: scans=", slog.scans
for scan in slog.scans:
for imgf, (gonio, gc) in scan.filename_coords:
#print imgf, (gonio, gc)
stat = Stat()
            # the extension should always be .img in shika.db if generated from an EIGER stream
possible_imgfs = (imgf, os.path.splitext(imgf)[0] + ".img",
re.sub("(.*)_0([0-9]{6})\..*$", r"\1_\2.img", imgf), # too dirty fix!! for new bss which writes 7-digits filename..
)
imgfs_found = filter(lambda x: x in results, possible_imgfs)
if not imgfs_found: continue
imgf = imgfs_found[0]
snrlist = map(lambda x: x[2], results[imgf]["spots"])
stat.stats = (len(snrlist), sum(snrlist), numpy.median(snrlist) if snrlist else 0)
stat.spots = results[imgf]["spots"]
stat.gonio = gonio
stat.grid_coord = gc
stat.scan_info = scan
stat.thumb_posmag = results[imgf]["thumb_posmag"]
stat.params = results[imgf]["params"]
stat.img_file = os.path.join(target_dir, imgf)
result.append((stat.img_file, stat))
delt = time.time() - startt
shikalog.info("Data loaded: %s (took %f sec)" % (dbfile, delt))
for f, stat in result: current_stats[f] = stat
return current_stats
# load_results()
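# decide_fpref() builds a display label for a scan from the image file prefix
# plus the spindle angle (fixed angle for shutterless scans, oscillation start
# otherwise), so scans sharing a prefix but taken at different phi stay separate.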
def decide_fpref(f, scaninfo):
fpref = re_pref_num_ext.search(os.path.basename(f)).group(1)
if scaninfo is not None:
if scaninfo.is_shutterless():
fpref += " (phi=%.2f)" % (scaninfo.fixed_spindle)
else:
fpref += " (phi=%.2f)" % (scaninfo.osc_start)
return fpref
# decide_fpref()
def run(params):
wdir = os.path.abspath(params.target_dir)
target_dir = os.path.normpath(os.path.join(wdir, ".."))
current_stats = load_results(target_dir)
zoo_mode = params.mode == "zoo"
htmlout = os.path.join(wdir, "report_zoo.html" if zoo_mode else "report.html")
make_html_report(current_stats, wdir, htmlout, zoo_mode, params.rotate, params.plot=="grid")
# run()
if __name__ == "__main__":
shikalog.config(None)
import sys
cmdline = iotbx.phil.process_command_line(args=sys.argv[1:],
master_string=master_params_str)
params = cmdline.work.extract()
args = cmdline.remaining_args
if not params.target_dir and len(args) >= 1:
params.target_dir = args[0]
run(params)
| bsd-3-clause |
privateip/ansible | lib/ansible/plugins/lookup/lastpass.py | 122 | 2519 | # (c) 2016, Andrew Zenk <azenk@umn.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from subprocess import Popen, PIPE
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LPassException(AnsibleError):
pass
class LPass(object):
def __init__(self, path='lpass'):
self._cli_path = path
@property
def cli_path(self):
return self._cli_path
@property
def logged_in(self):
out, err = self._run(self._build_args("logout"), stdin="n\n", expected_rc=1)
return err.startswith("Are you sure you would like to log out?")
def _run(self, args, stdin=None, expected_rc=0):
p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
out, err = p.communicate(stdin)
rc = p.wait()
if rc != expected_rc:
raise LPassException(err)
return out, err
def _build_args(self, command, args=None):
if args is None:
args = []
args = [command] + args
args += ["--color=never"]
return args
def get_field(self, key, field):
if field in ['username', 'password', 'url', 'notes', 'id', 'name']:
out, err = self._run(self._build_args("show", ["--{0}".format(field), key]))
else:
out, err = self._run(self._build_args("show", ["--field={0}".format(field), key]))
return out.strip()
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
lp = LPass()
if not lp.logged_in:
raise AnsibleError("Not logged into lastpass: please run 'lpass login' first")
field = kwargs.get('field', 'password')
values = []
for term in terms:
values.append(lp.get_field(term, field))
return values
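# Illustrative sketch only (not part of the plugin): the LPass wrapper above can
# be exercised directly; 'example.com' is a placeholder item name.
if __name__ == '__main__':
    lp = LPass()
    if lp.logged_in:
        print(lp.get_field('example.com', 'username'))
    else:
        print("Run 'lpass login' first")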
| gpl-3.0 |
Zord13appdesa/python-for-android | python3-alpha/python3-src/Lib/ast.py | 50 | 12171 | """
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
from _ast import __version__
def parse(source, filename='<unknown>', mode='exec'):
"""
Parse the source into an AST node.
Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
"""
return compile(source, filename, mode, PyCF_ONLY_AST)
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
sets, booleans, and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, str):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, (Str, Bytes)):
return node.s
elif isinstance(node, Num):
return node.n
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Set):
return set(map(_convert, node.elts))
elif isinstance(node, Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, Name):
if node.id in _safe_names:
return _safe_names[node.id]
elif isinstance(node, UnaryOp) and \
isinstance(node.op, (UAdd, USub)) and \
isinstance(node.operand, (Num, UnaryOp, BinOp)):
operand = _convert(node.operand)
if isinstance(node.op, UAdd):
return + operand
else:
return - operand
elif isinstance(node, BinOp) and \
isinstance(node.op, (Add, Sub)) and \
isinstance(node.right, (Num, UnaryOp, BinOp)) and \
isinstance(node.left, (Num, UnaryOp, BinOp)):
left = _convert(node.left)
right = _convert(node.right)
if isinstance(node.op, Add):
return left + right
else:
return left - right
raise ValueError('malformed node or string: ' + repr(node))
return _convert(node_or_string)
def dump(node, annotate_fields=True, include_attributes=False):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node):
if isinstance(node, AST):
fields = [(a, _format(b)) for a, b in iter_fields(node)]
rv = '%s(%s' % (node.__class__.__name__, ', '.join(
('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)
))
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
for a in node._attributes)
return rv + ')'
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
tedious to fill in for generated nodes, so this helper adds these attributes
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
def increment_lineno(node, n=1):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
return node
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def iter_child_nodes(node):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_docstring(node, clean=True):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
def walk(node):
"""
Recursively yield all descendant nodes in the tree starting at *node*
(including *node* itself), in no specified order. This is useful if you
only want to modify nodes in place and don't care about the context.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
class NodeVisitor(object):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
This class is meant to be subclassed, with the subclass adding visitor
methods.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `visit` method. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def visit(self, node):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
class NodeTransformer(NodeVisitor):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor methods to replace or remove the old node. If the return value of
the visitor method is ``None``, the node will be removed from its location,
otherwise it is replaced with the return value. The return value may be the
original node in which case no replacement takes place.
Here is an example transformer that rewrites all occurrences of name lookups
(``foo``) to ``data['foo']``::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
method for the node first.
For nodes that were part of a collection of statements (that applies to all
statement nodes), the visitor may also return a list of nodes rather than
just a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
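# Illustrative usage sketch (not part of the module): parse a literal expression
# and round-trip it through the helpers defined above.
def _demo():
    tree = parse("{'answer': [6, 7]}", mode='eval')
    print(dump(tree, annotate_fields=False))
    return literal_eval(tree)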
| apache-2.0 |
vveerava/Openstack | neutron/openstack/common/cache/backends.py | 76 | 7793 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
NOTSET = object()
@six.add_metaclass(abc.ABCMeta)
class BaseCache(object):
"""Base Cache Abstraction
:params parsed_url: Parsed url object.
:params options: A dictionary with configuration parameters
for the cache. For example:
- default_ttl: An integer defining the default ttl for keys.
"""
def __init__(self, parsed_url, options=None):
self._parsed_url = parsed_url
self._options = options or {}
self._default_ttl = int(self._options.get('default_ttl', 0))
@abc.abstractmethod
def _set(self, key, value, ttl, not_exists=False):
"""Implementations of this class have to override this method."""
def set(self, key, value, ttl, not_exists=False):
"""Sets or updates a cache entry
.. note:: Thread-safety is required and has to be guaranteed by the
backend implementation.
:params key: Item key as string.
:type key: `unicode string`
:params value: Value to assign to the key. This can be anything that
is handled by the current backend.
:params ttl: Key's timeout in seconds. 0 means no timeout.
:type ttl: int
:params not_exists: If True, the key will be set if it doesn't exist.
Otherwise, it'll always be set.
:type not_exists: bool
:returns: True if the operation succeeds, False otherwise.
"""
if ttl is None:
ttl = self._default_ttl
return self._set(key, value, ttl, not_exists)
def __setitem__(self, key, value):
self.set(key, value, self._default_ttl)
def setdefault(self, key, value):
"""Sets the key value to `value` if it doesn't exist
:params key: Item key as string.
:type key: `unicode string`
:params value: Value to assign to the key. This can be anything that
is handled by the current backend.
"""
try:
return self[key]
except KeyError:
self[key] = value
return value
@abc.abstractmethod
def _get(self, key, default):
"""Implementations of this class have to override this method."""
def get(self, key, default=None):
"""Gets one item from the cache
.. note:: Thread-safety is required and it has to be guaranteed
by the backend implementation.
:params key: Key for the item to retrieve from the cache.
:params default: The default value to return.
:returns: `key`'s value in the cache if it exists, otherwise
`default` should be returned.
"""
return self._get(key, default)
def __getitem__(self, key):
value = self.get(key, NOTSET)
if value is NOTSET:
raise KeyError
return value
@abc.abstractmethod
def __delitem__(self, key):
"""Removes an item from cache.
.. note:: Thread-safety is required and it has to be guaranteed by
the backend implementation.
:params key: The key to remove.
:returns: The key value if there's one
"""
@abc.abstractmethod
def _clear(self):
"""Implementations of this class have to override this method."""
def clear(self):
"""Removes all items from the cache.
.. note:: Thread-safety is required and it has to be guaranteed by
the backend implementation.
"""
return self._clear()
@abc.abstractmethod
def _incr(self, key, delta):
"""Implementations of this class have to override this method."""
def incr(self, key, delta=1):
"""Increments the value for a key
:params key: The key for the value to be incremented
:params delta: Number of units by which to increment the value.
Pass a negative number to decrement the value.
:returns: The new value
"""
return self._incr(key, delta)
@abc.abstractmethod
def _append_tail(self, key, tail):
"""Implementations of this class have to override this method."""
def append_tail(self, key, tail):
"""Appends `tail` to `key`'s value.
:params key: The key of the value to which `tail` should be appended.
:params tail: The list of values to append to the original.
:returns: The new value
"""
if not hasattr(tail, "__iter__"):
raise TypeError('Tail must be an iterable')
if not isinstance(tail, list):
# NOTE(flaper87): Make sure we pass a list
# down to the implementation. Not all drivers
# have support for generators, sets or other
# iterables.
tail = list(tail)
return self._append_tail(key, tail)
def append(self, key, value):
"""Appends `value` to `key`'s value.
        :params key: The key of the value to which `value` should be appended.
:params value: The value to append to the original.
:returns: The new value
"""
return self.append_tail(key, [value])
@abc.abstractmethod
def __contains__(self, key):
"""Verifies that a key exists.
:params key: The key to verify.
:returns: True if the key exists, otherwise False.
"""
@abc.abstractmethod
def _get_many(self, keys, default):
"""Implementations of this class have to override this method."""
return ((k, self.get(k, default=default)) for k in keys)
def get_many(self, keys, default=NOTSET):
"""Gets keys' value from cache
:params keys: List of keys to retrieve.
:params default: The default value to return for each key that is not
in the cache.
:returns: A generator of (key, value)
"""
return self._get_many(keys, default)
@abc.abstractmethod
def _set_many(self, data, ttl):
"""Implementations of this class have to override this method."""
for key, value in data.items():
self.set(key, value, ttl=ttl)
def set_many(self, data, ttl=None):
"""Puts several items into the cache at once
Depending on the backend, this operation may or may not be efficient.
The default implementation calls set for each (key, value) pair
        passed; other backends support set_many operations as part of their
protocols.
:params data: A dictionary like {key: val} to store in the cache.
:params ttl: Key's timeout in seconds.
"""
if ttl is None:
ttl = self._default_ttl
self._set_many(data, ttl)
def update(self, **kwargs):
"""Sets several (key, value) paris.
Refer to the `set_many` docstring.
"""
self.set_many(kwargs, ttl=self._default_ttl)
@abc.abstractmethod
def _unset_many(self, keys):
"""Implementations of this class have to override this method."""
for key in keys:
del self[key]
def unset_many(self, keys):
"""Removes several keys from the cache at once
:params keys: List of keys to unset.
"""
self._unset_many(keys)
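# Illustrative sketch only (not shipped with this module): a minimal dict-backed
# subclass showing which abstract hooks a concrete driver has to provide.
# TTL values are accepted but ignored here.
class _DictCacheSketch(BaseCache):
    """In-memory cache used purely to illustrate the abstract interface."""
    def __init__(self, parsed_url=None, options=None):
        super(_DictCacheSketch, self).__init__(parsed_url, options)
        self._data = {}
    def _set(self, key, value, ttl, not_exists=False):
        if not_exists and key in self._data:
            return False
        self._data[key] = value
        return True
    def _get(self, key, default):
        return self._data.get(key, default)
    def __delitem__(self, key):
        del self._data[key]
    def _clear(self):
        self._data.clear()
    def _incr(self, key, delta):
        self._data[key] = self._data.get(key, 0) + delta
        return self._data[key]
    def _append_tail(self, key, tail):
        self._data.setdefault(key, []).extend(tail)
        return self._data[key]
    def __contains__(self, key):
        return key in self._data
    def _get_many(self, keys, default):
        return ((k, self._get(k, default)) for k in keys)
    def _set_many(self, data, ttl):
        for key, value in data.items():
            self._set(key, value, ttl)
    def _unset_many(self, keys):
        for key in keys:
            del self._data[key]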
| apache-2.0 |
crosswalk-project/chromium-crosswalk-efl | tools/swig/swig.py | 133 | 1304 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper around swig.
Sets the SWIG_LIB environment var to point to Lib dir
and defers control to the platform-specific swig binary.
Depends on swig binaries being available at ../../third_party/swig.
"""
import os
import subprocess
import sys
def main():
swig_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]),
os.pardir, os.pardir, 'third_party', 'swig'))
lib_dir = os.path.join(swig_dir, "Lib")
os.putenv("SWIG_LIB", lib_dir)
dir_map = {
'darwin': 'mac',
'linux2': 'linux',
'linux3': 'linux',
'win32': 'win',
}
# Swig documentation lies that platform macros are provided to swig
# preprocessor. Provide them ourselves.
platform_flags = {
'darwin': '-DSWIGMAC',
'linux2': '-DSWIGLINUX',
'linux3': '-DSWIGLINUX',
'win32': '-DSWIGWIN',
}
swig_bin = os.path.join(swig_dir, dir_map[sys.platform], 'swig')
args = [swig_bin, platform_flags[sys.platform]] + sys.argv[1:]
args = [x.replace('/', os.sep) for x in args]
return subprocess.call(args)
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
dkubiak789/odoo | addons/crm_claim/__openerp__.py | 260 | 1939 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Claims Management',
'version': '1.0',
'category': 'Customer Relationship Management',
'description': """
Manage Customer Claims.
=======================
    This application allows you to track your customers'/suppliers' claims and grievances.
It is fully integrated with the email gateway so that you can create
automatically new claims based on incoming emails.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['crm'],
'data': [
'crm_claim_view.xml',
'crm_claim_menu.xml',
'security/ir.model.access.csv',
'report/crm_claim_report_view.xml',
'crm_claim_data.xml',
'res_partner_view.xml',
],
'demo': ['crm_claim_demo.xml'],
'test': [
'test/process/claim.yml',
'test/ui/claim_demo.yml'
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
googleapis/python-videointelligence | google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/async_client.py | 1 | 12840 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.videointelligence_v1p1beta1.types import video_intelligence
from .transports.base import VideoIntelligenceServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import VideoIntelligenceServiceGrpcAsyncIOTransport
from .client import VideoIntelligenceServiceClient
class VideoIntelligenceServiceAsyncClient:
"""Service that implements Google Cloud Video Intelligence API."""
_client: VideoIntelligenceServiceClient
DEFAULT_ENDPOINT = VideoIntelligenceServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = VideoIntelligenceServiceClient.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(
VideoIntelligenceServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
VideoIntelligenceServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(VideoIntelligenceServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
VideoIntelligenceServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
VideoIntelligenceServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
VideoIntelligenceServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(
VideoIntelligenceServiceClient.common_project_path
)
parse_common_project_path = staticmethod(
VideoIntelligenceServiceClient.parse_common_project_path
)
common_location_path = staticmethod(
VideoIntelligenceServiceClient.common_location_path
)
parse_common_location_path = staticmethod(
VideoIntelligenceServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VideoIntelligenceServiceAsyncClient: The constructed client.
"""
return VideoIntelligenceServiceClient.from_service_account_info.__func__(VideoIntelligenceServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VideoIntelligenceServiceAsyncClient: The constructed client.
"""
return VideoIntelligenceServiceClient.from_service_account_file.__func__(VideoIntelligenceServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> VideoIntelligenceServiceTransport:
"""Returns the transport used by the client instance.
Returns:
VideoIntelligenceServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(VideoIntelligenceServiceClient).get_transport_class,
type(VideoIntelligenceServiceClient),
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, VideoIntelligenceServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.VideoIntelligenceServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = VideoIntelligenceServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def annotate_video(
self,
request: video_intelligence.AnnotateVideoRequest = None,
*,
input_uri: str = None,
features: Sequence[video_intelligence.Feature] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Performs asynchronous video annotation. Progress and results can
be retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains
``AnnotateVideoProgress`` (progress). ``Operation.response``
contains ``AnnotateVideoResponse`` (results).
Args:
request (:class:`google.cloud.videointelligence_v1p1beta1.types.AnnotateVideoRequest`):
The request object. Video annotation request.
input_uri (:class:`str`):
Input video location. Currently, only `Google Cloud
Storage <https://cloud.google.com/storage/>`__ URIs are
supported, which must be specified in the following
format: ``gs://bucket-id/object-id`` (other URI formats
return
[google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]).
For more information, see `Request
URIs <https://cloud.google.com/storage/docs/request-endpoints>`__.
A video URI may include wildcards in ``object-id``, and
thus identify multiple videos. Supported wildcards: '*'
to match 0 or more characters; '?' to match 1 character.
If unset, the input video should be embedded in the
request as ``input_content``. If set, ``input_content``
should be unset.
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
features (:class:`Sequence[google.cloud.videointelligence_v1p1beta1.types.Feature]`):
Required. Requested video annotation
features.
This corresponds to the ``features`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.videointelligence_v1p1beta1.types.AnnotateVideoResponse` Video annotation response. Included in the response
field of the Operation returned by the GetOperation
call of the google::longrunning::Operations service.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([input_uri, features])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = video_intelligence.AnnotateVideoRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if input_uri is not None:
request.input_uri = input_uri
if features:
request.features.extend(features)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.annotate_video,
default_retry=retries.Retry(
initial=1.0,
maximum=120.0,
multiplier=2.5,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
video_intelligence.AnnotateVideoResponse,
metadata_type=video_intelligence.AnnotateVideoProgress,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-videointelligence",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("VideoIntelligenceServiceAsyncClient",)
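# Illustrative usage sketch (not part of the generated client); the bucket URI is
# a placeholder and LABEL_DETECTION is just one possible feature choice.
async def _example_annotate_video():
    client = VideoIntelligenceServiceAsyncClient()
    operation = await client.annotate_video(
        input_uri="gs://example-bucket/example.mp4",
        features=[video_intelligence.Feature.LABEL_DETECTION],
    )
    # The long-running operation resolves to an AnnotateVideoResponse.
    return await operation.result()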
| apache-2.0 |
wolfg1969/oh-my-stars | ohmystars/core.py | 2 | 5976 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
from datetime import datetime
from colorama import Fore
from getpass import getpass, getuser
from netrc import netrc
from github3 import login
from .db import StarredDB, EmptyIndexWarning
from .view import SearchResultView
from . import __version__
import argparse
import os
import subprocess
import sys
import errno
try:
import readline
except ImportError:
import pyreadline as readline
MY_STARS_HOME = os.path.join(os.path.expanduser('~'), '.oh-my-stars')
def get_auth_from_netrc(hostname):
"""Try to find login auth in ``~/.netrc``. Return ``(user, pwd)`` tuple. """
try:
auth = netrc()
except IOError as cause:
if cause.errno != errno.ENOENT:
raise
return None, None
username, _, password = auth.hosts.get(hostname, None) or (None,) * 3
return username, password
def print_text(text, color=None, reset_color=True):
if color is not None:
print(color + text + Fore.RESET if reset_color else '')
else:
print(text)
def main(args=None):
if not os.path.exists(MY_STARS_HOME):
os.makedirs(MY_STARS_HOME)
parser = argparse.ArgumentParser(description='a CLI tool to search your starred Github repositories.')
parser.add_argument('keywords', nargs='*', help='Search by keywords')
parser.add_argument('-l', '--language', help='Filter by language', nargs='+')
parser.add_argument('-u', '--update', action='store_true',
help='Create(first time) or update the local stars index')
parser.add_argument('-r', '--reindex', action='store_true', help='Re-create the local stars index')
parser.add_argument('-c', '--color', default='always', choices=['always', 'auto', 'never'], metavar='WHEN',
help='Colorize the output; WHEN can be \'always\' (default if omitted), \'auto\', or \'never\'')
parser.add_argument('-a', '--alfred', action='store_true',
help='Format search result as Alfred Script Filter output')
parser.add_argument('-3', '--three', action='store_true', help='Alfred 3 support')
parser.add_argument('-i', '--install', action='store_true', help='Import Alfred workflow')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
parsed_args = parser.parse_args(args)
enable_color = parsed_args.color == 'always' or (parsed_args.color == 'auto' and sys.stdout.isatty())
if parsed_args.update or parsed_args.reindex:
user, password = get_auth_from_netrc('api.github.com')
if not user:
try:
user = input('GitHub username: ')
except KeyboardInterrupt:
user = getuser()
else:
if not user:
user = getuser()
if not password:
password = getpass('GitHub password for {0}: '.format(user))
if not password:
print_text('password is required.', color=Fore.RED if enable_color else None, reset_color=False)
sys.exit(1)
def gh2f():
code = ''
while not code:
code = input('Enter 2FA code: ')
return code
g = login(user, password, two_factor_callback=gh2f)
mode = 't' if parsed_args.reindex else 'w'
with StarredDB(MY_STARS_HOME, mode) as db:
repo_list = []
for repo in g.iter_starred(sort='created', direction='desc', number=-1):
if db.get_latest_repo_full_name() == repo.full_name:
break
print_text(repo.full_name, color=Fore.BLUE if enable_color else None)
repo_list.append({
'full_name': repo.full_name,
'name': repo.name,
'url': repo.html_url,
'language': repo.language,
'description': repo.description,
})
if repo_list:
t1 = datetime.now()
print_text('Saving repo data...', color=Fore.GREEN if enable_color else None, reset_color=False)
db.update(repo_list)
t2 = datetime.now()
                print_text('Done. (took {:d}s)'.format(int((t2 - t1).total_seconds())),
color=Fore.RED if enable_color else None)
else:
print_text('No new stars found.', color=Fore.RED if enable_color else None)
sys.exit(0)
if parsed_args.install:
if parsed_args.three:
filename = 'ohmystars.alfredworkflow'
else:
filename = 'ohmystars-v2.alfredworkflow'
ret = subprocess.call(' '.join([
'curl -s -o /tmp/{}'.format(filename),
'-H "Accept:application/octet-stream"',
'"{url}{filename}"'.format(
url='https://raw.githubusercontent.com/wolfg1969/oh-my-stars/master/',
filename=filename
),
'&& open "/tmp/{}"'.format(filename)
]), shell=True)
sys.exit(ret)
if not parsed_args.keywords and not parsed_args.language:
parser.print_help()
sys.exit(0)
with StarredDB(MY_STARS_HOME, mode='r') as db:
try:
t1 = datetime.now()
search_result = db.search(parsed_args.language, parsed_args.keywords)
t2 = datetime.now()
view = SearchResultView(
(t2 - t1).total_seconds(),
alfred_format=parsed_args.alfred,
alfred_v3=parsed_args.three,
enable_color=enable_color
)
view.print_search_result(search_result, parsed_args.keywords)
except EmptyIndexWarning:
print(Fore.RED + 'Empty index.' + Fore.RESET)
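# Minimal entry-point sketch: the published package normally exposes main()
# through a console_scripts entry point; running the module directly is only
# assumed here for illustration.
if __name__ == '__main__':
    main()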
| mit |
heihei1252/lightblue-0.4 | build/lib/lightblue/_IOBluetoothUI.py | 82 | 2076 | # Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
"""
Provides a python interface to the Mac OSX IOBluetoothUI Framework classes,
through PyObjC.
For example:
>>> from lightblue import _IOBluetoothUI
>>> selector = _IOBluetoothUI.IOBluetoothDeviceSelectorController.deviceSelector()
>>> selector.runModal() # ask user to select a device
-1000
>>> for device in selector.getResults():
... print device.getName() # show name of selected device
...
Nokia 6600
>>>
See http://developer.apple.com/documentation/DeviceDrivers/Reference/IOBluetoothUI/index.html
for Apple's IOBluetoothUI documentation.
See http://pyobjc.sourceforge.net for details on how to access Objective-C
classes through PyObjC.
"""
import objc
try:
# mac os 10.5 loads frameworks using bridgesupport metadata
__bundle__ = objc.initFrameworkWrapper("IOBluetoothUI",
frameworkIdentifier="com.apple.IOBluetoothUI",
frameworkPath=objc.pathForFramework(
"/System/Library/Frameworks/IOBluetoothUI.framework"),
globals=globals())
except (AttributeError, ValueError):
# earlier versions use loadBundle() and setSignatureForSelector()
objc.loadBundle("IOBluetoothUI", globals(),
bundle_path=objc.pathForFramework(u'/System/Library/Frameworks/IOBluetoothUI.framework'))
del objc
| gpl-3.0 |
rousseab/pymatgen | pymatgen/entries/tests/test_exp_entries.py | 4 | 1189 | # coding: utf-8
from __future__ import division, unicode_literals
'''
Created on Jun 27, 2012
'''
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jun 27, 2012"
import unittest
import os
import json
from pymatgen.entries.exp_entries import ExpEntry
from monty.json import MontyDecoder
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class ExpEntryTest(unittest.TestCase):
def setUp(self):
thermodata = json.load(open(os.path.join(test_dir, "Fe2O3_exp.json"),
"r"), cls=MontyDecoder)
self.entry = ExpEntry("Fe2O3", thermodata)
def test_energy(self):
self.assertAlmostEqual(self.entry.energy, -825.5)
def test_to_from_dict(self):
d = self.entry.as_dict()
e = ExpEntry.from_dict(d)
self.assertAlmostEqual(e.energy, -825.5)
def test_str(self):
self.assertIsNotNone(str(self.entry))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| mit |
ReSqAr/arpi | arpi/lib/mail.py | 1 | 3797 | import email
import imaplib
import time
import datetime
class Account:
"""
Wraps an imaplib.IMAP4_SSL account object
and implements the function get_messages.
"""
def __init__(self, host, username, password):
"""
Connect to the given IMAP server.
"""
self.server = imaplib.IMAP4_SSL(host)
self.server.login(username, password)
self.server.select("INBOX")
    def get_messages(self, n):
"""
Return the last n emails as Message objects.
"""
# get all email ids
result, data = self.server.search(None, "ALL")
if result != 'OK':
raise RuntimeError("IMAP failed")
# the email ids are an object of type bytes and are separated by b' '
email_ids = data[0].split()
# get emails and convert them to Message objects
messages = []
for email_id in email_ids[-n:]:
# (RFC822) = email body
result, data = self.server.fetch(email_id, "(RFC822)")
if result != 'OK':
raise RuntimeError("IMAP failed")
messages.append( Message(data[0][1]) )
return messages
class Message:
"""
Wraps an email.message object,
exposes convenience methods to quickly extract
* the sender
* the subject
* date/time when the email was sent, and
* 'the' text.
"""
def __init__(self, msg):
self._msg = email.message_from_bytes(msg)
def _get_decoded_header(self, key):
header = self._msg[key]
decoded = email.header.decode_header( header)
email_header = email.header.make_header( decoded )
return str(email_header)
def get_sender(self):
"""
Get the message's sender
"""
sender = self._get_decoded_header('From')
realname, emailaddress = email.utils.parseaddr(sender)
if realname:
return realname.strip()
elif emailaddress:
accountname = emailaddress.split('@')[0]
return accountname.replace('.',' ').replace('-',' ').replace('_',' ').strip()
else:
return "Unknown"
def get_subject(self):
"""
Get the message's subject
"""
subject = self._get_decoded_header('Subject')
return subject.strip()
def get_datetime(self):
"""
Get the message's date time
"""
date = self._msg['Date']
date_tuple = email.utils.parsedate(date)
timestamp = time.mktime(date_tuple)
return datetime.datetime.fromtimestamp(timestamp)
def get_text(self):
"""
Get the message text. Distinguish multipart and non-multipart messages.
"""
# based on: http://stackoverflow.com/questions/17874360/python-how-to-parse-the-body-from-a-raw-email-given-that-raw-email-does-not
if self._msg.is_multipart():
for part in self._msg.walk():
content_type = part.get_content_type()
content_disposition = str(part.get('Content-Disposition'))
if content_type == 'text/plain' and 'attachment' not in content_disposition:
content_charsets = part.get_charsets()
content_charset = content_charsets[0] if content_charsets else "UTF-8"
return str( part.get_payload(decode=True), content_charset, "replace" )
else:
content_charsets = self._msg.get_charsets()
content_charset = content_charsets[0] if content_charsets else "UTF-8"
return str( self._msg.get_payload(decode=True), content_charset, "replace" )
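# Illustrative usage sketch (host and credentials below are placeholders):
if __name__ == "__main__":
    account = Account("imap.example.org", "someone", "secret")
    for message in account.get_messages(5):
        print("{} | {} | {}".format(message.get_datetime().isoformat(),
                                    message.get_sender(),
                                    message.get_subject()))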
| gpl-3.0 |
AnimeshSinha1309/Website-Edunet | WebsiteEdunet/env/Lib/site-packages/django/contrib/gis/maps/google/__init__.py | 67 | 2767 | """
This module houses the GoogleMap object, used for generating
the needed javascript to embed Google Maps in a Web page.
Google(R) is a registered trademark of Google, Inc. of Mountain View, California.
Example:
* In the view:
return render(request, 'template.html', {'google': GoogleMap(key="abcdefg")})
* In the template:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{{ google.xhtml }}
<head>
<title>Google Maps via GeoDjango</title>
{{ google.style }}
{{ google.scripts }}
</head>
{{ google.body }}
<div id="{{ google.dom_id }}" style="width:600px;height:400px;"></div>
</body>
</html>
Note: If you want to be more explicit in your templates, the following are
equivalent:
{{ google.body }} => "<body {{ google.onload }} {{ google.onunload }}>"
{{ google.xhtml }} => "<html xmlns="http://www.w3.org/1999/xhtml" {{ google.xmlns }}>"
{{ google.style }} => "<style>{{ google.vml_css }}</style>"
Explanation:
- The `xhtml` property provides the correct XML namespace needed for
Google Maps to operate in IE using XHTML. Google Maps on IE uses
VML to draw polylines. Returns, by default:
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
- The `style` property provides the correct style tag for the CSS
properties required by Google Maps on IE:
<style type="text/css">v\:* {behavior:url(#default#VML);}</style>
- The `scripts` property provides the necessary <script> tags for
including the Google Maps javascript, as well as including the
generated javascript.
- The `body` property provides the correct attributes for the
body tag to load the generated javascript. By default, returns:
<body onload="gmap_load()" onunload="GUnload()">
- The `dom_id` property returns the DOM id for the map. Defaults to "map".
The following attributes may be set or customized in your local settings:
* GOOGLE_MAPS_API_KEY: String of your Google Maps API key. These are tied
to a domain. May be obtained from http://www.google.com/apis/maps/
* GOOGLE_MAPS_API_VERSION (optional): Defaults to using "2.x"
* GOOGLE_MAPS_URL (optional): Must have a substitution ('%s') for the API
version.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap, GoogleMapSet
from django.contrib.gis.maps.google.overlays import (
GEvent, GIcon, GMarker, GPolygon, GPolyline,
)
from django.contrib.gis.maps.google.zoom import GoogleZoom
__all__ = [
'GoogleMap', 'GoogleMapSet', 'GEvent', 'GIcon', 'GMarker', 'GPolygon',
'GPolyline', 'GoogleZoom',
]
| mit |
ChameleonCloud/blazar | blazar/tests/monitor/test_polling_monitor.py | 1 | 2375 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_service import threadgroup
from blazar.monitor import base as base_monitor
from blazar.monitor import polling_monitor
from blazar.plugins import base
from blazar import tests
POLLING_INTERVAL = 10
HEALING_INTERVAL = 10
class DummyMonitorPlugin(base.BaseMonitorPlugin):
def is_notification_enabled(self):
return True
def get_notification_event_types(self):
return []
def get_notification_topics(self):
return []
def notification_callback(self, event_type, message):
return {}
def is_polling_enabled(self):
return False
def get_polling_interval(self):
return POLLING_INTERVAL
def poll(self):
return {}
def get_healing_interval(self):
return HEALING_INTERVAL
def heal(self):
return {}
class PollingHandlerTestCase(tests.TestCase):
def setUp(self):
super(PollingHandlerTestCase, self).setUp()
self.monitor_plugins = [DummyMonitorPlugin()]
self.monitor = polling_monitor.PollingMonitor(self.monitor_plugins)
def test_start_monitoring(self):
add_timer = self.patch(threadgroup.ThreadGroup, 'add_timer')
self.patch(base_monitor.BaseMonitor, 'start_monitoring')
self.monitor.start_monitoring()
add_timer.assert_called_once_with(
POLLING_INTERVAL, self.monitor.call_monitor_plugin, None,
self.monitor_plugins[0].poll)
def test_stop_monitoring(self):
dummy_timer = mock.Mock()
timer_done = self.patch(threadgroup.ThreadGroup, 'timer_done')
self.monitor.polling_timers.append(dummy_timer)
self.patch(base_monitor.BaseMonitor, 'stop_monitoring')
self.monitor.stop_monitoring()
timer_done.assert_called_once_with(dummy_timer)
| apache-2.0 |
telefonicaid/fiware-cloto | fiware_cloto/cloto/tests/test_views.py | 3 | 1316 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2014 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
#
__author__ = 'gjp'
from django.test import TestCase
class MyTests(TestCase):
def test_views_fail_without_server_info(self):
""" This test should return a 500 error if there is no information about server created in the
data base
"""
response = self.client.get("/info")
self.assertEqual(response.status_code, 500)
def test_views_fail(self):
response = self.client.post("/fail", data={})
self.assertEqual(response.status_code, 400)
| apache-2.0 |
Facetracker-project/facetracker-core | lib/youtube-dl/youtube_dl/extractor/vube.py | 36 | 6867 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
int_or_none,
ExtractorError,
)
class VubeIE(InfoExtractor):
IE_NAME = 'vube'
IE_DESC = 'Vube.com'
_VALID_URL = r'http://vube\.com/(?:[^/]+/)+(?P<id>[\da-zA-Z]{10})\b'
_TESTS = [
{
'url': 'http://vube.com/trending/William+Wei/Y8NUZ69Tf7?t=s',
'md5': 'e7aabe1f8f1aa826b9e4735e1f9cee42',
'info_dict': {
'id': 'Y8NUZ69Tf7',
'ext': 'mp4',
'title': 'Best Drummer Ever [HD]',
'description': 'md5:2d63c4b277b85c2277761c2cf7337d71',
'thumbnail': 're:^https?://.*\.jpg',
'uploader': 'William',
'timestamp': 1406876915,
'upload_date': '20140801',
'duration': 258.051,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['amazing', 'hd', 'best drummer ever', 'william wei', 'bucket drumming', 'street drummer', 'epic street drumming'],
},
}, {
'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon',
'md5': 'db7aba89d4603dadd627e9d1973946fe',
'info_dict': {
'id': 'YL2qNPkqon',
'ext': 'mp4',
'title': 'Chiara Grispo - Price Tag by Jessie J',
'description': 'md5:8ea652a1f36818352428cb5134933313',
'thumbnail': 're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102e7e63057-5ebc-4f5c-4065-6ce4ebde131f\.jpg$',
'uploader': 'Chiara.Grispo',
'timestamp': 1388743358,
'upload_date': '20140103',
'duration': 170.56,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['pop', 'music', 'cover', 'singing', 'jessie j', 'price tag', 'chiara grispo'],
},
'skip': 'Removed due to DMCA',
},
{
'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1',
'md5': '5d4a52492d76f72712117ce6b0d98d08',
'info_dict': {
'id': 'UeBhTudbfS',
'ext': 'mp4',
'title': 'My 7 year old Sister and I singing "Alive" by Krewella',
'description': 'md5:40bcacb97796339f1690642c21d56f4a',
'thumbnail': 're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102265d5a9f-0f17-4f6b-5753-adf08484ee1e\.jpg$',
'uploader': 'Seraina',
'timestamp': 1396492438,
'upload_date': '20140403',
'duration': 240.107,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['seraina', 'jessica', 'krewella', 'alive'],
},
'skip': 'Removed due to DMCA',
}, {
'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s',
'md5': '0584fc13b50f887127d9d1007589d27f',
'info_dict': {
'id': '0nmsMY5vEq',
'ext': 'mp4',
'title': 'Frozen - Let It Go Cover by Siren Gene',
'description': 'My rendition of "Let It Go" originally sung by Idina Menzel.',
'thumbnail': 're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/10283ab622a-86c9-4681-51f2-30d1f65774af\.jpg$',
'uploader': 'Siren',
'timestamp': 1395448018,
'upload_date': '20140322',
'duration': 221.788,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['let it go', 'cover', 'idina menzel', 'frozen', 'singing', 'disney', 'siren gene'],
},
'skip': 'Removed due to DMCA',
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video = self._download_json(
'http://vube.com/t-api/v1/video/%s' % video_id, video_id, 'Downloading video JSON')
public_id = video['public_id']
formats = []
for media in video['media'].get('video', []) + video['media'].get('audio', []):
if media['transcoding_status'] != 'processed':
continue
fmt = {
'url': 'http://video.thestaticvube.com/video/%s/%s.mp4' % (media['media_resolution_id'], public_id),
'abr': int(media['audio_bitrate']),
'format_id': compat_str(media['media_resolution_id']),
}
vbr = int(media['video_bitrate'])
if vbr:
fmt.update({
'vbr': vbr,
'height': int(media['height']),
})
formats.append(fmt)
self._sort_formats(formats)
if not formats and video.get('vst') == 'dmca':
raise ExtractorError(
'This video has been removed in response to a complaint received under the US Digital Millennium Copyright Act.',
expected=True)
title = video['title']
description = video.get('description')
thumbnail = self._proto_relative_url(video.get('thumbnail_src'), scheme='http:')
uploader = video.get('user_alias') or video.get('channel')
timestamp = int_or_none(video.get('upload_time'))
duration = video['duration']
view_count = video.get('raw_view_count')
like_count = video.get('total_likes')
dislike_count = video.get('total_hates')
comments = video.get('comments')
comment_count = None
if comments is None:
comment_data = self._download_json(
'http://vube.com/api/video/%s/comment' % video_id,
video_id, 'Downloading video comment JSON', fatal=False)
if comment_data is not None:
comment_count = int_or_none(comment_data.get('total'))
else:
comment_count = len(comments)
categories = [tag['text'] for tag in video['tags']]
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'timestamp': timestamp,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
'categories': categories,
}
| gpl-2.0 |
laurent-george/weboob | modules/creditcooperatif/perso/browser.py | 5 | 3161 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Kevin Pouget
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.browser import LoginBrowser, URL, need_login
from weboob.exceptions import BrowserIncorrectPassword, BrowserUnavailable
from .pages import LoginPage, CreditLoggedPage, AccountsPage, TransactionsPage, TransactionsJSONPage, ComingTransactionsPage
__all__ = ['CreditCooperatif']
class CreditCooperatif(LoginBrowser):
BASEURL = "https://www.credit-cooperatif.coop"
loginpage = URL('/portail//particuliers/login.do', LoginPage)
loggedpage = URL('/portail/particuliers/authentification.do', CreditLoggedPage)
accountspage = URL('/portail/particuliers/mescomptes/synthese.do', AccountsPage)
transactionpage = URL('/portail/particuliers/mescomptes/relevedesoperations.do', TransactionsPage)
transactjsonpage = URL('/portail/particuliers/mescomptes/relevedesoperationsjson.do', TransactionsJSONPage)
comingpage = URL('/portail/particuliers/mescomptes/synthese/operationsencourslien.do', ComingTransactionsPage)
def do_login(self):
"""
Attempt to log in.
Note: this method does nothing if we are already logged in.
"""
assert isinstance(self.username, basestring)
assert isinstance(self.password, basestring)
self.loginpage.stay_or_go()
self.page.login(self.username, self.password)
if self.loggedpage.is_here():
error = self.page.get_error()
if error is None:
return
else:
raise BrowserUnavailable("not on the login page")
raise BrowserIncorrectPassword(error)
@need_login
def get_accounts_list(self):
self.accountspage.stay_or_go()
return self.page.get_list()
@need_login
def get_history(self, account):
data = {'accountExternalNumber': account.id}
self.transactionpage.go(data=data)
data = {'iDisplayLength': 400,
'iDisplayStart': 0,
'iSortCol_0': 0,
'iSortingCols': 1,
'sColumns': '',
'sEcho': 1,
'sSortDir_0': 'asc',
}
self.transactjsonpage.go(data=data)
return self.page.get_transactions()
@need_login
def get_coming(self, account):
data = {'accountExternalNumber': account.id}
self.comingpage.go(data=data)
assert self.comingpage.is_here()
return self.page.get_transactions()
| agpl-3.0 |
thomasleese/cartographer | cartographer/mbtiles.py | 1 | 7249 | import os
import sqlite3
from .boundaries import Boundary
class TilesetMetadata:
KNOWN_KEYS = ['name', 'type', 'version', 'description', 'format',
'bounds', 'attribution']
def __init__(self, db):
self.db = db
def __setitem__(self, name, value):
cursor = self.db.cursor()
cursor.execute('UPDATE metadata SET value = ? WHERE name = ?',
(value, name))
if cursor.rowcount == 0:
cursor.execute('INSERT INTO metadata (name, value) VALUES (?, ?)',
(name, value))
self.db.commit()
def __getitem__(self, name):
cursor = self.db.cursor()
cursor.execute('SELECT value FROM metadata WHERE name = ?', (name,))
row = cursor.fetchone()
if row is None:
raise KeyError(name)
else:
return row[0]
def __contains__(self, name):
cursor = self.db.cursor()
cursor.execute('SELECT COUNT(*) FROM metadata WHERE name = ?', (name,))
row = cursor.fetchone()
return row[0] > 0
def __delitem__(self, name):
cursor = self.db.cursor()
cursor.execute('DELETE FROM metadata WHERE name = ?', (name,))
if cursor.rowcount == 0:
raise KeyError(name)
self.db.commit()
class TilesetTiles:
def __init__(self, db):
self.db = db
def __setitem__(self, key, value):
zoom, col, row = key
cursor = self.db.cursor()
cursor.execute("""
UPDATE tiles
SET tile_data = ?
WHERE zoom_level = ? AND tile_column = ? AND tile_row = ?
""", (value, zoom, col, row))
if cursor.rowcount == 0:
cursor.execute("""
INSERT INTO
tiles (zoom_level, tile_column, tile_row, tile_data)
VALUES (?, ?, ?, ?)
""", (zoom, col, row, value))
self.db.commit()
def __getitem__(self, key):
zoom, col, row = key
cursor = self.db.cursor()
cursor.execute("""
SELECT tile_data
FROM tiles
WHERE zoom_level = ? AND tile_column = ? AND tile_row = ?
""", (zoom, col, row))
row = cursor.fetchone()
if row is None:
raise KeyError(key)
else:
return row[0]
def __contains__(self, key):
zoom, col, row = key
cursor = self.db.cursor()
cursor.execute("""
SELECT COUNT(*)
FROM tiles
WHERE zoom_level = ? AND tile_column = ? AND tile_row = ?
""", (zoom, col, row))
row = cursor.fetchone()
return row[0] > 0
def __delitem__(self, key):
zoom, col, row = key
cursor = self.db.cursor()
cursor.execute("""
DELETE FROM tiles
WHERE zoom_level = ? AND tile_column = ? AND tile_row = ?
""", (zoom, col, row))
if cursor.rowcount == 0:
raise KeyError(key)
self.db.commit()
def count(self, zoom=None, col=None, row=None):
        sql = 'SELECT COUNT(*) FROM tiles'
        where = []
        args = []
        if zoom is not None:
            where.append('zoom_level = ?')
            args.append(zoom)
        if col is not None:
            where.append('tile_column = ?')
            args.append(col)
        if row is not None:
            where.append('tile_row = ?')
            args.append(row)
        if where:
            sql += ' WHERE ' + ' AND '.join(where)
cursor = self.db.cursor()
cursor.execute(sql, args)
row = cursor.fetchone()
return row[0]
def all(self):
cursor = self.db.cursor()
cursor.execute("""
SELECT zoom_level, tile_column, tile_row, tile_data
FROM tiles
""")
yield from cursor
@property
def zoom_levels(self):
cursor = self.db.cursor()
cursor.execute('SELECT DISTINCT zoom_level FROM tiles')
return [row[0] for row in cursor.fetchall()]
def _get_row(self, tile_row, zoom_level):
cursor = self.db.cursor()
cursor.execute("""
SELECT tile_column
FROM tiles
WHERE tile_row = ? AND zoom_level = ?
""", (tile_row, zoom_level))
columns = [row[0] for row in cursor.fetchall()]
return columns
class TilesetSchema:
def __init__(self, db):
self.db = db
@staticmethod
def _create_metadata_table(cursor):
cursor.execute("""
CREATE TABLE metadata (
name TEXT,
value TEXT
)
""")
cursor.execute("""
CREATE UNIQUE INDEX metadata_name ON metadata (name);
""")
@staticmethod
def _create_tiles_table(cursor):
cursor.execute("""
CREATE TABLE tiles (
zoom_level INTEGER,
tile_row INTEGER,
tile_column INTEGER,
tile_data BLOB
);
""")
def create(self):
cursor = self.db.cursor()
self._create_metadata_table(cursor)
self._create_tiles_table(cursor)
self.db.commit()
class Tileset:
def __init__(self, filename, create=False, upgrade=False):
if not create and not os.path.exists(filename):
raise ValueError('Tileset does not exist: {}'.format(filename))
self.db = sqlite3.connect(filename)
self.schema = TilesetSchema(self.db)
if create:
self.schema.create()
self.metadata = TilesetMetadata(self.db)
self.tiles = TilesetTiles(self.db)
@property
def boundary(self):
tokens = [float(token) for token in self.bounds.split(',')]
return Boundary(*tokens)
@boundary.setter
def boundary(self, value):
self.bounds = value.as_metadata()
@property
def mime_type(self):
if self.format == 'png':
return 'image/png'
elif self.format == 'jpg':
return 'image/jpeg'
else:
raise ValueError('Unsupported format.')
@property
def zoom_levels(self):
if 'zoom_levels' in self.metadata:
zoom_levels_str = self.metadata['zoom_levels']
return [int(token) for token in zoom_levels_str.split(',')]
else:
return self.tiles.zoom_levels
def __getattr__(self, key):
if key in TilesetMetadata.KNOWN_KEYS:
return self.metadata[key]
else:
raise AttributeError(key)
def __setattr__(self, key, value):
if key in TilesetMetadata.KNOWN_KEYS:
self.metadata[key] = value
else:
super().__setattr__(key, value)
def __delattr__(self, key):
if key in TilesetMetadata.KNOWN_KEYS:
del self.metadata[key]
else:
super().__delattr__(key)
def __setitem__(self, key, value):
self.tiles[key] = value
def __getitem__(self, key):
return self.tiles[key]
def __delitem__(self, key):
del self.tiles[key]
def __contains__(self, key):
return key in self.tiles
def __iter__(self):
yield from self.tiles.all()
| mit |
rspeer/solvertools | solvertools/puzzle_structures.py | 1 | 8177 | from solvertools.util import data_path
from solvertools.wordlist import WORDS
from solvertools.normalize import slugify, alphanumeric
from solvertools.regextools import regex_index, regex_len
from itertools import permutations
from natsort import natsorted
import csv
import re
class RegexClue:
"""
A wrapper for answers that are indicated to be regular expressions.
Such answers are indicated in spreadsheet input by surrounding them with
slashes.
"""
def __init__(self, expr):
self.expr = expr
self.compiled = re.compile('^' + expr + '$')
def match(self, text):
return self.compiled.match(text)
def resolve(self):
"""
Get the most likely word that fits this pattern.
"""
if self.expr == '.+':
# a shortcut for a common case
return 'THE'
found = WORDS.search(self.expr, count=1)
return found[0][1]
def __getitem__(self, index):
return regex_index(self.expr, index)
def __len__(self):
return regex_len(self.expr)
def __eq__(self, other):
if type(self) != type(other):
return False
return self.expr == other.expr
def __ne__(self, other):
return not (self == other)
def __str__(self):
if self.expr == '.+':
return "ANY"
else:
return "/%s/" % self.expr
def __repr__(self):
if self.expr == '.+':
return "ANY"
else:
return "RegexClue(%r)" % self.expr
ANY = RegexClue('.+')
def parse_cell(cell):
"""
Handle some special syntax. A cell can contain a regex surrounded with
slashes, in which case it will be interpreted as a regex. Or it can be
the empty string, in which case it will match any word.
"""
if isinstance(cell, RegexClue):
return cell
cell = cell.strip()
if cell == '':
return ANY
elif cell.startswith('/') and cell.endswith('/'):
reg = cell[1:-1]
return RegexClue(reg)
else:
return alphanumeric(cell)
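# Editor's illustrative sketch, not part of the original module: a quick check of
# how parse_cell() treats the special syntax described above. The sample strings
# are hypothetical, and the plain-text branch depends on alphanumeric() from
# solvertools.normalize, whose exact normalization is assumed rather than shown here.
def _parse_cell_examples():
    assert parse_cell('') is ANY                    # blank cells match any word
    clue = parse_cell('/B.NANA/')                   # slashes mark a regex clue
    assert isinstance(clue, RegexClue)
    assert clue.match('BANANA')
    return parse_cell('plain text')                 # other cells are normalized text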
def parse_csv_row(row):
return [parse_cell(cell) for cell in row]
def read_csv_string(string):
reader = csv.reader(string.split('\n'))
return [parse_csv_row(row) for row in reader if row]
def read_csv_file(filename):
with open(filename, encoding='utf-8') as file:
reader = csv.reader(file, dialect='excel')
return [parse_csv_row(row) for row in reader]
def diagonalize(items):
"""
Take the diagonal of a list of words. If the diagonal runs off the end
of a word, raise an IndexError.
"""
return ''.join([items[i][i] for i in range(len(items))])
def acrostic(items):
"""
Take the acrostic of a list of words -- the first letter of each word.
"""
return ''.join([item[0] for item in items])
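# Editor's illustrative sketch, not part of the original module: a tiny worked
# example of the two extraction helpers above, using made-up answers.
def _extraction_examples():
    answers = ['CAB', 'ATE', 'BET']
    assert diagonalize(answers) == 'CTT'    # 'CAB'[0], 'ATE'[1], 'BET'[2]
    assert acrostic(answers) == 'CAB'       # first letter of each answer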
def brute_force_diagonalize(answers, wordlist=WORDS, quiet=False):
"""
Find the most cromulent diagonalization for a set of answers, trying all
possible orders. See README.md for a cool example of this with 10 answers.
As a somewhat artificial example, let's suppose we have these seven
answers from the 2000 metas, but don't remember their order:
>>> metas = ['benjamins', 'billgates', 'donors', 'luxor', 'mansion', 'miserly', 'realty']
>>> brute_force_diagonalize(metas)[0] # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
Cromulence Text Info
9.5 RUN EAST
9.2 MIX LAST
9.1 MAX LAST
9.1 BE NOISY
8.8 LINE TO I
...
(9.5, 'RUN EAST', None)
Of course we were looking for the famous red herring "BE NOISY", but
"RUN EAST" sounds like a good way to find the coin also.
"""
results = []
seen = set()
answers = [parse_cell(word) for word in answers]
for i, permutation in enumerate(permutations(answers)):
if not quiet and i > 0 and i % 10000 == 0:
print("Tried %d permutations" % i)
try:
diag = diagonalize(permutation)
except IndexError:
continue
found = wordlist.search(diag, count=1, use_cromulence=True)
if found:
logprob, text = found[0]
slug = slugify(text)
if slug not in seen:
results.append((logprob, text, None))
seen.add(slug)
return wordlist.show_best_results(results)
def resolve(item):
"""
Get a non-ambiguous string for each item. If it's uncertain, pick a word,
even if it's just THE. This lets us at least try a sort order, although
uncertain answers may be out of place.
"""
if isinstance(item, RegexClue):
return item.resolve()
else:
return item
def _index_by(indexee, index):
if isinstance(index, RegexClue):
if index == ANY:
return '.'
else:
raise IndexError
else:
num = int(index)
return indexee[num - 1]
def _try_indexing(grid, titles):
ncols = len(titles)
nrows = len(grid)
for sort_col in [None] + list(range(ncols)):
if sort_col is None:
ordered = grid
sort_title = None
else:
sort_keys = [(row, resolve(grid[row][sort_col])) for row in range(nrows)]
sorted_keys = natsorted(sort_keys, key=lambda x: x[1])
ordered = [grid[row] for row, key in sorted_keys]
sort_title = titles[sort_col]
for indexed_col in range(ncols):
column = [grid_row[indexed_col] for grid_row in ordered]
try:
info = (sort_title, titles[indexed_col], '1ST')
yield acrostic(column), info
info = (sort_title, titles[indexed_col], 'DIAG')
yield diagonalize(column), info
except IndexError:
pass
for indexing_col in range(ncols):
if indexing_col == indexed_col:
continue
indices = [grid_row[indexing_col] for grid_row in ordered]
try:
letters = [_index_by(cell, index_cell)
for (cell, index_cell) in zip(column, indices)]
index_result = ''.join(letters)
info = (sort_title, titles[indexed_col], titles[indexing_col])
yield index_result, info
except (IndexError, ValueError):
pass
def readable_indexing(info):
sortby, indexed, indexer = info
if sortby is None:
sort_part = "Don't sort"
else:
sort_part = "Sort by %r" % sortby
if indexer == '1ST':
index_part = "take the first letters of"
elif indexer == 'DIAG':
index_part = "take the diagonal of"
else:
index_part = "index by %r into" % indexer
return "%s, %s %r" % (sort_part, index_part, indexed)
DIGITS_RE = re.compile(r'[0-9]')
def index_all_the_things(grid, count=20):
"""
Try every combination of sorting by one column and indexing another column,
possibly by the numeric values in a third column.
"""
titles = grid[0]
ncols = len(titles)
data = []
for row in grid[1:]:
if len(row) < ncols:
row = row + [''] * (ncols - len(row))
data.append([parse_cell(cell) for cell in row])
best_logprob = -1000
results = []
seen = set()
for pattern, info in _try_indexing(data, titles):
if DIGITS_RE.search(pattern):
continue
found = WORDS.search(pattern, count=5, use_cromulence=True)
for logprob, text in found:
if text not in seen:
seen.add(text)
description = readable_indexing(info)
results.append((logprob, text, description))
if logprob > best_logprob:
print("\t%2.2f\t%s\t%s" % (logprob, text, description))
best_logprob = logprob
print()
return WORDS.show_best_results(results, count)
def indexing_demo():
filename = data_path('test/soylent_partners.csv')
grid = read_csv_file(filename)
index_all_the_things(grid)
| mit |
KellyChan/python-examples | python/aldebaran/hana/hana/motion/move/motion_moveTo.py | 3 | 2060 | # -*- encoding: UTF-8 -*-
'''Move To: Small example to make Nao Move To an Objective'''
import math
import almath as m # python's wrapping of almath
import sys
from naoqi import ALProxy
def StiffnessOn(proxy):
# We use the "Body" name to signify the collection of all joints
pNames = "Body"
pStiffnessLists = 1.0
pTimeLists = 1.0
proxy.stiffnessInterpolation(pNames, pStiffnessLists, pTimeLists)
def main(robotIP):
try:
motionProxy = ALProxy("ALMotion", robotIP, 9559)
except Exception, e:
print "Could not create proxy to ALMotion"
print "Error was: ", e
# Set NAO in stiffness On
StiffnessOn(motionProxy)
#####################
## Enable arms control by move algorithm
#####################
motionProxy.setWalkArmsEnabled(True, True)
#~ motionProxy.setWalkArmsEnabled(False, False)
#####################
## FOOT CONTACT PROTECTION
#####################
#~ motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION",False]])
motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", True]])
#####################
## get robot position before move
#####################
initRobotPosition = m.Pose2D(motionProxy.getRobotPosition(False))
X = 0.3
Y = 0.1
Theta = math.pi/2.0
motionProxy.post.moveTo(X, Y, Theta)
    # waitUntilMoveIsFinished is useful because moveTo called through post is not a blocking function
motionProxy.waitUntilMoveIsFinished()
#####################
## get robot position after move
#####################
endRobotPosition = m.Pose2D(motionProxy.getRobotPosition(False))
#####################
## compute and print the robot motion
#####################
robotMove = m.pose2DInverse(initRobotPosition)*endRobotPosition
print "Robot Move :", robotMove
if __name__ == "__main__":
robotIp = "127.0.0.1"
if len(sys.argv) <= 1:
print "Usage python motion_moveTo.py robotIP (optional default: 127.0.0.1)"
else:
robotIp = sys.argv[1]
main(robotIp)
| mit |
getnamo/UnrealEnginePython | tutorials/PlottingGraphsWithMatplotlibAndUnrealEnginePython_Assets/graph_texture.py | 3 | 1064 | import unreal_engine as ue
# EPixelFormat defines the various pixel formats for a texture/image, we will use RGBA with 8bit per channel
from unreal_engine.enums import EPixelFormat
import matplotlib
# set the Agg renderer as we do not need any toolkit
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# set texture/plot dimensions and dpi; ensure dpi is a float!
width = 1024
height = 1024
dpi = 72.0
# create a new figure with the specified sizes
fig = plt.figure(1)
fig.set_dpi(dpi)
fig.set_figwidth(width/dpi)
fig.set_figheight(height/dpi)
# plot a simple graph with a label on the y axis
plt.plot([1, 2, 3, 4])
plt.ylabel('some numbers')
# draw the graph (in memory)
fig.canvas.draw()
# create a texture in memory (will be saved later)
texture = ue.create_transient_texture(width, height, EPixelFormat.PF_R8G8B8A8)
# copy pixels from matplotlib canvas to the texture as RGBA
texture.texture_set_data(fig.canvas.buffer_rgba())
# save the texture
texture.save_package('/Game/FirstGraphTexture')
# open its editor
ue.open_editor_for_asset(texture)
| mit |
Hurence/log-island | logisland-components/logisland-processors/logisland-processor-scripting/src/main/python/processors/basic/BasicProcessor.py | 3 | 1416 | # coding: utf-8
from AbstractProcessor import AbstractProcessor
from com.hurence.logisland.record import StandardRecord
#
# Simple python processor to test ability to run python code and process some
# records.
#
# The python_processor.python_processor_script_path config property of the
# java python processor must point to a python module file. This module must
# at least contain the definition of a python class with the same name as the
# module, and this class must inherit from the logisland-provided
# python class: AbstractProcessor
#
class BasicProcessor(AbstractProcessor):
def init(self, context):
print "Inside init of BasicProcessor python code"
def process(self, context, records):
print "Inside process of BasicProcessor python code"
# Copy the records and add python_field field in it
outputRecords = []
for record in records:
copyRecord = StandardRecord(record)
# Check that one can read values coming from java
javaFieldValue = copyRecord.getField("java_field").getRawValue()
expectedValue = "java_field_value"
assert (javaFieldValue == expectedValue) , "Expected " + expectedValue + " but got " + javaFieldValue
copyRecord.setStringField('python_field', 'python_field_value')
outputRecords.append(copyRecord)
return outputRecords | apache-2.0 |
jwalgran/otm-core | opentreemap/importer/errors.py | 3 | 4182 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.utils.translation import ugettext_lazy as _
_messages_by_code = {}
def e(code, message, fatal):
_messages_by_code[code] = message
return (code, message, fatal)
def get_message(code):
return _messages_by_code[code]
def is_itree_error_code(code):
return 60 <= code <= 69
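# Editor's note, not part of the original module: each error defined below is a
# (code, message, fatal) tuple built by e(), which also registers the message for
# get_message(). For example, EMPTY_FILE is (1, _('No rows found'), True),
# get_message(1) returns that same lazy message, and is_itree_error_code(62) is
# True because 60-69 is the i-Tree range.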
######################################
# FILE LEVEL ERRORS
######################################
#
# Errors that are attributed to the file and prevent the
# rows from being loaded and validated.
#
EMPTY_FILE = e(1, _('No rows found'), True)
UNMATCHED_FIELDS = e(3, _('Unrecognized fields in header row'), False)
MISSING_FIELD = e(5, _('This field is required'), True)
GENERIC_ERROR = e(
6, _('An exception was raised while uploading the file'), True)
######################################
# ROW LEVEL ERRORS
######################################
#
# Errors that are attributed to rows
#
INVALID_GEOM = e(10, _('Longitude must be between -180 and 180 (inclusive) '
'and latitude must be between -90 and 90 (exclusive)'),
True)
GEOM_OUT_OF_BOUNDS = e(
11, _('Geometry must be inside the map bounds'), True)
EXCL_ZONE = e(12, _('Geometry may not be in an exclusion zone'), True)
INVALID_SPECIES = e(20, _('Could not find this scientific name on the '
'master species list.'), True)
DUPLICATE_SPECIES = e(21, _('More than one species matches the given '
'scientific name.'), True)
INVALID_PLOT_ID = e(30, _('The given OpenTreeMap Planting Site ID does not '
'exist in the system. This ID is automatically '
'generated by OpenTreeMap and should only '
'be used for updating existing records'), True)
INVALID_TREE_ID = e(31, _('The given OpenTreeMap Tree ID does not exist '
'in the system. This ID is automatically '
'generated by OpenTreeMap and should only '
'be used for updating existing records'), True)
PLOT_TREE_MISMATCH = e(32, _('The planting site specified by the given '
'OpenTreeMap Planting Site ID does not contain '
'the tree specified by the given OpenTreeMap '
'Tree ID'), True)
FLOAT_ERROR = e(40, _('Not formatted as a number'), True)
POS_FLOAT_ERROR = e(41, _('Not formatted as a positive number'), True)
INT_ERROR = e(42, _('Not formatted as an integer'), True)
POS_INT_ERROR = e(43, _('Not formatted as a positive integer'), True)
BOOL_ERROR = e(44, _('Not formatted as a boolean'), True)
STRING_TOO_LONG = e(45, _('Strings must be less than 255 characters'),
True)
INVALID_DATE = e(46, _('Invalid date (must by YYYY-MM-DD)'), True)
INVALID_UDF_VALUE = e(50, _('Invalid value for custom field'), True)
INVALID_ITREE_REGION = e(60, _('Unknown i-Tree region'), True)
ITREE_REGION_NOT_IN_INSTANCE = e(
61, _('i-Tree region not valid for this treemap'), True)
INVALID_ITREE_CODE = e(62, _('Unknown i-Tree code'), True)
ITREE_CODE_NOT_IN_REGION = e(
63, _('i-Tree code not defined for region'), True)
INSTANCE_HAS_NO_ITREE_REGION = e(64, _('This treemap intersects no '
'i-Tree regions and has no '
'default region'), True)
INSTANCE_HAS_MULTIPLE_ITREE_REGIONS = e(
65, _('This treemap intersects more than one i-Tree region'), True)
MERGE_REQUIRED = e(71, _('This row must be merged'), False)
NEARBY_TREES = e(
1050, _('There are already trees within ten feet of this one'), False)
DUPLICATE_TREE = e(
1051, _('There is already a tree at the specified location'), True)
SPECIES_DBH_TOO_HIGH = e(1060,
_('The diameter is too large for this species'),
False)
SPECIES_HEIGHT_TOO_HIGH = e(1061,
_('The height is too large for this species'),
False)
| gpl-3.0 |
shajoezhu/server | tests/unit/test_protocol_errors.py | 1 | 3820 | """
Unit tests for frontend error conditions.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import ga4gh.frontend as frontend
import ga4gh.exceptions as exceptions
import ga4gh.protocol as protocol
import ga4gh.avrotools as avrotools
import tests.utils as utils
class TestFrontendErrors(unittest.TestCase):
"""
    Tests the frontend for various errors that can occur and verifies,
    via the error code sent back, that the correct exception was
    raised.
"""
@classmethod
def setUpClass(cls):
frontend.configure(baseConfig="TestConfig")
cls.app = frontend.app.test_client()
@classmethod
def tearDownClass(cls):
cls.app = None
def setUp(self):
# TODO replace this with ALL post methods once the rest of the
# end points have been implemented. This should also add an API
# to protocol.py to simplify and document the process of getting
# the correct API endpoints and classes. That is, we shouldn't
# use protocol.postMethods directly, but instead call a function.
supportedMethods = set([
protocol.SearchCallSetsRequest,
protocol.SearchVariantSetsRequest,
protocol.SearchVariantsRequest,
])
self.endPointMap = {}
for endPoint, requestClass, responseClass in protocol.postMethods:
if requestClass in supportedMethods:
path = utils.applyVersion(endPoint)
self.endPointMap[path] = requestClass
def _createInstance(self, requestClass):
"""
Returns a valid instance of the specified class.
"""
creator = avrotools.Creator(requestClass)
instance = creator.getTypicalInstance()
return instance
def assertRawRequestRaises(self, exceptionClass, url, requestString):
"""
Verifies that the specified request string returns a protocol
exception corresponding to the specified class when applied to
all POST endpoints.
"""
response = self.app.post(
url, headers={'Content-type': 'application/json'},
data=requestString)
self.assertEqual(response.status_code, exceptionClass.httpStatus)
error = protocol.GAException.fromJsonString(response.data)
self.assertEqual(
error.errorCode, exceptionClass.getErrorCode())
self.assertGreater(len(error.message), 0)
def assertRequestRaises(self, exceptionClass, url, request):
"""
Verifies that the specified request returns a protocol exception
corresponding to the specified exception class.
"""
self.assertRawRequestRaises(
exceptionClass, url, request.toJsonString())
def testPageSize(self):
for url, requestClass in self.endPointMap.items():
for badType in ["", "1", "None", 0.0, 1e3]:
request = self._createInstance(requestClass)
request.pageSize = badType
self.assertRequestRaises(
exceptions.RequestValidationFailureException, url, request)
for badSize in [-100, -1, 0]:
request = self._createInstance(requestClass)
request.pageSize = badSize
self.assertRequestRaises(
exceptions.BadPageSizeException, url, request)
def testPageToken(self):
for url, requestClass in self.endPointMap.items():
for badType in [0, 0.0, 1e-3, {}, [], [None]]:
request = self._createInstance(requestClass)
request.pageToken = badType
self.assertRequestRaises(
exceptions.RequestValidationFailureException, url, request)
| apache-2.0 |
mbauskar/sapphire-erpnext | erpnext/buying/doctype/supplier/supplier.py | 9 | 4033 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe import msgprint, _
from frappe.model.naming import make_autoname
from erpnext.utilities.address_and_contact import load_address_and_contact
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.accounts.party import validate_party_accounts
class Supplier(TransactionBase):
def get_feed(self):
return self.supplier_name
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self, "supplier")
def autoname(self):
supp_master_name = frappe.defaults.get_global_default('supp_master_name')
if supp_master_name == 'Supplier Name':
self.name = self.supplier_name
else:
self.name = make_autoname(self.naming_series + '.#####')
def update_address(self):
frappe.db.sql("""update `tabAddress` set supplier_name=%s, modified=NOW()
where supplier=%s""", (self.supplier_name, self.name))
def update_contact(self):
frappe.db.sql("""update `tabContact` set supplier_name=%s, modified=NOW()
where supplier=%s""", (self.supplier_name, self.name))
def on_update(self):
if not self.naming_series:
self.naming_series = ''
self.update_address()
self.update_contact()
def validate(self):
#validation for Naming Series mandatory field...
if frappe.defaults.get_global_default('supp_master_name') == 'Naming Series':
if not self.naming_series:
msgprint(_("Series is mandatory"), raise_exception=1)
validate_party_accounts(self)
def get_contacts(self,nm):
if nm:
contact_details =frappe.db.convert_to_lists(frappe.db.sql("select name, CONCAT(IFNULL(first_name,''),' ',IFNULL(last_name,'')),contact_no,email_id from `tabContact` where supplier = %s", nm))
return contact_details
else:
return ''
def delete_supplier_address(self):
for rec in frappe.db.sql("select * from `tabAddress` where supplier=%s", (self.name,), as_dict=1):
frappe.db.sql("delete from `tabAddress` where name=%s",(rec['name']))
def delete_supplier_contact(self):
for contact in frappe.db.sql_list("""select name from `tabContact`
where supplier=%s""", self.name):
frappe.delete_doc("Contact", contact)
def on_trash(self):
self.delete_supplier_address()
self.delete_supplier_contact()
def after_rename(self, olddn, newdn, merge=False):
set_field = ''
if frappe.defaults.get_global_default('supp_master_name') == 'Supplier Name':
frappe.db.set(self, "supplier_name", newdn)
self.update_contact()
set_field = ", supplier_name=%(newdn)s"
self.update_supplier_address(newdn, set_field)
def update_supplier_address(self, newdn, set_field):
frappe.db.sql("""update `tabAddress` set address_title=%(newdn)s
{set_field} where supplier=%(newdn)s"""\
.format(set_field=set_field), ({"newdn": newdn}))
@frappe.whitelist()
def get_dashboard_info(supplier):
if not frappe.has_permission("Supplier", "read", supplier):
frappe.throw(_("No permission"))
out = {}
for doctype in ["Supplier Quotation", "Purchase Order", "Purchase Receipt", "Purchase Invoice"]:
out[doctype] = frappe.db.get_value(doctype,
{"supplier": supplier, "docstatus": ["!=", 2] }, "count(*)")
billing_this_year = frappe.db.sql("""
select sum(ifnull(credit_in_account_currency, 0)) - sum(ifnull(debit_in_account_currency, 0))
from `tabGL Entry`
where voucher_type='Purchase Invoice' and party_type = 'Supplier'
and party=%s and fiscal_year = %s""",
(supplier, frappe.db.get_default("fiscal_year")))
total_unpaid = frappe.db.sql("""select sum(outstanding_amount)
from `tabPurchase Invoice`
where supplier=%s and docstatus = 1""", supplier)
out["billing_this_year"] = billing_this_year[0][0] if billing_this_year else 0
out["total_unpaid"] = total_unpaid[0][0] if total_unpaid else 0
out["company_currency"] = frappe.db.sql_list("select distinct default_currency from tabCompany")
return out
| agpl-3.0 |
surgebiswas/poker | PokerBots_2017/Johnny/scipy/signal/_peak_finding.py | 29 | 17909 | """
Functions for identifying peaks in signals.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import xrange
from scipy.signal.wavelets import cwt, ricker
from scipy.stats import scoreatpercentile
__all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'find_peaks_cwt']
def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Relative extrema are calculated by finding locations where
``comparator(data[n], data[n+1:n+order+1])`` is True.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take 2 numbers as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n,n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default 'clip'. See numpy.take
Returns
-------
extrema : ndarray
Boolean array of the same shape as `data` that is True at an extrema,
False otherwise.
See also
--------
argrelmax, argrelmin
Examples
--------
>>> testdata = np.array([1,2,3,2,1])
>>> _boolrelextrema(testdata, np.greater, axis=0)
array([False, False, True, False, False], dtype=bool)
"""
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return results
def argrelmin(data, axis=0, order=1, mode='clip'):
"""
Calculate the relative minima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative minima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See numpy.take
Returns
-------
extrema : tuple of ndarrays
Indices of the minima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
See Also
--------
argrelextrema, argrelmax
Notes
-----
This function uses `argrelextrema` with np.less as comparator.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelmin
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelmin(x)
(array([1, 5]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelmin(y, axis=1)
(array([0, 2]), array([2, 1]))
"""
return argrelextrema(data, np.less, axis, order, mode)
def argrelmax(data, axis=0, order=1, mode='clip'):
"""
Calculate the relative maxima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative maxima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
See Also
--------
argrelextrema, argrelmin
Notes
-----
This function uses `argrelextrema` with np.greater as comparator.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelmax
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelmax(x)
(array([3, 6]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelmax(y, axis=1)
(array([0]), array([1]))
"""
return argrelextrema(data, np.greater, axis, order, mode)
def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take 2 numbers as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default is 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
See Also
--------
argrelmin, argrelmax
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelextrema
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelextrema(x, np.greater)
(array([3, 6]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelextrema(y, np.less, axis=1)
(array([0, 2]), array([2, 1]))
"""
results = _boolrelextrema(data, comparator,
axis, order, mode)
return np.where(results)
def _identify_ridge_lines(matr, max_distances, gap_thresh):
"""
Identify ridges in the 2-D matrix.
Expect that the width of the wavelet feature increases with increasing row
number.
Parameters
----------
matr : 2-D ndarray
Matrix in which to identify ridge lines.
max_distances : 1-D sequence
At each row, a ridge line is only connected
if the relative max at row[n] is within
`max_distances`[n] from the relative max at row[n+1].
gap_thresh : int
If a relative maximum is not found within `max_distances`,
there will be a gap. A ridge line is discontinued if
there are more than `gap_thresh` points without connecting
a new relative maximum.
Returns
-------
ridge_lines : tuple
Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the
ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none
found. Each ridge-line will be sorted by row (increasing), but the
order of the ridge lines is not specified.
References
----------
Bioinformatics (2006) 22 (17): 2059-2065.
doi: 10.1093/bioinformatics/btl355
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
Examples
--------
>>> data = np.random.rand(5,5)
    >>> ridge_lines = _identify_ridge_lines(data, np.full(5, 2), 1)
Notes
-----
This function is intended to be used in conjunction with `cwt`
as part of `find_peaks_cwt`.
"""
if(len(max_distances) < matr.shape[0]):
raise ValueError('Max_distances must have at least as many rows '
'as matr')
all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
# Highest row for which there are any relative maxima
has_relmax = np.where(all_max_cols.any(axis=1))[0]
if(len(has_relmax) == 0):
return []
start_row = has_relmax[-1]
# Each ridge line is a 3-tuple:
# rows, cols,Gap number
ridge_lines = [[[start_row],
[col],
0] for col in np.where(all_max_cols[start_row])[0]]
final_lines = []
rows = np.arange(start_row - 1, -1, -1)
cols = np.arange(0, matr.shape[1])
for row in rows:
this_max_cols = cols[all_max_cols[row]]
# Increment gap number of each line,
# set it to zero later if appropriate
for line in ridge_lines:
line[2] += 1
# XXX These should always be all_max_cols[row]
# But the order might be different. Might be an efficiency gain
# to make sure the order is the same and avoid this iteration
prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
# Look through every relative maximum found at current row
# Attempt to connect them with existing ridge lines.
for ind, col in enumerate(this_max_cols):
# If there is a previous ridge line within
# the max_distance to connect to, do so.
# Otherwise start a new one.
line = None
if(len(prev_ridge_cols) > 0):
diffs = np.abs(col - prev_ridge_cols)
closest = np.argmin(diffs)
if diffs[closest] <= max_distances[row]:
line = ridge_lines[closest]
if(line is not None):
# Found a point close enough, extend current ridge line
line[1].append(col)
line[0].append(row)
line[2] = 0
else:
new_line = [[row],
[col],
0]
ridge_lines.append(new_line)
# Remove the ridge lines with gap_number too high
# XXX Modifying a list while iterating over it.
# Should be safe, since we iterate backwards, but
# still tacky.
for ind in xrange(len(ridge_lines) - 1, -1, -1):
line = ridge_lines[ind]
if line[2] > gap_thresh:
final_lines.append(line)
del ridge_lines[ind]
out_lines = []
for line in (final_lines + ridge_lines):
sortargs = np.array(np.argsort(line[0]))
rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
rows[sortargs] = line[0]
cols[sortargs] = line[1]
out_lines.append([rows, cols])
return out_lines
def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
min_snr=1, noise_perc=10):
"""
Filter ridge lines according to prescribed criteria. Intended
to be used for finding relative maxima.
Parameters
----------
cwt : 2-D ndarray
Continuous wavelet transform from which the `ridge_lines` were defined.
ridge_lines : 1-D sequence
Each element should contain 2 sequences, the rows and columns
of the ridge line (respectively).
window_size : int, optional
Size of window to use to calculate noise floor.
Default is ``cwt.shape[1] / 20``.
min_length : int, optional
Minimum length a ridge line needs to be acceptable.
Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths.
min_snr : float, optional
Minimum SNR ratio. Default 1. The signal is the value of
the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
noise is the `noise_perc`th percentile of datapoints contained within a
window of `window_size` around ``cwt[0, loc]``.
noise_perc : float, optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
scipy.stats.scoreatpercentile.
References
----------
Bioinformatics (2006) 22 (17): 2059-2065. doi: 10.1093/bioinformatics/btl355
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
"""
num_points = cwt.shape[1]
if min_length is None:
min_length = np.ceil(cwt.shape[0] / 4)
if window_size is None:
window_size = np.ceil(num_points / 20)
window_size = int(window_size)
hf_window, odd = divmod(window_size, 2)
# Filter based on SNR
row_one = cwt[0, :]
noises = np.zeros_like(row_one)
for ind, val in enumerate(row_one):
window_start = max(ind - hf_window, 0)
window_end = min(ind + hf_window + odd, num_points)
noises[ind] = scoreatpercentile(row_one[window_start:window_end],
per=noise_perc)
def filt_func(line):
if len(line[0]) < min_length:
return False
snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
if snr < min_snr:
return False
return True
return list(filter(filt_func, ridge_lines))
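# Editor's note, not part of the original module: with the defaults above, a CWT
# computed over 30 widths and 1000 samples keeps only ridge lines at least
# ceil(30 / 4) = 8 rows long, and the noise floor for the SNR test is the
# noise_perc-th percentile of a ceil(1000 / 20) = 50-point window around each
# location on the smallest scale.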
def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None,
gap_thresh=None, min_length=None, min_snr=1, noise_perc=10):
"""
Attempt to find the peaks in a 1-D array.
The general approach is to smooth `vector` by convolving it with
`wavelet(width)` for each width in `widths`. Relative maxima which
appear at enough length scales, and with sufficiently high SNR, are
accepted.
Parameters
----------
vector : ndarray
1-D array in which to find the peaks.
widths : sequence
1-D array of widths to use for calculating the CWT matrix. In general,
this range should cover the expected width of peaks of interest.
wavelet : callable, optional
Should take two parameters and return a 1-D array to convolve
with `vector`. The first parameter determines the number of points
of the returned wavelet array, the second parameter is the scale
(`width`) of the wavelet. Should be normalized and symmetric.
Default is the ricker wavelet.
max_distances : ndarray, optional
At each row, a ridge line is only connected if the relative max at
row[n] is within ``max_distances[n]`` from the relative max at
``row[n+1]``. Default value is ``widths/4``.
gap_thresh : float, optional
If a relative maximum is not found within `max_distances`,
there will be a gap. A ridge line is discontinued if there are more
than `gap_thresh` points without connecting a new relative maximum.
Default is 2.
min_length : int, optional
Minimum length a ridge line needs to be acceptable.
Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths.
min_snr : float, optional
Minimum SNR ratio. Default 1. The signal is the value of
the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
noise is the `noise_perc`th percentile of datapoints contained within a
window of `window_size` around ``cwt[0, loc]``.
noise_perc : float, optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
`stats.scoreatpercentile`. Default is 10.
Returns
-------
peaks_indices : list
Indices of the locations in the `vector` where peaks were found.
The list is sorted.
See Also
--------
cwt
Notes
-----
This approach was designed for finding sharp peaks among noisy data,
however with proper parameter selection it should function well for
different peak shapes.
The algorithm is as follows:
1. Perform a continuous wavelet transform on `vector`, for the supplied
`widths`. This is a convolution of `vector` with `wavelet(width)` for
each width in `widths`. See `cwt`
2. Identify "ridge lines" in the cwt matrix. These are relative maxima
at each row, connected across adjacent rows. See identify_ridge_lines
3. Filter the ridge_lines using filter_ridge_lines.
.. versionadded:: 0.11.0
References
----------
.. [1] Bioinformatics (2006) 22 (17): 2059-2065.
doi: 10.1093/bioinformatics/btl355
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
Examples
--------
>>> from scipy import signal
>>> xs = np.arange(0, np.pi, 0.05)
>>> data = np.sin(xs)
>>> peakind = signal.find_peaks_cwt(data, np.arange(1,10))
>>> peakind, xs[peakind], data[peakind]
([32], array([ 1.6]), array([ 0.9995736]))
"""
if gap_thresh is None:
gap_thresh = np.ceil(widths[0])
if max_distances is None:
max_distances = widths / 4.0
if wavelet is None:
wavelet = ricker
cwt_dat = cwt(vector, wavelet, widths)
ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length,
min_snr=min_snr, noise_perc=noise_perc)
max_locs = [x[1][0] for x in filtered]
return sorted(max_locs)
| mit |
VoigtLab/dnaplotlib | gallery/repressilator_animate/repressilator_figure.py | 1 | 8758 | #!/usr/bin/env python
"""
Animation of the repressilator gene circuit
"""
import numpy as np
from scipy.integrate import odeint
import dnaplotlib
import matplotlib.pyplot as plt
from matplotlib import gridspec
__author__ = 'Emerson Glassey <eglassey@mit.edu>, Voigt Lab, MIT'
__license__ = 'MIT'
__version__ = '1.0'
# Initialize Simulation
# Initial concentration of mRNA and Protein for each repressor
mtet, mlac, mgamma, tet, lac, gamma = initial = [1, 1, 1, 2, 1, 1]
# Non-dimensionalized production rate
alpha = 15
# Degradation Rate
beta = 2000
# Repressor/Promoter Leak
leak = 1
# Hill Coefficient
n = 8
# Initialize Parts
# tetr is orange [1.00, 0.75, 0.17]
# lacI is green [0.38, 0.82, 0.32]
# gamma is blue [0.38, 0.65, 0.87]
plac = {'name':'P_lac', 'start':1, 'end':10, 'type':'Promoter', 'opts': {'color':[0.38, 0.82, 0.32]}}
rbs1 = {'name':'RBS', 'start':11, 'end':20, 'type':'RBS', 'opts':{'linewidth': 0, 'color':[0.0, 0.0, 0.0]}}
tetr = {'name':'tetR', 'start':21, 'end':40, 'type':'CDS', 'opts':{'label': 'tetR', 'fontsize': 8, 'label_y_offset': 0, 'label_x_offset': -2, 'label_style':'italic', 'color':[1.00, 0.75, 0.17]}}
term1 = {'name':'Term', 'start':41, 'end':55, 'type':'Terminator'}
pgamma = {'name':'P_gamma', 'start':56, 'end':65, 'type':'Promoter', 'opts': {'color':[0.38, 0.65, 0.87]}}
rbs2 = {'name':'RBS', 'start':66, 'end':75, 'type':'RBS', 'opts':{'linewidth': 0, 'color':[0.0, 0.0, 0.0]}}
laci = {'name':'lacI', 'start':76, 'end':95, 'type':'CDS', 'opts':{'label': 'lacI', 'fontsize': 8, 'label_y_offset': 0, 'label_x_offset': -2, 'label_style':'italic', 'color':[0.38, 0.82, 0.32]}}
term2 = {'name':'Term', 'start':96, 'end':110, 'type':'Terminator'}
ptet = {'name':'P_tet', 'start':111, 'end':120, 'type':'Promoter', 'opts': {'color':[1.00, 0.75, 0.17]}}
rbs3 = {'name':'RBS', 'start':121, 'end':130, 'type':'RBS', 'opts':{'linewidth': 0, 'color':[0.0, 0.0, 0.0]}}
gamma = {'name':'gamma', 'start':131, 'end':150, 'type':'CDS', 'opts':{'label': 'gamma', 'fontsize': 8, 'label_y_offset': 0, 'label_x_offset': -1, 'label_style':'italic', 'color':[0.38, 0.65, 0.87]}}
term3 = {'name':'Term', 'start':151, 'end':165, 'type':'Terminator'}
lac_repress = {'from_part':laci, 'to_part':plac, 'type':'Repression', 'opts':{'linewidth':1, 'color':[0.38, 0.82, 0.32]}}
gamma_repress = {'from_part':gamma, 'to_part':pgamma, 'type':'Repression', 'opts':{'linewidth':1, 'color':[0.38, 0.65, 0.87]}}
tet_repress = {'from_part':tetr, 'to_part':ptet, 'type':'Repression', 'opts':{'linewidth':1, 'color':[1.00, 0.75, 0.17]}}
def repressilator(y, t):
mtet, mlac, mgamma, tet, lac, gamma = y
dmtet = -mtet + (alpha / (1 + lac**n)) + leak
dtet = -beta * (tet - mtet)
dmlac = -mlac + (alpha / (1 + gamma**n)) + leak
dlac = -beta * (lac - mlac)
dmgamma = -mgamma + (alpha / (1 + tet**n)) + leak
dgamma = -beta * (gamma - mgamma)
return [dmtet, dmlac, dmgamma, dtet, dlac, dgamma]
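# Editor's note, not part of the original script: the system above is the standard
# non-dimensionalized repressilator, one mRNA/protein pair per repressor, each
# repressed by the previous protein in the cycle:
#   dm_i/dt = -m_i + alpha / (1 + p_j**n) + leak
#   dp_i/dt = -beta * (p_i - m_i)
# with (mRNA, repressing protein) pairs (tetR, lacI), (lacI, gamma), (gamma, tetR).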
def repression(val, Kd, power):
"""Function takes a value and Kd. Function fits the value to a hill function with n=power and Kd
and returns the fraction bound."""
new_val = val**power / (Kd**power + val** power)
return new_val
def expression(val, lims):
"""function takes a value between two limits (as a tuple) and returns the value normalized
by the limits to be between 0 and 1"""
new_val = (val - lims[0]) / (lims[1] - lims[0])
return new_val
def rescale(val, lims):
"""function takes a value between 0 and 1 and normalizes it between the limits in lims"""
new_val = (val*(lims[1]-lims[0])) + lims[0]
return new_val
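# Editor's note, not part of the original script: worked values for the three
# helpers above, matching how plot_construct() uses them.
#   repression(2.0, 2.0, 8)     -> 0.5   (half-maximal binding at val == Kd)
#   expression(2.5, (1.0, 4.0)) -> 0.5   (midway between the limits)
#   rescale(0.5, (0.2, 1.0))    -> 0.6   (0.5 mapped back onto [0.2, 1.0])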
def plot_construct(ax, t, ymtet, ymlac, ymgamma, ytet, ylac, ygamma):
tind = int(t*10)
exp_lims = (1.0, 4.0)
ax.set_title('t = {}'.format(t), fontsize=8)
# Set color for each of the CDSs
tetr['opts']['color'] = [rescale(1 - expression(ymtet[tind], exp_lims), (1.0, 1.0)),
rescale(1 - expression(ymtet[tind], exp_lims), (0.75, 1.0)),
rescale(1 - expression(ymtet[tind], exp_lims), (0.17, 1.0))]
laci['opts']['color'] = [rescale(1 - expression(ymlac[tind], exp_lims), (0.38, 1.0)),
rescale(1 - expression(ymlac[tind], exp_lims), (0.82, 1.0)),
rescale(1 - expression(ymlac[tind], exp_lims), (0.32, 1.0))]
gamma['opts']['color'] = [rescale(1 - expression(ymgamma[tind], exp_lims), (0.38, 1.0)),
rescale(1 - expression(ymgamma[tind], exp_lims), (0.65, 1.0)),
rescale(1 - expression(ymgamma[tind], exp_lims), (0.87, 1.0))]
# Set transparency for each of the regulatory lines
lac_repress['opts']['color'] = [0.38, 0.82, 0.32,
rescale(repression(ylac[tind], 2.0, 8), (0.2, 1.0))]
gamma_repress['opts']['color'] = [0.38, 0.65, 0.87,
rescale(repression(ygamma[tind], 2.0, 8), (0.2, 1.0))]
tet_repress['opts']['color'] = [1.00, 0.75, 0.17,
rescale(repression(ytet[tind], 2.0, 8), (0.2, 1.0))]
# Set width for each of the regulatory lines
lac_repress['opts']['linewidth'] = rescale(repression(ylac[tind], 2.0, 8), (0.5, 2.0))
gamma_repress['opts']['linewidth'] = rescale(repression(ygamma[tind], 2.0, 8), (0.5, 2.0))
tet_repress['opts']['linewidth'] = rescale(repression(ytet[tind], 2.0, 8), (0.5, 2.0))
dnaplotlib.plot_sbol_designs([ax], [[plac, rbs1, tetr, term1, pgamma, rbs2, laci, term2, ptet, rbs3, gamma, term3]],
[[lac_repress, gamma_repress, tet_repress]])
ax.set_ylim([-10, 31])
def movie(ts, ymtet, ymlac, ymgamma, ytet, ylac, ygamma):
for t in ts:
plt.close()
plt.figure(figsize=(4, 3.5))
gs = gridspec.GridSpec(3, 1, height_ratios=[2, 0.5, 1])
ax = plt.subplot(gs[0])
plt.plot(ts[:int(t*10)+1], ytet[:int(t*10)+1], color=[1.00, 0.75, 0.17])
plt.plot(ts[:int(t*10)+1], ylac[:int(t*10)+1], color=[0.38, 0.82, 0.32])
plt.plot(ts[:int(t*10)+1], ygamma[:int(t*10)+1], color=[0.38, 0.65, 0.87])
plt.xlim([0, 30])
plt.ylim([1,4])
ax.tick_params(axis='both', labelsize=8, width=0.8, length=3)
ax.yaxis.tick_left()
ax.xaxis.tick_bottom()
ax.set_xlabel('Time', fontsize=8, labelpad=3)
ax.set_ylabel('Protein Concentration', fontsize=8, labelpad=4)
plt.legend(['tetR', 'lacI', 'gamma'], frameon=False, fontsize=8, labelspacing=0.15, loc=(0.03,0.65))
plt.plot(ts[int(t*10)], ytet[int(t*10)], '.', color=[1.00, 0.75, 0.17], markersize=6.0)
plt.plot(ts[int(t*10)], ylac[int(t*10)], '.', color=[0.38, 0.82, 0.32], markersize=6.0)
plt.plot(ts[int(t*10)], ygamma[int(t*10)], '.', color=[0.38, 0.65, 0.87], markersize=6.0)
ax = plt.subplot(gs[2])
plot_construct(ax, t, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
plt.savefig("movie/repressilator_t{}.jpg".format(t), dpi=300)
def main():
t = np.arange(0, 30.1, 0.1)
ymtet, ymlac, ymgamma, ytet, ylac, ygamma = list(zip(*odeint(repressilator, initial, t)))
plt.close()
plt.figure(figsize=(3.5, 6.5))
gs = gridspec.GridSpec(8, 1, height_ratios=[1, 2.5, 0.1, 1, 1, 1, 1, 1])
# Plot of repressilator circuit
ax = plt.subplot(gs[0])
dnaplotlib.plot_sbol_designs([ax], [[plac, rbs1, tetr, term1, pgamma, rbs2, laci, term2, ptet, rbs3, gamma, term3]],
[[lac_repress, gamma_repress, tet_repress]])
ax.set_ylim([-10, 31])
# Plot of repressilator dynamics
ax = plt.subplot(gs[1])
plt.plot(t, ytet, color=[1.00, 0.75, 0.17])
plt.plot(t, ylac, color=[0.38, 0.82, 0.32])
plt.plot(t, ygamma, color=[0.38, 0.65, 0.87])
plt.axvline(x=1, color='k', linewidth=0.7)
plt.axvline(x=12, color='k', linewidth=0.7)
plt.axvline(x=25.3, color='k', linewidth=0.7)
plt.axvline(x=27.3, color='k', linewidth=0.7)
plt.axvline(x=29.4, color='k', linewidth=0.7)
plt.ylim([1,4])
ax.tick_params(axis='both', labelsize=8, width=0.8, length=3)
ax.yaxis.tick_left()
ax.xaxis.tick_bottom()
ax.set_xlabel('Time', fontsize=8, labelpad=1)
ax.set_ylabel('Protein Concentration', fontsize=8, labelpad=2)
plt.legend(['tetR', 'lacI', 'gamma'], frameon=False, fontsize=8, labelspacing=0.15, loc=(0.06,0.65))
# Plot of each timepoint
ax = plt.subplot(gs[3])
plot_construct(ax, 1, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
ax = plt.subplot(gs[4])
plot_construct(ax, 12, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
ax = plt.subplot(gs[5])
plot_construct(ax, 25.3, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
ax = plt.subplot(gs[6])
plot_construct(ax, 27.3, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
ax = plt.subplot(gs[7])
plot_construct(ax, 29.4, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
# Update subplot spacing
plt.subplots_adjust(hspace=0.4, left=0.12, right=0.95, top=0.99, bottom=0.01)
# Save the figure
plt.savefig('repressilator_animate.pdf', transparent=True)
plt.savefig('repressilator_animate.png', dpi=300)
# Generate the movie frames
movie(t, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
if __name__ == '__main__':
main()
| mit |
coderrick/three.js | utils/converters/obj/split_obj.py | 369 | 12687 | """Split single OBJ model into multiple OBJ files by materials
-------------------------------------
How to use
-------------------------------------
python split_obj.py -i infile.obj -o outfile
Will generate:
outfile_000.obj
outfile_001.obj
...
outfile_XXX.obj
-------------------------------------
Parser based on format description
-------------------------------------
http://en.wikipedia.org/wiki/Obj
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
TRUNCATE = False
SCALE = 1.0
# #####################################################
# Templates
# #####################################################
TEMPLATE_OBJ = u"""\
################################
# OBJ generated by split_obj.py
################################
# Faces: %(nfaces)d
# Vertices: %(nvertices)d
# Normals: %(nnormals)d
# UVs: %(nuvs)d
################################
# vertices
%(vertices)s
# normals
%(normals)s
# uvs
%(uvs)s
# faces
%(faces)s
"""
TEMPLATE_VERTEX = "v %f %f %f"
TEMPLATE_VERTEX_TRUNCATE = "v %d %d %d"
TEMPLATE_NORMAL = "vn %.5g %.5g %.5g"
TEMPLATE_UV = "vt %.5g %.5g"
TEMPLATE_FACE3_V = "f %d %d %d"
TEMPLATE_FACE4_V = "f %d %d %d %d"
TEMPLATE_FACE3_VT = "f %d/%d %d/%d %d/%d"
TEMPLATE_FACE4_VT = "f %d/%d %d/%d %d/%d %d/%d"
TEMPLATE_FACE3_VN = "f %d//%d %d//%d %d//%d"
TEMPLATE_FACE4_VN = "f %d//%d %d//%d %d//%d %d//%d"
TEMPLATE_FACE3_VTN = "f %d/%d/%d %d/%d/%d %d/%d/%d"
TEMPLATE_FACE4_VTN = "f %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d"
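# Example of how the face templates render (values are illustrative):
#   TEMPLATE_FACE3_VTN % (1, 1, 1, 2, 2, 2, 3, 3, 3)  ->  "f 1/1/1 2/2/2 3/3/3"
#   TEMPLATE_FACE3_VN  % (1, 1, 2, 2, 3, 3)           ->  "f 1//1 2//2 3//3"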
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
"""Return true if file exists and is accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
"""Parse text chunk specifying single vertex.
Possible formats:
vertex index
vertex index / texture index
vertex index / texture index / normal index
vertex index / / normal index
"""
v = 0
t = 0
n = 0
chunks = text.split("/")
v = int(chunks[0])
if len(chunks) > 1:
if chunks[1]:
t = int(chunks[1])
if len(chunks) > 2:
if chunks[2]:
n = int(chunks[2])
return { 'v': v, 't': t, 'n': n }
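# Illustrative examples of the dict returned above:
#   parse_vertex("5/7/3")  ->  {'v': 5, 't': 7, 'n': 3}
#   parse_vertex("5//3")   ->  {'v': 5, 't': 0, 'n': 3}   # empty texture slot
#   parse_vertex("5")      ->  {'v': 5, 't': 0, 'n': 0}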
def parse_obj(fname):
"""Parse OBJ file.
"""
vertices = []
normals = []
uvs = []
faces = []
materials = {}
mcounter = 0
mcurrent = 0
mtllib = ""
# current face state
group = 0
object = 0
smooth = 0
for line in fileinput.input(fname):
chunks = line.split()
if len(chunks) > 0:
# Vertices as (x,y,z) coordinates
# v 0.123 0.234 0.345
if chunks[0] == "v" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
vertices.append([x,y,z])
# Normals in (x,y,z) form; normals might not be unit
# vn 0.707 0.000 0.707
if chunks[0] == "vn" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
normals.append([x,y,z])
# Texture coordinates in (u,v[,w]) coordinates, w is optional
# vt 0.500 -1.352 [0.234]
if chunks[0] == "vt" and len(chunks) >= 3:
u = float(chunks[1])
v = float(chunks[2])
w = 0
if len(chunks)>3:
w = float(chunks[3])
uvs.append([u,v,w])
# Face
if chunks[0] == "f" and len(chunks) >= 4:
vertex_index = []
uv_index = []
normal_index = []
for v in chunks[1:]:
vertex = parse_vertex(v)
if vertex['v']:
vertex_index.append(vertex['v'])
if vertex['t']:
uv_index.append(vertex['t'])
if vertex['n']:
normal_index.append(vertex['n'])
faces.append({
'vertex':vertex_index,
'uv':uv_index,
'normal':normal_index,
'material':mcurrent,
'group':group,
'object':object,
'smooth':smooth,
})
# Group
if chunks[0] == "g" and len(chunks) == 2:
group = chunks[1]
# Object
if chunks[0] == "o" and len(chunks) == 2:
object = chunks[1]
# Materials definition
if chunks[0] == "mtllib" and len(chunks) == 2:
mtllib = chunks[1]
# Material
if chunks[0] == "usemtl" and len(chunks) == 2:
material = chunks[1]
if not material in materials:
mcurrent = mcounter
materials[material] = mcounter
mcounter += 1
else:
mcurrent = materials[material]
# Smooth shading
if chunks[0] == "s" and len(chunks) == 2:
smooth = chunks[1]
return faces, vertices, uvs, normals, materials, mtllib
# #############################################################################
# API - Breaker
# #############################################################################
def break_obj(infile, outfile):
"""Break infile.obj to outfile.obj
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
# sort faces by materials
chunks = {}
for face in faces:
material = face["material"]
if not material in chunks:
chunks[material] = {"faces": [], "vertices": set(), "normals": set(), "uvs": set()}
chunks[material]["faces"].append(face)
# extract unique vertex / normal / uv indices used per chunk
for material in chunks:
chunk = chunks[material]
for face in chunk["faces"]:
for i in face["vertex"]:
chunk["vertices"].add(i)
for i in face["normal"]:
chunk["normals"].add(i)
for i in face["uv"]:
chunk["uvs"].add(i)
# generate new OBJs
for mi, material in enumerate(chunks):
chunk = chunks[material]
# generate separate vertex / normal / uv index lists for each chunk
# (including mapping from original to new indices)
# get well defined order
new_vertices = list(chunk["vertices"])
new_normals = list(chunk["normals"])
new_uvs = list(chunk["uvs"])
# map original => new indices
vmap = {}
for i, v in enumerate(new_vertices):
vmap[v] = i + 1
nmap = {}
for i, n in enumerate(new_normals):
nmap[n] = i + 1
tmap = {}
for i, t in enumerate(new_uvs):
tmap[t] = i + 1
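        # Illustrative example of the remapping: if chunk["vertices"] were {10, 3, 7}
        # and list() happened to give new_vertices == [10, 3, 7], then
        # vmap == {10: 1, 3: 2, 7: 3} -- OBJ indices are 1-based, so original
        # vertex 10 becomes vertex 1 in this chunk's output file.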
# vertices
pieces = []
for i in new_vertices:
vertex = vertices[i-1]
txt = TEMPLATE_VERTEX % (vertex[0], vertex[1], vertex[2])
pieces.append(txt)
str_vertices = "\n".join(pieces)
# normals
pieces = []
for i in new_normals:
normal = normals[i-1]
txt = TEMPLATE_NORMAL % (normal[0], normal[1], normal[2])
pieces.append(txt)
str_normals = "\n".join(pieces)
# uvs
pieces = []
for i in new_uvs:
uv = uvs[i-1]
txt = TEMPLATE_UV % (uv[0], uv[1])
pieces.append(txt)
str_uvs = "\n".join(pieces)
# faces
pieces = []
for face in chunk["faces"]:
txt = ""
fv = face["vertex"]
fn = face["normal"]
ft = face["uv"]
if len(fv) == 3:
va = vmap[fv[0]]
vb = vmap[fv[1]]
vc = vmap[fv[2]]
if len(fn) == 3 and len(ft) == 3:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
txt = TEMPLATE_FACE3_VTN % (va, ta, na, vb, tb, nb, vc, tc, nc)
elif len(fn) == 3:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
txt = TEMPLATE_FACE3_VN % (va, na, vb, nb, vc, nc)
elif len(ft) == 3:
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
txt = TEMPLATE_FACE3_VT % (va, ta, vb, tb, vc, tc)
else:
txt = TEMPLATE_FACE3_V % (va, vb, vc)
elif len(fv) == 4:
va = vmap[fv[0]]
vb = vmap[fv[1]]
vc = vmap[fv[2]]
vd = vmap[fv[3]]
if len(fn) == 4 and len(ft) == 4:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
nd = nmap[fn[3]]
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
td = tmap[ft[3]]
txt = TEMPLATE_FACE4_VTN % (va, ta, na, vb, tb, nb, vc, tc, nc, vd, td, nd)
elif len(fn) == 4:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
nd = nmap[fn[3]]
txt = TEMPLATE_FACE4_VN % (va, na, vb, nb, vc, nc, vd, nd)
elif len(ft) == 4:
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
td = tmap[ft[3]]
txt = TEMPLATE_FACE4_VT % (va, ta, vb, tb, vc, tc, vd, td)
else:
txt = TEMPLATE_FACE4_V % (va, vb, vc, vd)
pieces.append(txt)
str_faces = "\n".join(pieces)
# generate OBJ string
content = TEMPLATE_OBJ % {
"nfaces" : len(chunk["faces"]),
"nvertices" : len(new_vertices),
"nnormals" : len(new_normals),
"nuvs" : len(new_uvs),
"vertices" : str_vertices,
"normals" : str_normals,
"uvs" : str_uvs,
"faces" : str_faces
}
# write OBJ file
outname = "%s_%03d.obj" % (outfile, mi)
f = open(outname, "w")
f.write(content)
f.close()
# #############################################################################
# Helpers
# #############################################################################
def usage():
print "Usage: %s -i filename.obj -o prefix" % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:o:x:", ["help", "input=", "output=", "truncatescale="])
except getopt.GetoptError:
usage()
sys.exit(2)
infile = outfile = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
infile = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-x", "--truncatescale"):
TRUNCATE = True
SCALE = float(a)
if infile == "" or outfile == "":
usage()
sys.exit(2)
print "Splitting [%s] into [%s_XXX.obj] ..." % (infile, outfile)
break_obj(infile, outfile)
| mit |
cetic/ansible | lib/ansible/executor/playbook_executor.py | 27 | 12744 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils._text import to_native, to_text
from ansible.playbook import Playbook
from ansible.template import Templar
from ansible.utils.helpers import pct_to_int
from ansible.utils.path import makedirs_safe
from ansible.utils.ssh_functions import check_for_controlpersist
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class PlaybookExecutor:
'''
This is the primary class for executing playbooks, and thus the
basis for bin/ansible-playbook operation.
'''
def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._options = options
self.passwords = passwords
self._unreachable_hosts = dict()
if options.listhosts or options.listtasks or options.listtags or options.syntax:
self._tqm = None
else:
self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)
# Note: We run this here to cache whether the default ansible ssh
# executable supports control persist. Sometime in the future we may
# need to enhance this to check that ansible_ssh_executable specified
# in inventory is also cached. We can't do this caching at the point
# where it is used (in task_executor) because that is post-fork and
# therefore would be discarded after every task.
check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
def run(self):
'''
Run the given playbook, based on the settings in the play which
may limit the runs to serialized groups, etc.
'''
result = 0
entrylist = []
entry = {}
try:
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
# FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
entry['plays'] = []
else:
# make sure the tqm has callbacks loaded
self._tqm.load_callbacks()
self._tqm.send_callback('v2_playbook_on_start', pb)
i = 1
plays = pb.get_plays()
display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))
for play in plays:
if play._included_path is not None:
self._loader.set_basedir(play._included_path)
else:
self._loader.set_basedir(pb._basedir)
# clear any filters which may have been applied to the inventory
self._inventory.remove_restriction()
if play.vars_prompt:
for var in play.vars_prompt:
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = var.get("private", True)
confirm = var.get("confirm", False)
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
if vname not in self._variable_manager.extra_vars:
if self._tqm:
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
else: # we are either in --list-<option> or syntax check
play.vars[vname] = default
# Create a temporary copy of the play here, so we can run post_validate
# on it without the templating changes affecting the original object.
all_vars = self._variable_manager.get_vars(play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
if self._options.syntax:
continue
if self._tqm is None:
# we are just doing a listing
entry['plays'].append(new_play)
else:
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
previously_failed = len(self._tqm._failed_hosts)
previously_unreachable = len(self._tqm._unreachable_hosts)
break_play = False
# we are actually running plays
batches = self._get_serialized_batches(new_play)
if len(batches) == 0:
self._tqm.send_callback('v2_playbook_on_play_start', new_play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
for batch in batches:
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
result = self._tqm.run(play=play)
# break the play if the result equals the special return code
if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
result = self._tqm.RUN_FAILED_HOSTS
break_play = True
# check the number of failures here, to see if they're above the maximum
# failure percentage allowed, or if any errors are fatal. If either of those
# conditions are met, we break out, otherwise we only break out if the entire
# batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
(previously_failed + previously_unreachable)
if len(batch) == failed_hosts_count:
break_play = True
break
# update the previous counts so they don't accumulate incorrectly
# over multiple serial batches
previously_failed += len(self._tqm._failed_hosts) - previously_failed
previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable
# save the unreachable hosts from this batch
self._unreachable_hosts.update(self._tqm._unreachable_hosts)
if break_play:
break
i = i + 1 # per play
if entry:
entrylist.append(entry) # per playbook
# send the stats callback for this playbook
if self._tqm is not None:
if C.RETRY_FILES_ENABLED:
retries = set(self._tqm._failed_hosts.keys())
retries.update(self._tqm._unreachable_hosts.keys())
retries = sorted(retries)
if len(retries) > 0:
if C.RETRY_FILES_SAVE_PATH:
basedir = C.RETRY_FILES_SAVE_PATH
elif playbook_path:
basedir = os.path.dirname(os.path.abspath(playbook_path))
else:
basedir = '~/'
(retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
filename = os.path.join(basedir, "%s.retry" % retry_name)
if self._generate_retry_inventory(filename, retries):
display.display("\tto retry, use: --limit @%s\n" % filename)
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
# if the last result wasn't zero, break out of the playbook file name loop
if result != 0:
break
if entrylist:
return entrylist
finally:
if self._tqm is not None:
self._tqm.cleanup()
if self._loader:
self._loader.cleanup_all_tmp_files()
if self._options.syntax:
display.display("No issues encountered")
return result
return result
def _get_serialized_batches(self, play):
'''
Returns a list of hosts, subdivided into batches based on
the serial size specified in the play.
'''
# make sure we have a unique list of hosts
all_hosts = self._inventory.get_hosts(play.hosts)
all_hosts_len = len(all_hosts)
# the serial value can be listed as a scalar or a list of
# scalars, so we make sure it's a list here
serial_batch_list = play.serial
if len(serial_batch_list) == 0:
serial_batch_list = [-1]
cur_item = 0
serialized_batches = []
while len(all_hosts) > 0:
# get the serial value from current item in the list
serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)
# if the serial count was not specified or is invalid, default to
# a list of all hosts, otherwise grab a chunk of the hosts equal
# to the current serial item size
if serial <= 0:
serialized_batches.append(all_hosts)
break
else:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batches.append(play_hosts)
# increment the current batch list item number, and if we've hit
# the end keep using the last element until we've consumed all of
# the hosts in the inventory
cur_item += 1
if cur_item > len(serial_batch_list) - 1:
cur_item = len(serial_batch_list) - 1
return serialized_batches
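        # Illustrative walk-through (assuming pct_to_int turns '30%' of 10 hosts into 3):
        # with 10 matched hosts and play.serial == [2, '30%'], the loop above yields
        # batches of sizes [2, 3, 3, 2] -- once the serial list is exhausted, its last
        # entry keeps being reused until every host has been assigned to a batch.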
def _generate_retry_inventory(self, retry_path, replay_hosts):
'''
Called when a playbook run fails. It generates an inventory which allows
re-running on ONLY the failed hosts. This may duplicate some variable
information in group_vars/host_vars but that is ok, and expected.
'''
try:
makedirs_safe(os.path.dirname(retry_path))
with open(retry_path, 'w') as fd:
for x in replay_hosts:
fd.write("%s\n" % x)
except Exception as e:
display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_native(e)))
return False
return True
| gpl-3.0 |
xuleiboy1234/autoTitle | tensorflow/tensorflow/contrib/timeseries/python/timeseries/state_space_models/level_trend_test.py | 91 | 2058 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for level and trend state space model components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import level_trend
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import test_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class SpecialCaseTests(test.TestCase):
def test_adder_transition_to_powers(self):
num_steps = 3
dtype = dtypes.float64
adder = level_trend.AdderStateSpaceModel(
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype))
test_utils.transition_power_test_template(
test_case=self, model=adder, num_steps=num_steps)
def test_adder_noise_accumulator(self):
num_steps = 3
dtype = dtypes.float64
use_level_noise = True
adder = level_trend.AdderStateSpaceModel(
use_level_noise=use_level_noise,
configuration=state_space_model.StateSpaceModelConfiguration(
dtype=dtype))
test_utils.noise_accumulator_test_template(
test_case=self, model=adder, num_steps=num_steps)
if __name__ == "__main__":
test.main()
| mit |
apocalypsebg/odoo | addons/website_crm_partner_assign/controllers/main.py | 271 | 7541 | # -*- coding: utf-8 -*-
import werkzeug
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import slug, unslug
from openerp.tools.translate import _
class WebsiteCrmPartnerAssign(http.Controller):
_references_per_page = 40
@http.route([
'/partners',
'/partners/page/<int:page>',
'/partners/grade/<model("res.partner.grade"):grade>',
'/partners/grade/<model("res.partner.grade"):grade>/page/<int:page>',
'/partners/country/<model("res.country"):country>',
'/partners/country/<model("res.country"):country>/page/<int:page>',
'/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>',
'/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>/page/<int:page>',
], type='http', auth="public", website=True)
def partners(self, country=None, grade=None, page=0, **post):
country_all = post.pop('country_all', False)
partner_obj = request.registry['res.partner']
country_obj = request.registry['res.country']
search = post.get('search', '')
base_partner_domain = [('is_company', '=', True), ('grade_id.website_published', '=', True), ('website_published', '=', True)]
if search:
base_partner_domain += ['|', ('name', 'ilike', search), ('website_description', 'ilike', search)]
# group by grade
grade_domain = list(base_partner_domain)
if not country and not country_all:
country_code = request.session['geoip'].get('country_code')
if country_code:
country_ids = country_obj.search(request.cr, request.uid, [('code', '=', country_code)], context=request.context)
if country_ids:
country = country_obj.browse(request.cr, request.uid, country_ids[0], context=request.context)
if country:
grade_domain += [('country_id', '=', country.id)]
grades = partner_obj.read_group(
request.cr, SUPERUSER_ID, grade_domain, ["id", "grade_id"],
groupby="grade_id", orderby="grade_id DESC", context=request.context)
grades_partners = partner_obj.search(
request.cr, SUPERUSER_ID, grade_domain,
context=request.context, count=True)
# flag active grade
for grade_dict in grades:
grade_dict['active'] = grade and grade_dict['grade_id'][0] == grade.id
grades.insert(0, {
'grade_id_count': grades_partners,
'grade_id': (0, _("All Categories")),
'active': bool(grade is None),
})
# group by country
country_domain = list(base_partner_domain)
if grade:
country_domain += [('grade_id', '=', grade.id)]
countries = partner_obj.read_group(
request.cr, SUPERUSER_ID, country_domain, ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
countries_partners = partner_obj.search(
request.cr, SUPERUSER_ID, country_domain,
context=request.context, count=True)
# flag active country
for country_dict in countries:
country_dict['active'] = country and country_dict['country_id'] and country_dict['country_id'][0] == country.id
countries.insert(0, {
'country_id_count': countries_partners,
'country_id': (0, _("All Countries")),
'active': bool(country is None),
})
# current search
if grade:
base_partner_domain += [('grade_id', '=', grade.id)]
if country:
base_partner_domain += [('country_id', '=', country.id)]
# format pager
if grade and not country:
url = '/partners/grade/' + slug(grade)
elif country and not grade:
url = '/partners/country/' + slug(country)
elif country and grade:
url = '/partners/grade/' + slug(grade) + '/country/' + slug(country)
else:
url = '/partners'
url_args = {}
if search:
url_args['search'] = search
if country_all:
url_args['country_all'] = True
partner_count = partner_obj.search_count(
request.cr, SUPERUSER_ID, base_partner_domain,
context=request.context)
pager = request.website.pager(
url=url, total=partner_count, page=page, step=self._references_per_page, scope=7,
url_args=url_args)
# search partners matching current search parameters
partner_ids = partner_obj.search(
request.cr, SUPERUSER_ID, base_partner_domain,
order="grade_id DESC",
context=request.context) # todo in trunk: order="grade_id DESC, implemented_count DESC", offset=pager['offset'], limit=self._references_per_page
partners = partner_obj.browse(request.cr, SUPERUSER_ID, partner_ids, request.context)
# remove me in trunk
partners = sorted(partners, key=lambda x: (x.grade_id.sequence if x.grade_id else 0, len([i for i in x.implemented_partner_ids if i.website_published])), reverse=True)
partners = partners[pager['offset']:pager['offset'] + self._references_per_page]
google_map_partner_ids = ','.join(map(str, [p.id for p in partners]))
values = {
'countries': countries,
'current_country': country,
'grades': grades,
'current_grade': grade,
'partners': partners,
'google_map_partner_ids': google_map_partner_ids,
'pager': pager,
'searches': post,
'search_path': "%s" % werkzeug.url_encode(post),
}
return request.website.render("website_crm_partner_assign.index", values)
# Do not use semantic controller due to SUPERUSER_ID
@http.route(['/partners/<partner_id>'], type='http', auth="public", website=True)
def partners_detail(self, partner_id, partner_name='', **post):
_, partner_id = unslug(partner_id)
current_grade, current_country = None, None
grade_id = post.get('grade_id')
country_id = post.get('country_id')
if grade_id:
grade_ids = request.registry['res.partner.grade'].exists(request.cr, request.uid, int(grade_id), context=request.context)
if grade_ids:
current_grade = request.registry['res.partner.grade'].browse(request.cr, request.uid, grade_ids[0], context=request.context)
if country_id:
country_ids = request.registry['res.country'].exists(request.cr, request.uid, int(country_id), context=request.context)
if country_ids:
current_country = request.registry['res.country'].browse(request.cr, request.uid, country_ids[0], context=request.context)
if partner_id:
partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
if partner.exists() and partner.website_published:
values = {
'main_object': partner,
'partner': partner,
'current_grade': current_grade,
'current_country': current_country
}
return request.website.render("website_crm_partner_assign.partner", values)
return self.partners(**post)
| agpl-3.0 |
odoo-brazil/PySPED | pysped/tabela/pais.py | 9 | 2201 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals, absolute_import
import sys
import os
import unicodedata
CURDIR = os.path.dirname(os.path.abspath(__file__))
class _Pais(object):
def __init__(self, nome='', codigo_bacen='', codigo_anp='',
codigo_siscomex='', nome_frances='', nome_ingles='', iso_3166_2='',
iso_3166_3='', iso_3166_numerico=''):
self.nome = nome
self.codigo_bacen = codigo_bacen
self.codigo_anp = codigo_anp
self.codigo_siscomex = codigo_siscomex
self.nome_frances = nome_frances
self.nome_ingles = nome_ingles
self.iso_3166_2 = iso_3166_2
self.iso_3166_3 = iso_3166_3
self.iso_3166_numerico = iso_3166_numerico
def __str__(self):
return unicode.encode(self.__unicode__(), 'utf-8')
def __unicode__(self):
return self.nome + ' - BACEN: ' + self.codigo_bacen
def __repr__(self):
return str(self)
def _monta_dicionario_bacen():
dicionario = {}
arquivo = open(os.path.join(CURDIR, 'pais.txt'), 'r')
#
# Pula a primeira linha
#
arquivo.readline()
for linha in arquivo:
linha = linha.decode('utf-8').replace('\n', '').replace('\r', '')
campos = linha.split('|')
p = _Pais(nome=campos[0], codigo_bacen=campos[1], codigo_anp=campos[2],
codigo_siscomex=campos[3], nome_frances=campos[4], nome_ingles=campos[5],
iso_3166_2=campos[6], iso_3166_3=campos[7], iso_3166_numerico=campos[8])
dicionario[p.codigo_bacen] = p
return dicionario
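# Illustrative (hypothetical) pais.txt line, in the field order parsed above --
# nome|codigo_bacen|codigo_anp|codigo_siscomex|nome_frances|nome_ingles|iso_3166_2|iso_3166_3|iso_3166_numerico:
#   Brasil|1058|...|...|Bresil|Brazil|BR|BRA|076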
def maiuscula_sem_acento(texto):
return unicodedata.normalize('NFKD', texto).encode('ascii', 'ignore').upper().encode('utf-8')
def _monta_dicionario_nome():
dicionario = {}
for k, v in PAIS_BACEN.items():
dicionario[maiuscula_sem_acento(v.nome)] = v
return dicionario
if not hasattr(sys.modules[__name__], 'PAIS_BACEN'):
PAIS_BACEN = _monta_dicionario_bacen()
if not hasattr(sys.modules[__name__], 'PAIS_BRASIL'):
PAIS_BRASIL = PAIS_BACEN['1058']
if not hasattr(sys.modules[__name__], 'PAIS_NOME'):
PAIS_NOME = _monta_dicionario_nome()
| lgpl-2.1 |
a10networks/a10-neutron-lbaas | a10_neutron_lbaas/tests/unit/v2/test_handler_pool.py | 2 | 10131 | # Copyright 2016 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from a10_neutron_lbaas.tests.unit.v2 import fake_objs
from a10_neutron_lbaas.tests.unit.v2 import test_base
import a10_neutron_lbaas.a10_exceptions as a10_ex
class TestPools(test_base.HandlerTestBase):
def test_sanity(self):
pass
def test_create(self):
methods = {
'ROUND_ROBIN':
self.a.last_client.slb.service_group.ROUND_ROBIN,
'LEAST_CONNECTIONS':
self.a.last_client.slb.service_group.LEAST_CONNECTION,
'SOURCE_IP':
self.a.last_client.slb.service_group.WEIGHTED_LEAST_CONNECTION,
}
protocols = {
'TCP': self.a.last_client.slb.service_group.TCP,
'UDP': self.a.last_client.slb.service_group.UDP,
}
persistences = [None, 'SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE']
listeners = [False, True]
for p in protocols.keys():
for m in methods.keys():
for pers in persistences:
for listener in listeners:
self.a.reset_mocks()
saw_exception = False
pool = fake_objs.FakePool(p, m, pers, listener)
try:
self.a.pool.create(None, pool)
except a10_ex.UnsupportedFeature as e:
if pers == 'APP_COOKIE':
saw_exception = True
else:
raise e
self.print_mocks()
# (self.a.last_client.slb.service_group.create.
# assert_called_with(
# pool.id,
# axapi_args={"service_group": {}},
# lb_method=methods(m),
# protocol=protocols(p)))
if not saw_exception:
n = str(self.a.last_client.mock_calls).index(
'slb.service_group.create')
self.assertTrue(n >= 0)
if pers == 'SOURCE_IP':
(self.a.last_client.slb.template.
src_ip_persistence.create.
assert_called_with(pool.id))
elif pers == 'HTTP_COOKIE':
(self.a.last_client.slb.template.
cookie_persistence.create.
assert_called_with(pool.id))
elif pers == 'APP_COOKIE':
(self.a.last_client.slb.template.
cookie_persistence.create.
assert_called_with(pool.id))
def test_create_with_template(self):
template = {
"service-group": {
"template-server": "sg1",
"template-port": "sg1",
"template-policy": "sg1"
}
}
exp_template = {
"template-server": "sg1",
"template-port": "sg1",
"template-policy": "sg1"
}
for k, v in self.a.config.get_devices().items():
v['templates'] = template
pers1 = None
pool = fake_objs.FakePool('TCP', 'ROUND_ROBIN', pers1, True)
self.a.pool.create(None, pool)
self.a.last_client.slb.service_group.create.assert_called_with(
pool.id,
axapi_args={"service_group": {}},
lb_method=mock.ANY,
config_defaults=mock.ANY,
protocol=mock.ANY,
service_group_templates=exp_template)
def test_update(self):
pers1 = None
pers2 = None
old_pool = fake_objs.FakePool('TCP', 'LEAST_CONNECTIONS', pers1, True)
pool = fake_objs.FakePool('TCP', 'ROUND_ROBIN', pers2, True)
self.a.pool.update(None, pool, old_pool)
self.print_mocks()
self.a.last_client.slb.service_group.update.assert_called_with(
pool.id,
axapi_args={"service_group": {}},
lb_method=mock.ANY,
config_defaults=mock.ANY,
protocol=mock.ANY,
service_group_templates=None)
def test_update_with_template(self):
template = {
"service-group": {
"template-server": "sg1",
"template-port": "sg1",
"template-policy": "sg1"
}
}
exp_template = {
"template-server": "sg1",
"template-port": "sg1",
"template-policy": "sg1"
}
for k, v in self.a.config.get_devices().items():
v['templates'] = template
pers1 = None
pers2 = None
old_pool = fake_objs.FakePool('TCP', 'LEAST_CONNECTIONS', pers1, True)
pool = fake_objs.FakePool('TCP', 'ROUND_ROBIN', pers2, True)
self.a.pool.update(None, pool, old_pool)
self.a.last_client.slb.service_group.update.assert_called_with(
pool.id,
axapi_args={"service_group": {}},
lb_method=mock.ANY,
config_defaults=mock.ANY,
protocol=mock.ANY,
service_group_templates=exp_template)
def test_delete(self):
members = [[], [fake_objs.FakeMember()]]
hms = [None, fake_objs.FakeHM('PING')]
persistences = [None, 'SOURCE_IP', 'HTTP_COOKIE']
listeners = [False, True]
for m in members:
for hm in hms:
for pers in persistences:
for lst in listeners:
self.a.reset_mocks()
pool = fake_objs.FakePool('TCP', 'ROUND_ROBIN',
pers, lst,
members=m,
hm=hm)
self.a.pool.neutron.member_count.return_value = 1
self.a.pool.delete(None, pool)
self.print_mocks()
(self.a.last_client.slb.service_group.delete.
assert_called_with(pool.id))
if pers == 'SOURCE_IP':
(self.a.last_client.slb.template.
src_ip_persistence.delete.
assert_called_with(pool.id))
elif pers == 'HTTP_COOKIE':
(self.a.last_client.slb.template.
cookie_persistence.delete.
assert_called_with(pool.id))
def _test_stats(self):
pool = fake_objs.FakePool('TCP', 'ROUND_ROBIN', None, False)
actual = self.a.pool.stats(None, pool)
return pool, actual
def test_stats_calls_service_group_stats(self):
pool, actual = self._test_stats()
(self.a.last_client.slb.service_group.stats.
assert_called_with(pool.id))
def test_stats_returns_stats(self):
pool, actual = self._test_stats()
self.assertIn("stats", actual)
def test_stats_returns_members(self):
pool, actual = self._test_stats()
self.assertIn("members", actual)
def _test_create_expressions(self, os_name, pattern, expressions=None):
self.a.config.get_service_group_expressions = self._get_expressions_mock
expressions = expressions or self.a.config.get_service_group_expressions()
expected = expressions.get(pattern, {}).get("json", None) or ""
p = 'TCP'
m = fake_objs.FakePool(p, 'ROUND_ROBIN', None)
m.name = os_name
handler = self.a.pool
handler.create(None, m)
s = str(self.a.last_client.mock_calls)
self.assertIn("service_group.create", s)
self.assertIn(str(expected), s)
def test_create_expressions_none(self):
self._test_create_expressions("mypool", None, {})
def test_create_expressions_match_beginning(self):
self._test_create_expressions("securepool", self.EXPR_BEGIN)
def test_create_expressions_match_end(self):
self._test_create_expressions("poolweb", self.EXPR_END)
def test_create_expressions_match_charclass(self):
self._test_create_expressions("poolwwpool", self.EXPR_CLASS)
def test_create_expressions_nomatch(self):
self.a.config.get_service_group_expressions = self._get_expressions_mock
expressions = self.a.config.get_service_group_expressions()
expected = expressions["beginning"]
p = 'TCP'
m = fake_objs.FakePool(p, 'ROUND_ROBIN', None)
m.name = "thepool"
handler = self.a.pool
handler.create(None, m)
s = str(self.a.last_client.mock_calls)
self.assertIn("service_group.create", s)
self.assertNotIn(str(expected), s)
def test_create_empty_name_noexception(self):
self.a.config.get_service_group_expressions = self._get_expressions_mock
expressions = self.a.config.get_service_group_expressions()
expected = expressions["beginning"]
p = 'TCP'
m = fake_objs.FakePool(p, 'ROUND_ROBIN', None)
m.name = None
handler = self.a.pool
handler.create(None, m)
s = str(self.a.last_client.mock_calls)
self.assertIn("service_group.create", s)
self.assertNotIn(str(expected), s)
| apache-2.0 |
gdimitris/ChessPuzzlerBackend | Virtual_Environment/lib/python2.7/site-packages/sqlalchemy/ext/compiler.py | 81 | 15770 | # ext/compiler.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides an API for creation of custom ClauseElements and compilers.
Synopsis
========
Usage involves the creation of one or more
:class:`~sqlalchemy.sql.expression.ClauseElement` subclasses and one or
more callables defining its compilation::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import ColumnClause
class MyColumn(ColumnClause):
pass
@compiles(MyColumn)
def compile_mycolumn(element, compiler, **kw):
return "[%s]" % element.name
Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`,
the base expression element for named column objects. The ``compiles``
decorator registers itself with the ``MyColumn`` class so that it is invoked
when the object is compiled to a string::
from sqlalchemy import select
s = select([MyColumn('x'), MyColumn('y')])
print str(s)
Produces::
SELECT [x], [y]
Dialect-specific compilation rules
==================================
Compilers can also be made dialect-specific. The appropriate compiler will be
invoked for the dialect in use::
from sqlalchemy.schema import DDLElement
class AlterColumn(DDLElement):
def __init__(self, column, cmd):
self.column = column
self.cmd = cmd
@compiles(AlterColumn)
def visit_alter_column(element, compiler, **kw):
return "ALTER COLUMN %s ..." % element.column.name
@compiles(AlterColumn, 'postgresql')
def visit_alter_column(element, compiler, **kw):
return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name,
element.column.name)
The second ``visit_alter_column`` will be invoked when any ``postgresql``
dialect is used.
Compiling sub-elements of a custom expression construct
=======================================================
The ``compiler`` argument is the
:class:`~sqlalchemy.engine.interfaces.Compiled` object in use. This object
can be inspected for any information about the in-progress compilation,
including ``compiler.dialect``, ``compiler.statement`` etc. The
:class:`~sqlalchemy.sql.compiler.SQLCompiler` and
:class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()``
method which can be used for compilation of embedded attributes::
from sqlalchemy.sql.expression import Executable, ClauseElement
class InsertFromSelect(Executable, ClauseElement):
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
return "INSERT INTO %s (%s)" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select)
)
insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5))
print insert
Produces::
"INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z
FROM mytable WHERE mytable.x > :x_1)"
.. note::
The above ``InsertFromSelect`` construct is only an example, this actual
functionality is already available using the
:meth:`.Insert.from_select` method.
.. note::
The above ``InsertFromSelect`` construct probably wants to have "autocommit"
enabled. See :ref:`enabling_compiled_autocommit` for this step.
Cross Compiling between SQL and DDL compilers
---------------------------------------------
SQL and DDL constructs are each compiled using different base compilers -
``SQLCompiler`` and ``DDLCompiler``. A common need is to access the
compilation rules of SQL expressions from within a DDL expression. The
``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as
below where we generate a CHECK constraint that embeds a SQL expression::
@compiles(MyConstraint)
def compile_my_constraint(constraint, ddlcompiler, **kw):
return "CONSTRAINT %s CHECK (%s)" % (
constraint.name,
ddlcompiler.sql_compiler.process(constraint.expression)
)
.. _enabling_compiled_autocommit:
Enabling Autocommit on a Construct
==================================
Recall from the section :ref:`autocommit` that the :class:`.Engine`, when
asked to execute a construct in the absence of a user-defined transaction,
detects if the given construct represents DML or DDL, that is, a data
modification or data definition statement, which requires (or may require,
in the case of DDL) that the transaction generated by the DBAPI be committed
(recall that DBAPI always has a transaction going on regardless of what
SQLAlchemy does). Checking for this is actually accomplished by checking for
the "autocommit" execution option on the construct. When building a
construct like an INSERT derivation, a new DDL type, or perhaps a stored
procedure that alters data, the "autocommit" option needs to be set in order
for the statement to function with "connectionless" execution
(as described in :ref:`dbengine_implicit`).
Currently a quick way to do this is to subclass :class:`.Executable`, then
add the "autocommit" flag to the ``_execution_options`` dictionary (note this
is a "frozen" dictionary which supplies a generative ``union()`` method)::
from sqlalchemy.sql.expression import Executable, ClauseElement
class MyInsertThing(Executable, ClauseElement):
_execution_options = \\
Executable._execution_options.union({'autocommit': True})
More succinctly, if the construct is truly similar to an INSERT, UPDATE, or
DELETE, :class:`.UpdateBase` can be used, which already is a subclass
of :class:`.Executable`, :class:`.ClauseElement` and includes the
``autocommit`` flag::
from sqlalchemy.sql.expression import UpdateBase
class MyInsertThing(UpdateBase):
def __init__(self, ...):
...
DDL elements that subclass :class:`.DDLElement` already have the
"autocommit" flag turned on.
Changing the default compilation of existing constructs
=======================================================
The compiler extension applies just as well to the existing constructs. When
overriding the compilation of a built in SQL construct, the @compiles
decorator is invoked upon the appropriate class (be sure to use the class,
i.e. ``Insert`` or ``Select``, instead of the creation function such
as ``insert()`` or ``select()``).
Within the new compilation function, to get at the "original" compilation
routine, use the appropriate visit_XXX method - this is
because compiler.process() will call upon the overriding routine and cause
an endless loop. Such as, to add "prefix" to all insert statements::
from sqlalchemy.sql.expression import Insert
@compiles(Insert)
def prefix_inserts(insert, compiler, **kw):
return compiler.visit_insert(insert.prefix_with("some prefix"), **kw)
The above compiler will prefix all INSERT statements with "some prefix" when
compiled.
.. _type_compilation_extension:
Changing Compilation of Types
=============================
``compiler`` works for types, too, such as below where we implement the
MS-SQL specific 'max' keyword for ``String``/``VARCHAR``::
@compiles(String, 'mssql')
@compiles(VARCHAR, 'mssql')
def compile_varchar(element, compiler, **kw):
if element.length == 'max':
return "VARCHAR('max')"
else:
return compiler.visit_VARCHAR(element, **kw)
foo = Table('foo', metadata,
Column('data', VARCHAR('max'))
)
Subclassing Guidelines
======================
A big part of using the compiler extension is subclassing SQLAlchemy
expression constructs. To make this easier, the expression and
schema packages feature a set of "bases" intended for common tasks.
A synopsis is as follows:
* :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root
expression class. Any SQL expression can be derived from this base, and is
probably the best choice for longer constructs such as specialized INSERT
statements.
* :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all
"column-like" elements. Anything that you'd place in the "columns" clause of
a SELECT statement (as well as order by and group by) can derive from this -
the object will automatically have Python "comparison" behavior.
:class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a
``type`` member which is expression's return type. This can be established
at the instance level in the constructor, or at the class level if its
generally constant::
class timestamp(ColumnElement):
type = TIMESTAMP()
* :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a
``ColumnElement`` and a "from clause" like object, and represents a SQL
function or stored procedure type of call. Since most databases support
statements along the line of "SELECT FROM <some function>"
``FunctionElement`` adds in the ability to be used in the FROM clause of a
``select()`` construct::
from sqlalchemy.sql.expression import FunctionElement
class coalesce(FunctionElement):
name = 'coalesce'
@compiles(coalesce)
def compile(element, compiler, **kw):
return "coalesce(%s)" % compiler.process(element.clauses)
@compiles(coalesce, 'oracle')
def compile(element, compiler, **kw):
if len(element.clauses) > 2:
raise TypeError("coalesce only supports two arguments on Oracle")
return "nvl(%s)" % compiler.process(element.clauses)
* :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions,
like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement``
subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``.
``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the
``execute_at()`` method, allowing the construct to be invoked during CREATE
TABLE and DROP TABLE sequences.
* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which
should be used with any expression class that represents a "standalone"
SQL statement that can be passed directly to an ``execute()`` method. It
is already implicit within ``DDLElement`` and ``FunctionElement``.
Further Examples
================
"UTC timestamp" function
-------------------------
A function that works like "CURRENT_TIMESTAMP" except applies the
appropriate conversions so that the time is in UTC time. Timestamps are best
stored in relational databases as UTC, without time zones. UTC so that your
database doesn't think time has gone backwards in the hour when daylight
savings ends, without timezones because timezones are like character
encodings - they're best applied only at the endpoints of an application
(i.e. convert to UTC upon user input, re-apply desired timezone upon display).
For Postgresql and Microsoft SQL Server::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
class utcnow(expression.FunctionElement):
type = DateTime()
@compiles(utcnow, 'postgresql')
def pg_utcnow(element, compiler, **kw):
return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
@compiles(utcnow, 'mssql')
def ms_utcnow(element, compiler, **kw):
return "GETUTCDATE()"
Example usage::
from sqlalchemy import (
Table, Column, Integer, String, DateTime, MetaData
)
metadata = MetaData()
event = Table("event", metadata,
Column("id", Integer, primary_key=True),
Column("description", String(50), nullable=False),
Column("timestamp", DateTime, server_default=utcnow())
)
"GREATEST" function
-------------------
The "GREATEST" function is given any number of arguments and returns the one
that is of the highest value - it's equivalent to Python's ``max``
function. A SQL standard version versus a CASE based version which only
accommodates two arguments::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import Numeric
class greatest(expression.FunctionElement):
type = Numeric()
name = 'greatest'
@compiles(greatest)
def default_greatest(element, compiler, **kw):
return compiler.visit_function(element)
@compiles(greatest, 'sqlite')
@compiles(greatest, 'mssql')
@compiles(greatest, 'oracle')
def case_greatest(element, compiler, **kw):
arg1, arg2 = list(element.clauses)
return "CASE WHEN %s > %s THEN %s ELSE %s END" % (
compiler.process(arg1),
compiler.process(arg2),
compiler.process(arg1),
compiler.process(arg2),
)
Example usage::
Session.query(Account).\\
filter(
greatest(
Account.checking_balance,
Account.savings_balance) > 10000
)
"false" expression
------------------
Render a "false" constant expression, rendering as "0" on platforms that
don't have a "false" constant::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
class sql_false(expression.ColumnElement):
pass
@compiles(sql_false)
def default_false(element, compiler, **kw):
return "false"
@compiles(sql_false, 'mssql')
@compiles(sql_false, 'mysql')
@compiles(sql_false, 'oracle')
def int_false(element, compiler, **kw):
return "0"
Example usage::
from sqlalchemy import select, union_all
exp = union_all(
select([users.c.name, sql_false().label("enrolled")]),
select([customers.c.name, customers.c.enrolled])
)
"""
from .. import exc
from ..sql import visitors
def compiles(class_, *specs):
"""Register a function as a compiler for a
given :class:`.ClauseElement` type."""
def decorate(fn):
existing = class_.__dict__.get('_compiler_dispatcher', None)
existing_dispatch = class_.__dict__.get('_compiler_dispatch')
if not existing:
existing = _dispatcher()
if existing_dispatch:
existing.specs['default'] = existing_dispatch
# TODO: why is the lambda needed ?
setattr(class_, '_compiler_dispatch',
lambda *arg, **kw: existing(*arg, **kw))
setattr(class_, '_compiler_dispatcher', existing)
if specs:
for s in specs:
existing.specs[s] = fn
else:
existing.specs['default'] = fn
return fn
return decorate
def deregister(class_):
"""Remove all custom compilers associated with a given
:class:`.ClauseElement` type."""
if hasattr(class_, '_compiler_dispatcher'):
# regenerate default _compiler_dispatch
visitors._generate_dispatch(class_)
# remove custom directive
del class_._compiler_dispatcher
class _dispatcher(object):
def __init__(self):
self.specs = {}
def __call__(self, element, compiler, **kw):
# TODO: yes, this could also switch off of DBAPI in use.
fn = self.specs.get(compiler.dialect.name, None)
if not fn:
try:
fn = self.specs['default']
except KeyError:
raise exc.CompileError(
"%s construct has no default "
"compilation handler." % type(element))
return fn(element, compiler, **kw)
| mit |