| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
endlessm/chromium-browser | third_party/catapult/dashboard/dashboard/services/issue_tracker_service.py | 1 | 10171 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a layer of abstraction for the issue tracker API."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import httplib
import json
import logging
from apiclient import discovery
from apiclient import errors
_DISCOVERY_URI = ('https://monorail-prod.appspot.com'
'/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest')
STATUS_DUPLICATE = 'Duplicate'
class IssueTrackerService(object):
"""Class for updating bug issues."""
def __init__(self, http):
"""Initializes an object for adding and updating bugs on the issue tracker.
This object can be re-used to make multiple requests without calling
apiclient.discovery.build multiple times.
This class makes requests to the Monorail API.
API explorer: https://goo.gl/xWd0dX
Args:
http: A Http object that requests will be made through; this should be an
Http object that's already authenticated via OAuth2.
"""
# Monorail recommends a 15s timeout on all requests.
# https://github.com/catapult-project/catapult/issues/4115
http.timeout = 15
self._service = discovery.build(
'monorail', 'v1', discoveryServiceUrl=_DISCOVERY_URI, http=http)
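# Illustrative sketch (not part of the original module): one way to build the
# authenticated Http object this class expects. The credentials helper below
# is an assumption; any oauth2client-style credentials object that can
# authorize an httplib2.Http instance would do.
#
#   import httplib2
#   from oauth2client.client import GoogleCredentials
#
#   creds = GoogleCredentials.get_application_default()
#   http = creds.authorize(httplib2.Http())
#   service = IssueTrackerService(http)
#   service.AddBugComment(12345, 'Regression range confirmed.',
#                         status='Assigned', send_email=False)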
def AddBugComment(self,
bug_id,
comment,
status=None,
cc_list=None,
merge_issue=None,
components=None,
labels=None,
owner=None,
send_email=True):
"""Adds a comment with the bisect results to the given bug.
Args:
bug_id: Bug ID of the issue to update.
comment: Bisect results information.
status: A string status for the bug, e.g. Assigned, Duplicate, WontFix, etc.
cc_list: List of email addresses of users to add to the CC list.
merge_issue: ID of the issue to be merged into; specifying this option
implies that the status should be "Duplicate".
components: List of components to add/remove from the issue.
labels: List of labels for bug.
owner: Owner of the bug.
send_email: True to send email to bug cc list, False otherwise.
Returns:
True if successful, False otherwise.
"""
if not bug_id or bug_id < 0:
return False
body = {'content': comment}
updates = {}
# Mark issue as duplicate when relevant bug ID is found in the datastore.
# Avoid marking an issue as duplicate of itself.
if merge_issue and int(merge_issue) != bug_id:
status = STATUS_DUPLICATE
updates['mergedInto'] = merge_issue
logging.info('Bug %s marked as duplicate of %s', bug_id, merge_issue)
if status:
updates['status'] = status
if cc_list:
updates['cc'] = cc_list
if labels:
updates['labels'] = labels
if owner:
updates['owner'] = owner
if components:
updates['components'] = components
body['updates'] = updates
return self._MakeCommentRequest(bug_id, body, send_email=send_email)
def List(self, **kwargs):
"""Makes a request to the issue tracker to list bugs."""
request = self._service.issues().list(projectId='chromium', **kwargs)
return self._ExecuteRequest(request)
def GetIssue(self, issue_id):
"""Makes a request to the issue tracker to get an issue."""
request = self._service.issues().get(projectId='chromium', issueId=issue_id)
return self._ExecuteRequest(request)
def _MakeCommentRequest(self, bug_id, body, retry=True, send_email=True):
"""Makes a request to the issue tracker to update a bug.
Args:
bug_id: Bug ID of the issue.
body: Dict of comment parameters.
retry: True to retry on failure, False otherwise.
send_email: True to send email to bug cc list, False otherwise.
Returns:
True if a comment was posted successfully or the issue was deleted. False if
making a comment failed unexpectedly.
"""
request = self._service.issues().comments().insert(
projectId='chromium',
issueId=bug_id,
sendEmail=send_email,
body=body)
try:
if self._ExecuteRequest(request, ignore_error=False):
return True
except errors.HttpError as e:
reason = _GetErrorReason(e)
if reason is None:
reason = ''
# Retry without owner if we cannot set owner to this issue.
if retry and 'The user does not exist' in reason:
# Remove both the owner and the cc list.
# TODO (crbug.com/806392): We should probably figure out which user it
# is rather than removing all of them.
if 'owner' in body['updates']:
del body['updates']['owner']
if 'cc' in body['updates']:
del body['updates']['cc']
return self._MakeCommentRequest(bug_id, body, retry=False,
send_email=send_email)
elif retry and 'Issue owner must be a project member' in reason:
# Remove the owner but retain the cc list.
if 'owner' in body['updates']:
del body['updates']['owner']
return self._MakeCommentRequest(bug_id, body, retry=False,
send_email=send_email)
# This error reason is received when the issue has been deleted.
elif 'User is not allowed to view this issue' in reason:
logging.warning('Unable to update bug %s with body %s', bug_id, body)
return True
logging.error('Error updating bug %s with body %s', bug_id, body)
return False
def NewBug(self, title, description, labels=None, components=None,
owner=None, cc=None, status=None):
"""Creates a new bug.
Args:
title: The short title text of the bug.
description: The body text for the bug.
labels: Starting labels for the bug.
components: Starting components for the bug.
owner: Starting owner account name.
cc: CSV of email addresses to CC on the bug.
status: Initial status; defaults to 'Assigned' if an owner is given,
otherwise 'Unconfirmed'.
Returns:
A dict containing the bug_id (if successful), or the error message if not.
"""
body = {
'title': title,
'summary': title,
'description': description,
'labels': labels or [],
'components': components or [],
'status': status or ('Assigned' if owner else 'Unconfirmed'),
'projectId': 'chromium'
}
if owner:
body['owner'] = {'name': owner}
if cc:
body['cc'] = [{'name': account.strip()}
for account in cc.split(',') if account.strip()]
return self._MakeCreateRequest(body)
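# Worked example (illustrative values): NewBug('Perf regression', 'Details...',
# owner='dev@chromium.org', cc='a@chromium.org, b@chromium.org') produces a
# body with status 'Assigned' (an owner was given), owner
# {'name': 'dev@chromium.org'} and two stripped cc entries, then returns
# {'bug_id': ...} on success or {'error': ...} on failure.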
def _MakeCreateRequest(self, body):
"""Makes a request to create a new bug.
Args:
body: The request body parameter dictionary.
Returns:
A dict containing the bug_id (if successful), or the error message if not.
"""
request = self._service.issues().insert(
projectId='chromium',
sendEmail=True,
body=body)
logging.info('Making create issue request with body %s', body)
try:
response = self._ExecuteRequest(request, ignore_error=False)
if response and 'id' in response:
return {'bug_id': response['id']}
logging.error('Failed to create new bug; response %s', response)
except errors.HttpError as e:
reason = _GetErrorReason(e)
return {'error': reason}
except httplib.HTTPException as e:
return {'error': str(e)}
return {'error': 'Unknown failure creating issue.'}
def GetIssueComments(self, bug_id):
"""Gets all the comments for the given bug.
Args:
bug_id: Bug ID of the issue whose comments to fetch.
Returns:
A list of comment dicts, each with 'author', 'content' and 'published' keys.
"""
if not bug_id or bug_id < 0:
return None
response = self._MakeGetCommentsRequest(bug_id)
if not response:
return None
return [{
'author': r['author'].get('name'),
'content': r['content'],
'published': r['published']
} for r in response.get('items')]
def GetLastBugCommentsAndTimestamp(self, bug_id):
"""Gets last updated comments and timestamp in the given bug.
Args:
bug_id: Bug ID of the issue to read.
Returns:
A dictionary with last comment and timestamp, or None on failure.
"""
if not bug_id or bug_id < 0:
return None
response = self._MakeGetCommentsRequest(bug_id)
if response and all(v in response for v in ('totalResults', 'items')):
bug_comments = response.get('items')[response.get('totalResults') - 1]
if bug_comments.get('content') and bug_comments.get('published'):
return {
'comment': bug_comments.get('content'),
'timestamp': bug_comments.get('published')
}
return None
def _MakeGetCommentsRequest(self, bug_id):
"""Makes a request to the issue tracker to get comments in the bug."""
# TODO (prasadv): By default the max number of comments retrieved in
# one request is 100. Since bisect-fyi jobs may have more than 100
# comments, for now we set maxResults to 10000.
# Remove this max count once we find a way to clear old comments
# on FYI issues.
request = self._service.issues().comments().list(
projectId='chromium',
issueId=bug_id,
maxResults=10000)
return self._ExecuteRequest(request)
def _ExecuteRequest(self, request, ignore_error=True):
"""Makes a request to the issue tracker.
Args:
request: The request object, which has an execute method.
Returns:
The response if there was one, or else None.
"""
try:
response = request.execute()
return response
except errors.HttpError as e:
logging.error('HttpError: %r', e)
if ignore_error:
return None
raise e
def _GetErrorReason(request_error):
if request_error.resp.get('content-type', '').startswith('application/json'):
error_json = json.loads(request_error.content).get('error')
if error_json:
return error_json.get('message')
return None
| bsd-3-clause |
krahser/djbot | src/DJBot/views/settings.py | 1 | 1518 | from flask import Blueprint, jsonify, request
from flask_security import login_required, roles_required
from DJBot.forms.generic import SelectName
from DJBot.utils.ssh import generate_key, remove_key
from DJBot.models.playbook import Playbook
from DJBot.models.inventory import Room, Host
import os
import re
settings_bp = Blueprint('settings', __name__)
def get_keys():
keys = os.listdir(os.getenv("HOME")+"/.ssh/keys")
keys = [each for each in keys if not re.match(r".*\.pub", each)]
return keys
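# Example (illustrative): with ~/.ssh/keys containing ['deploy', 'deploy.pub',
# 'lab42'], get_keys() returns ['deploy', 'lab42']; the regex above drops the
# public half of each key pair.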
def get_results():
return os.listdir(os.getenv("LOGS"))
@settings_bp.route('/main', methods=['GET'])
@login_required
@roles_required('user')
def main():
inventory = Room.query.count() + Host.query.count()
return jsonify({"playbooks": Playbook.query.count(),
"inventory": inventory,
"results": len(get_results())})
@settings_bp.route('/keys', methods=['GET'])
@login_required
@roles_required('user')
def keys_get():
return jsonify({"keys": get_keys()})
@settings_bp.route('/key_new', methods=['POST'])
@login_required
@roles_required('user')
def key_new():
form = SelectName(request.form)
if form.validate():
generate_key(form.name.data)
return jsonify({"keys": get_keys()})
@settings_bp.route('/key_delete', methods=['POST'])
@login_required
@roles_required('user')
def key_delete():
form = SelectName(request.form)
if form.validate():
remove_key(form.name.data)
return jsonify({"keys": get_keys()})
| gpl-3.0 |
helldorado/ansible | lib/ansible/plugins/cliconf/edgeswitch.py | 16 | 4612 | #
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
cliconf: edgeswitch
short_description: Use edgeswitch cliconf to run command on EdgeSwitch platform
description:
- This edgeswitch plugin provides low-level abstraction APIs for
sending and receiving CLI commands from Ubiquiti EdgeSwitch network devices.
version_added: "2.8"
"""
import re
import time
import json
from itertools import chain
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible.module_utils.network.common.config import dumps
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode
from ansible.module_utils.common._collections_compat import Mapping
class Cliconf(CliconfBase):
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'edgeswitch'
reply = self.get(command='show version')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'Software Version\.+ (.*)', data)
if match:
device_info['network_os_version'] = match.group(1).strip(',')
match = re.search(r'^Machine Model\.+ (.*)', data, re.M)
if match:
device_info['network_os_model'] = match.group(1)
match = re.search(r'System Name\.+ (.*)', data, re.M)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
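# Illustrative sample of the 'show version' output the regexes above target
# (actual device output may differ):
#
#   System Name.................................... edgeswitch-lab
#   Machine Model.................................. ES-24-250W
#   Software Version............................... 1.7.0,
#
# which would yield network_os_hostname='edgeswitch-lab',
# network_os_model='ES-24-250W' and network_os_version='1.7.0'.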
@enable_mode
def get_config(self, source='running', flags=None):
if source not in ('running', 'startup'):
raise ValueError("fetching configuration from %s is not supported" % source)
if source == 'running':
cmd = 'show running-config '
else:
cmd = 'show startup-config '
if flags:
cmd += ' '.join(to_list(flags))
cmd = cmd.strip()
return self.send_command(cmd)
@enable_mode
def edit_config(self, commands):
resp = {}
results = []
requests = []
self.send_command('configure')
for line in to_list(commands):
if not isinstance(line, Mapping):
line = {'command': line}
cmd = line['command']
if cmd != 'end' and cmd[0] != '!':
results.append(self.send_command(**line))
requests.append(cmd)
self.send_command('end')
resp['request'] = requests
resp['response'] = results
return resp
def get(self, command=None, prompt=None, answer=None, sendonly=False, output=None, check_all=False):
if not command:
raise ValueError('must provide value of command to execute')
if output:
raise ValueError("'output' value %s is not supported for get" % output)
return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, check_all=check_all)
def get_capabilities(self):
result = super(Cliconf, self).get_capabilities()
result['rpc'] += ['run_commands']
return json.dumps(result)
def run_commands(self, commands=None, check_rc=True):
if commands is None:
raise ValueError("'commands' value is required")
responses = list()
for cmd in to_list(commands):
if not isinstance(cmd, Mapping):
cmd = {'command': cmd}
output = cmd.pop('output', None)
if output:
raise ValueError("'output' value %s is not supported for run_commands" % output)
try:
out = self.send_command(**cmd)
except AnsibleConnectionFailure as e:
if check_rc:
raise
out = getattr(e, 'err', e)
responses.append(out)
return responses
| gpl-3.0 |
syaiful6/django | django/contrib/gis/gdal/libgdal.py | 449 | 3598 | from __future__ import unicode_literals
import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, c_char_p, c_int
from ctypes.util import find_library
from django.contrib.gis.gdal.error import GDALException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('django.contrib.gis')
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GDAL_LIBRARY_PATH
except (AttributeError, EnvironmentError,
ImportError, ImproperlyConfigured):
lib_path = None
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT shared libraries
lib_names = ['gdal111', 'gdal110', 'gdal19', 'gdal18', 'gdal17']
elif os.name == 'posix':
# *NIX library names.
lib_names = ['gdal', 'GDAL', 'gdal1.11.0', 'gdal1.10.0', 'gdal1.9.0',
'gdal1.8.0', 'gdal1.7.0']
else:
raise GDALException('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the
# path to the GDAL library from the list of library names.
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
if lib_path is None:
raise GDALException('Could not find the GDAL library (tried "%s"). '
'Try setting GDAL_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names))
# This loads the GDAL/OGR C library
lgdal = CDLL(lib_path)
# On Windows, the GDAL binaries have some OSR routines exported with
# STDCALL, while others are not. Thus, the library will also need to
# be loaded up as WinDLL for said OSR functions that require the
# different calling convention.
if os.name == 'nt':
from ctypes import WinDLL
lwingdal = WinDLL(lib_path)
def std_call(func):
"""
Returns the correct STDCALL function for certain OSR routines on Win32
platforms.
"""
if os.name == 'nt':
return lwingdal[func]
else:
return lgdal[func]
# #### Version-information functions. ####
# Returns GDAL library version information with the given key.
_version_info = std_call('GDALVersionInfo')
_version_info.argtypes = [c_char_p]
_version_info.restype = c_char_p
def gdal_version():
"Returns only the GDAL version number information."
return _version_info(b'RELEASE_NAME')
def gdal_full_version():
"Returns the full GDAL version information."
return _version_info(b'')
version_regex = re.compile(r'^(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<subminor>\d+))?')
def gdal_version_info():
ver = gdal_version().decode()
m = version_regex.match(ver)
if not m:
raise GDALException('Could not parse GDAL version string "%s"' % ver)
return {key: m.group(key) for key in ('major', 'minor', 'subminor')}
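# For example, a gdal_version() of b'1.11.2' parses to
# {'major': '1', 'minor': '11', 'subminor': '2'}, so GDAL_VERSION below
# becomes (1, 11, 2).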
_verinfo = gdal_version_info()
GDAL_MAJOR_VERSION = int(_verinfo['major'])
GDAL_MINOR_VERSION = int(_verinfo['minor'])
GDAL_SUBMINOR_VERSION = _verinfo['subminor'] and int(_verinfo['subminor'])
GDAL_VERSION = (GDAL_MAJOR_VERSION, GDAL_MINOR_VERSION, GDAL_SUBMINOR_VERSION)
del _verinfo
# Set library error handling so as errors are logged
CPLErrorHandler = CFUNCTYPE(None, c_int, c_int, c_char_p)
def err_handler(error_class, error_number, message):
logger.error('GDAL_ERROR %d: %s' % (error_number, message))
err_handler = CPLErrorHandler(err_handler)
def function(name, args, restype):
func = std_call(name)
func.argtypes = args
func.restype = restype
return func
set_error_handler = function('CPLSetErrorHandler', [CPLErrorHandler], CPLErrorHandler)
set_error_handler(err_handler)
| bsd-3-clause |
52ai/django-ccsds | django/template/backends/dummy.py | 113 | 1720 | # Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
import io
import string
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template import TemplateDoesNotExist
from django.utils.html import conditional_escape
from .base import BaseEngine
from .utils import csrf_input_lazy, csrf_token_lazy
class TemplateStrings(BaseEngine):
app_dirname = 'template_strings'
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
if options:
raise ImproperlyConfigured(
"Unknown options: {}".format(", ".join(options)))
super(TemplateStrings, self).__init__(params)
def from_string(self, template_code):
return Template(template_code)
def get_template(self, template_name):
for template_file in self.iter_template_filenames(template_name):
try:
with io.open(template_file, encoding=settings.FILE_CHARSET) as fp:
template_code = fp.read()
except IOError:
continue
return Template(template_code)
else:
raise TemplateDoesNotExist(template_name)
class Template(string.Template):
def render(self, context=None, request=None):
if context is None:
context = {}
else:
context = {k: conditional_escape(v) for k, v in context.items()}
if request is not None:
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
return self.safe_substitute(context)
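# Minimal usage sketch (not part of Django itself): Template is a
# string.Template subclass, so placeholders use $name syntax; context values
# are HTML-escaped and unknown names are left intact by safe_substitute().
#
#   t = Template('Hello $user, token: $csrf_token')
#   t.render({'user': '<admin>'})  # 'Hello &lt;admin&gt;, token: $csrf_token'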
| bsd-3-clause |
repotvsupertuga/tvsupertuga.repository | instal/script.module.requests/lib/requests/packages/urllib3/packages/six.py | 2715 | 30098 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
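# Usage sketch: `class MyClass(with_metaclass(Meta, Base)): ...` gives MyClass
# the metaclass Meta and base class Base on both Python 2 and Python 3,
# without either version's incompatible native syntax.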
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
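# Usage sketch: the decorator rebuilds the class in place, e.g.
#
#   @add_metaclass(Meta)
#   class MyClass(Base):
#       pass
#
# is equivalent to `class MyClass(Base, metaclass=Meta)` on Python 3.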
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| gpl-2.0 |
graingert/alembic | alembic/testing/plugin/plugin_base.py | 8 | 17365 | # plugin/plugin_base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Testing extensions.
this module is designed to work as a testing-framework-agnostic library,
so that we can continue to support nose and also begin adding new
functionality via py.test.
NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0
"""
from __future__ import absolute_import
try:
# unittest has a SkipTest also but pytest doesn't
# honor it unless nose is imported too...
from nose import SkipTest
except ImportError:
from _pytest.runner import Skipped as SkipTest
import sys
import re
py3k = sys.version_info >= (3, 0)
if py3k:
import configparser
else:
import ConfigParser as configparser
# late imports
fixtures = None
engines = None
provision = None
exclusions = None
warnings = None
assertions = None
requirements = None
config = None
util = None
file_config = None
logging = None
db_opts = {}
include_tags = set()
exclude_tags = set()
options = None
def setup_options(make_option):
make_option("--log-info", action="callback", type="string", callback=_log,
help="turn on info logging for <LOG> (multiple OK)")
make_option("--log-debug", action="callback",
type="string", callback=_log,
help="turn on debug logging for <LOG> (multiple OK)")
make_option("--db", action="append", type="string", dest="db",
help="Use prefab database uri. Multiple OK, "
"first one is run by default.")
make_option('--dbs', action='callback', callback=_list_dbs,
help="List available prefab dbs")
make_option("--dburi", action="append", type="string", dest="dburi",
help="Database uri. Multiple OK, "
"first one is run by default.")
make_option("--dropfirst", action="store_true", dest="dropfirst",
help="Drop all tables in the target database first")
make_option("--backend-only", action="store_true", dest="backend_only",
help="Run only tests marked with __backend__")
make_option("--mockpool", action="store_true", dest="mockpool",
help="Use mock pool (asserts only one connection used)")
make_option("--low-connections", action="store_true",
dest="low_connections",
help="Use a low number of distinct connections - "
"i.e. for Oracle TNS")
make_option("--reversetop", action="store_true",
dest="reversetop", default=False,
help="Use a random-ordering set implementation in the ORM "
"(helps reveal dependency issues)")
make_option("--requirements", action="callback", type="string",
callback=_requirements_opt,
help="requirements class for testing, overrides setup.cfg")
make_option("--with-cdecimal", action="store_true",
dest="cdecimal", default=False,
help="Monkeypatch the cdecimal library into Python 'decimal' "
"for all tests")
make_option("--include-tag", action="callback", callback=_include_tag,
type="string",
help="Include tests with tag <tag>")
make_option("--exclude-tag", action="callback", callback=_exclude_tag,
type="string",
help="Exclude tests with tag <tag>")
make_option("--serverside", action="store_true",
help="Turn on server side cursors for PG")
make_option("--mysql-engine", action="store",
dest="mysql_engine", default=None,
help="Use the specified MySQL storage engine for all tables, "
"default is a db-default/InnoDB combo.")
def configure_follower(follower_ident):
"""Configure required state for a follower.
This invokes in the parent process and typically includes
database creation.
"""
from alembic.testing import provision
provision.FOLLOWER_IDENT = follower_ident
def memoize_important_follower_config(dict_):
"""Store important configuration we will need to send to a follower.
This invokes in the parent process after normal config is set up.
This is necessary as py.test seems to not be using forking, so we
start with nothing in memory, *but* it isn't running our argparse
callables, so we have to just copy all of that over.
"""
dict_['memoized_config'] = {
'db_opts': db_opts,
'include_tags': include_tags,
'exclude_tags': exclude_tags
}
def restore_important_follower_config(dict_):
"""Restore important configuration needed by a follower.
This invokes in the follower process.
"""
global db_opts, include_tags, exclude_tags
db_opts.update(dict_['memoized_config']['db_opts'])
include_tags.update(dict_['memoized_config']['include_tags'])
exclude_tags.update(dict_['memoized_config']['exclude_tags'])
def read_config():
global file_config
file_config = configparser.ConfigParser()
file_config.read(['setup.cfg', 'test.cfg'])
def pre_begin(opt):
"""things to set up early, before coverage might be setup."""
global options
options = opt
for fn in pre_configure:
fn(options, file_config)
def set_coverage_flag(value):
options.has_coverage = value
def post_begin():
"""things to set up later, once we know coverage is running."""
# Lazy setup of other options (post coverage)
for fn in post_configure:
fn(options, file_config)
# late imports, has to happen after config as well
# as nose plugins like coverage
global util, fixtures, engines, exclusions, \
assertions, warnings, profiling,\
config, testing
from alembic.testing import config, warnings, exclusions # noqa
from alembic.testing import engines, fixtures # noqa
from sqlalchemy import util # noqa
warnings.setup_filters()
def _log(opt_str, value, parser):
global logging
if not logging:
import logging
logging.basicConfig()
if opt_str.endswith('-info'):
logging.getLogger(value).setLevel(logging.INFO)
elif opt_str.endswith('-debug'):
logging.getLogger(value).setLevel(logging.DEBUG)
def _list_dbs(*args):
print("Available --db options (use --dburi to override)")
for macro in sorted(file_config.options('db')):
print("%20s\t%s" % (macro, file_config.get('db', macro)))
sys.exit(0)
def _requirements_opt(opt_str, value, parser):
_setup_requirements(value)
def _exclude_tag(opt_str, value, parser):
exclude_tags.add(value.replace('-', '_'))
def _include_tag(opt_str, value, parser):
include_tags.add(value.replace('-', '_'))
pre_configure = []
post_configure = []
def pre(fn):
pre_configure.append(fn)
return fn
def post(fn):
post_configure.append(fn)
return fn
@pre
def _setup_options(opt, file_config):
global options
options = opt
@pre
def _server_side_cursors(options, file_config):
if options.serverside:
db_opts['server_side_cursors'] = True
@pre
def _monkeypatch_cdecimal(options, file_config):
if options.cdecimal:
import cdecimal
sys.modules['decimal'] = cdecimal
@post
def _engine_uri(options, file_config):
from alembic.testing import config
from alembic.testing import provision
if options.dburi:
db_urls = list(options.dburi)
else:
db_urls = []
if options.db:
for db_token in options.db:
for db in re.split(r'[,\s]+', db_token):
if db not in file_config.options('db'):
raise RuntimeError(
"Unknown URI specifier '%s'. "
"Specify --dbs for known uris."
% db)
else:
db_urls.append(file_config.get('db', db))
if not db_urls:
db_urls.append(file_config.get('db', 'default'))
for db_url in db_urls:
cfg = provision.setup_config(
db_url, db_opts, options, file_config, provision.FOLLOWER_IDENT)
if not config._current:
cfg.set_as_current(cfg)
@post
def _engine_pool(options, file_config):
if options.mockpool:
from sqlalchemy import pool
db_opts['poolclass'] = pool.AssertionPool
@post
def _requirements(options, file_config):
requirement_cls = file_config.get('sqla_testing', "requirement_cls")
_setup_requirements(requirement_cls)
def _setup_requirements(argument):
from alembic.testing import config
if config.requirements is not None:
return
modname, clsname = argument.split(":")
# importlib.import_module() only introduced in 2.7, a little
# late
mod = __import__(modname)
for component in modname.split(".")[1:]:
mod = getattr(mod, component)
req_cls = getattr(mod, clsname)
config.requirements = req_cls()
@post
def _prep_testing_database(options, file_config):
from alembic.testing import config
from alembic.testing.exclusions import against
from sqlalchemy import schema
from alembic import util
if util.sqla_08:
from sqlalchemy import inspect
else:
from sqlalchemy.engine.reflection import Inspector
inspect = Inspector.from_engine
if options.dropfirst:
for cfg in config.Config.all_configs():
e = cfg.db
inspector = inspect(e)
try:
view_names = inspector.get_view_names()
except NotImplementedError:
pass
else:
for vname in view_names:
e.execute(schema._DropView(
schema.Table(vname, schema.MetaData())
))
if config.requirements.schemas.enabled_for_config(cfg):
try:
view_names = inspector.get_view_names(
schema="test_schema")
except NotImplementedError:
pass
else:
for vname in view_names:
e.execute(schema._DropView(
schema.Table(vname, schema.MetaData(),
schema="test_schema")
))
for tname in reversed(inspector.get_table_names(
order_by="foreign_key")):
e.execute(schema.DropTable(
schema.Table(tname, schema.MetaData())
))
if config.requirements.schemas.enabled_for_config(cfg):
for tname in reversed(inspector.get_table_names(
order_by="foreign_key", schema="test_schema")):
e.execute(schema.DropTable(
schema.Table(tname, schema.MetaData(),
schema="test_schema")
))
if against(cfg, "postgresql") and util.sqla_100:
from sqlalchemy.dialects import postgresql
for enum in inspector.get_enums("*"):
e.execute(postgresql.DropEnumType(
postgresql.ENUM(
name=enum['name'],
schema=enum['schema'])))
@post
def _reverse_topological(options, file_config):
if options.reversetop:
from sqlalchemy.orm.util import randomize_unitofwork
randomize_unitofwork()
@post
def _post_setup_options(opt, file_config):
from alembic.testing import config
config.options = options
config.file_config = file_config
def want_class(cls):
if not issubclass(cls, fixtures.TestBase):
return False
elif cls.__name__.startswith('_'):
return False
elif config.options.backend_only and not getattr(cls, '__backend__',
False):
return False
else:
return True
def want_method(cls, fn):
if not fn.__name__.startswith("test_"):
return False
elif fn.__module__ is None:
return False
elif include_tags:
return (
hasattr(cls, '__tags__') and
exclusions.tags(cls.__tags__).include_test(
include_tags, exclude_tags)
) or (
hasattr(fn, '_sa_exclusion_extend') and
fn._sa_exclusion_extend.include_test(
include_tags, exclude_tags)
)
elif exclude_tags and hasattr(cls, '__tags__'):
return exclusions.tags(cls.__tags__).include_test(
include_tags, exclude_tags)
elif exclude_tags and hasattr(fn, '_sa_exclusion_extend'):
return fn._sa_exclusion_extend.include_test(include_tags, exclude_tags)
else:
return True
def generate_sub_tests(cls, module):
if getattr(cls, '__backend__', False):
for cfg in _possible_configs_for_cls(cls):
name = "%s_%s_%s" % (cls.__name__, cfg.db.name, cfg.db.driver)
subcls = type(
name,
(cls, ),
{
"__only_on__": ("%s+%s" % (cfg.db.name, cfg.db.driver)),
}
)
setattr(module, name, subcls)
yield subcls
else:
yield cls
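# Illustrative naming: for a backend-marked class TestFoo running against a
# postgresql+psycopg2 config, the loop above emits a subclass named
# 'TestFoo_postgresql_psycopg2' whose __only_on__ is 'postgresql+psycopg2'.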
def start_test_class(cls):
_do_skips(cls)
_setup_engine(cls)
def stop_test_class(cls):
#from sqlalchemy import inspect
#assert not inspect(testing.db).get_table_names()
_restore_engine()
def _restore_engine():
config._current.reset()
def _setup_engine(cls):
if getattr(cls, '__engine_options__', None):
eng = engines.testing_engine(options=cls.__engine_options__)
config._current.push_engine(eng)
def before_test(test, test_module_name, test_class, test_name):
pass
def after_test(test):
pass
def _possible_configs_for_cls(cls, reasons=None):
all_configs = set(config.Config.all_configs())
if cls.__unsupported_on__:
spec = exclusions.db_spec(*cls.__unsupported_on__)
for config_obj in list(all_configs):
if spec(config_obj):
all_configs.remove(config_obj)
if getattr(cls, '__only_on__', None):
spec = exclusions.db_spec(*util.to_list(cls.__only_on__))
for config_obj in list(all_configs):
if not spec(config_obj):
all_configs.remove(config_obj)
if hasattr(cls, '__requires__'):
requirements = config.requirements
for config_obj in list(all_configs):
for requirement in cls.__requires__:
check = getattr(requirements, requirement)
skip_reasons = check.matching_config_reasons(config_obj)
if skip_reasons:
all_configs.remove(config_obj)
if reasons is not None:
reasons.extend(skip_reasons)
break
if hasattr(cls, '__prefer_requires__'):
non_preferred = set()
requirements = config.requirements
for config_obj in list(all_configs):
for requirement in cls.__prefer_requires__:
check = getattr(requirements, requirement)
if not check.enabled_for_config(config_obj):
non_preferred.add(config_obj)
if all_configs.difference(non_preferred):
all_configs.difference_update(non_preferred)
return all_configs
def _do_skips(cls):
reasons = []
all_configs = _possible_configs_for_cls(cls, reasons)
if getattr(cls, '__skip_if__', False):
for c in getattr(cls, '__skip_if__'):
if c():
raise SkipTest("'%s' skipped by %s" % (
cls.__name__, c.__name__)
)
if not all_configs:
if getattr(cls, '__backend__', False):
msg = "'%s' unsupported for implementation '%s'" % (
cls.__name__, cls.__only_on__)
else:
msg = "'%s' unsupported on any DB implementation %s%s" % (
cls.__name__,
", ".join(
"'%s(%s)+%s'" % (
config_obj.db.name,
".".join(
str(dig) for dig in
config_obj.db.dialect.server_version_info),
config_obj.db.driver
)
for config_obj in config.Config.all_configs()
),
", ".join(reasons)
)
raise SkipTest(msg)
elif hasattr(cls, '__prefer_backends__'):
non_preferred = set()
spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__))
for config_obj in all_configs:
if not spec(config_obj):
non_preferred.add(config_obj)
if all_configs.difference(non_preferred):
all_configs.difference_update(non_preferred)
if config._current not in all_configs:
_setup_config(all_configs.pop(), cls)
def _setup_config(config_obj, ctx):
config._current.push(config_obj)
| mit |
strummerTFIU/TFG-IsometricMaps | src/load_info.py | 1 | 4816 | from PIL import Image
from laspy.file import File
import numpy as np
import sys, os
m_file = "mdt_data.txt"
o_file = "orto_data.txt"
l_file = "lidar_data.txt"
a_file = "areas_interest.txt"
laszip = "/home/pablo/Documentos/LAStools/bin/laszip"
def load_mdt_info(png_directory):
print("Loading MDTs data...")
f = open(m_file, "w")
for base, dirs, files in os.walk(png_directory):
for mdt_file in files:
if mdt_file[-4:] == ".txt":
f.write(png_directory + mdt_file[:-3] + "png")
mdt = open(png_directory + mdt_file)
for line in mdt:
f.write(" ")
aux = line.split()
f.write(aux[0])
mdt.close()
f.write("\n")
f.close()
print("Load successful")
def find_mdt(x1, y1, x2, y2):
mdts = []
f = open(m_file, "r")
for line in f:
info = line.split()
# Calculate mdt vertex points
mx1 = float(info[3])
mx2 = float(info[3]) + float(info[1]) * float(info[5])
my1 = float(info[4]) + float(info[2]) * float(info[5])
my2 = float(info[4])
if is_collision(x1, y1, x2, y2, mx1, my1, mx2, my2):
mdts.append(info)
f.close()
return mdts
def load_orto_info(orto_directory):
Image.MAX_IMAGE_PIXELS = 1000000000 # To hide PIL warning
print("Loading ortophotos data...")
f = open(o_file, "w")
for base, dirs, files in os.walk(orto_directory):
for d in dirs:
for base2, dirs2, files2 in os.walk(orto_directory + d):
for dAux in dirs2:
for base3, dirs3, files3 in os.walk(orto_directory + d + "/" + dAux):
for orto_file in files3:
if orto_file[-4:] == ".jpg" or orto_file[-4:] == ".png":
image = orto_directory + d + "/" + dAux + "/" + orto_file
width, height = Image.open(image).size
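# NOTE (added comment): this relies on os.walk listing the .jpg/.png before
# its matching .jgw; the width/height from the most recently opened image are
# the values written out when the .jgw branch below runs.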
if orto_file[-4:] == ".jgw":
jgw = open(orto_directory + d + "/" + dAux + "/" + orto_file)
f.write(d + " " + orto_directory + d + "/" + dAux + " ")
aux = jgw.readline().split()
f.write(aux[0] + " ")
jgw.readline()
jgw.readline()
aux = jgw.readline().split()
f.write(aux[0] + " ")
aux = jgw.readline().split()
f.write(aux[0] + " ")
aux = jgw.readline().split()
f.write(aux[0] + " ")
jgw.close()
f.write(str(width) + " ")
f.write(str(height))
f.write("\n")
break
break
f.close()
print("Load successful")
def find_orto(x1, y1, x2, y2, mdts):
ortos = []
f = open(o_file, "r")
for line in f:
info = line.split()
found = False
for mdt in mdts:
if mdt[0][mdt[0].rfind("/") + 1:-4] == info[0]:
found = True
break
if found:
mx1 = float(info[4]) - (float(info[2]) / 2)
my1 = float(info[5]) - (float(info[3]) / 2)
mx2 = mx1 + float(info[2]) * float(info[6]) + float(info[2])
my2 = my1 + float(info[3]) * float(info[7]) + float(info[3])
if is_collision(x1, y1, x2, y2, mx1, my1, mx2, my2):
ortos.append(info)
f.close()
return ortos
def load_lidar_info(lidar_directory):
print("Loading LIDARs data...")
f = open(l_file, "w")
for base, dirs, files in os.walk(lidar_directory):
for lidar_file in files:
lidar_file = lidar_directory + lidar_file
f.write(lidar_file)
os.system(laszip + " -i " + lidar_file + " -o " + lidar_file[:-3] + "LAS")
inFile = File(lidar_file[:-3] + "LAS", mode='r')
x_min = inFile.header.min[0]
x_max = inFile.header.max[0]
y_min = inFile.header.min[1]
y_max = inFile.header.max[1]
f.write(" " + str(x_min) + " " + str(y_min) + " " + str(x_max) + " " + str(y_max) + "\n")
inFile.close()
os.system("rm " + lidar_file[:-3] + "LAS")
f.close()
print("Load successful")
def find_lidar(areas, c1, c2):
lidars = []
f = open(l_file, "r")
for line in f:
info = line.split()
# Calculate lidar vertex points
mx1 = float(info[1])
mx2 = float(info[3])
my1 = float(info[4])
my2 = float(info[2])
for area in areas:
if is_collision(float(area[0]), float(area[1]), float(area[2]), float(area[3]), mx1, my1, mx2, my2):
if is_collision(float(c1[0]), float(c1[1]), float(c2[0]), float(c2[1]), mx1, my1, mx2, my2):
lidars.append(info)
break
f.close()
return lidars
def find_a_interest(x1, y1, x2, y2):
areas = []
f = open(a_file, "r")
for line in f:
info = line.split()
mx1 = float(info[0])
mx2 = float(info[2])
my1 = float(info[1])
my2 = float(info[3])
if is_collision(x1, y1, x2, y2, mx1, my1, mx2, my2):
areas.append(info)
f.close()
return areas
def is_collision(x1, y1, x2, y2, mx1, my1, mx2, my2):
# X axis
if ((x2 < mx1) or (x1 > mx2)):
return False
# Y axis
elif ((y1 < my2) or (y2 > my1)):
return False
else:
return True
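# Illustrative sketch (not part of the original file): is_collision is a
# standard axis-aligned bounding-box overlap test where (x1, y1) is the
# top-left corner (y1 is the larger y) and (x2, y2) the bottom-right corner.
assert is_collision(0, 10, 10, 0, 5, 15, 15, 5)        # overlapping boxes
assert not is_collision(0, 10, 10, 0, 20, 30, 30, 20)  # disjoint on the x axis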
def load_info(png_directory, orto_directory, lidar_directory):
load_mdt_info(png_directory)
load_orto_info(orto_directory)
load_lidar_info(lidar_directory)
| mit |
biljettshop/django-overextends | overextends/templatetags/overextends_tags.py | 3 | 6452 |
import os
from django import template
from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist
from django.template.loader_tags import ExtendsNode
register = template.Library()
class OverExtendsNode(ExtendsNode):
"""
Allows the template ``foo/bar.html`` to extend ``foo/bar.html``,
given that there is another version of it that can be loaded. This
allows templates to be created in a project that extend their app
template counterparts, or even app templates that extend other app
templates with the same relative name/path.
We use our own version of ``find_template``, which uses an explicit
list of template directories to search for the template, based on
the directories that the known template loaders
(``app_directories`` and ``filesystem``) use. This list gets stored
in the template context, and each time a template is found, its
absolute path gets removed from the list, so that subsequent
searches for the same relative name/path can find parent templates
in other directories, which allows circular inheritance to occur.
Django's ``app_directories``, ``filesystem``, and ``cached``
loaders are supported. The ``eggs`` loader, and any loader that
implements ``load_template_source`` with a source string returned,
should also theoretically work.
"""
def find_template(self, name, context, peeking=False):
"""
Replacement for Django's ``find_template`` that uses the current
template context to keep track of which template directories it
has used when finding a template. This allows multiple templates
with the same relative name/path to be discovered, so that
circular template inheritance can occur.
"""
# These imports want settings, which aren't available when this
# module is imported to ``add_to_builtins``, so do them here.
from django.conf import settings
# Find the app_template_dirs (moved in Django 1.8)
import django.template.loaders.app_directories as app_directories
try:
# Django >= 1.8
get_app_template_dirs = app_directories.get_app_template_dirs
app_template_dirs = get_app_template_dirs('templates')
except AttributeError:
# Django <= 1.7
app_template_dirs = app_directories.app_template_dirs
# Find the find_template_loader function (it moved in Django 1.8)
try:
# Django >= 1.8
find_template_loader = context.template.engine.find_template_loader
except AttributeError:
# Django <= 1.7
from django.template.loader import find_template_loader
# Store a dictionary in the template context mapping template
# names to the lists of template directories available to
# search for that template. Each time a template is loaded, its
# origin directory is removed from its directories list.
context_name = "OVEREXTENDS_DIRS"
if context_name not in context:
context[context_name] = {}
if name not in context[context_name]:
all_dirs = list(settings.TEMPLATE_DIRS) + list(app_template_dirs)
# os.path.abspath is needed under uWSGI, and also ensures we
# have consistent path separators across different OSes.
context[context_name][name] = list(map(os.path.abspath, all_dirs))
# Build a list of template loaders to use. For loaders that wrap
# other loaders like the ``cached`` template loader, unwind its
# internal loaders and add those instead.
loaders = []
for loader_name in settings.TEMPLATE_LOADERS:
loader = find_template_loader(loader_name)
loaders.extend(getattr(loader, "loaders", [loader]))
# Go through the loaders and try to find the template. When
# found, remove its absolute path from the context dict so
# that it won't be used again when the same relative name/path
# is requested.
for loader in loaders:
dirs = context[context_name][name]
if not dirs:
break
try:
source, path = loader.load_template_source(name, dirs)
except TemplateDoesNotExist:
pass
else:
# Only remove the absolute path for the initial call in
# get_parent, and not when we're peeking during the
# second call.
if not peeking:
remove_path = os.path.abspath(path[:-len(name) - 1])
context[context_name][name].remove(remove_path)
return Template(source)
raise TemplateDoesNotExist(name)
def get_parent(self, context):
"""
Load the parent template using our own ``find_template``, which
will cause its absolute path to not be used again. Then peek at
the first node, and if its parent arg is the same as the
current parent arg, we know circular inheritance is going to
occur, in which case we try to find the template again, with
the absolute directory removed from the search list.
"""
parent = self.parent_name.resolve(context)
# If parent is a template object, just return it.
if hasattr(parent, "render"):
return parent
template = self.find_template(parent, context)
for node in template.nodelist:
if (isinstance(node, ExtendsNode) and
node.parent_name.resolve(context) == parent):
return self.find_template(parent, context, peeking=True)
return template
@register.tag
def overextends(parser, token):
"""
Extended version of Django's ``extends`` tag that allows circular
inheritance to occur, e.g. a template can both be overridden and
extended at once.
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once "
"in the same template" % bits[0])
return OverExtendsNode(nodelist, parent_name, None)
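# Illustrative usage sketch (not part of the original module), assuming the
# tag has been made available to templates (e.g. via add_to_builtins or a
# {% load %}). A project-level templates/foo/bar.html can then extend the app
# template with the same relative path:
#
#     {% overextends "foo/bar.html" %}
#     {% block content %}...{% endblock %}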
| bsd-2-clause |
xianjunzhengbackup/Cloud-Native-Python | env/lib/python3.5/site-packages/pip/_vendor/distlib/util.py | 327 | 52991 | #
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = r'(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
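# Illustrative sketch (not part of the original module): parsing a name with
# parenthesised version constraints.
_req = parse_requirement('foo (>= 1.2, < 2.0)')
assert _req.name == 'foo'
assert _req.constraints == [('>=', '1.2'), ('<', '2.0')]
del _req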
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on macOS
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
result = os.path.normcase(sys.executable)
if not isinstance(result, text_type):
result = fsdecode(result)
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
jdata = json.load(stream)
result = jdata['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
def read_stream(cp, stream):
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
cp = configparser.ConfigParser()
try:
read_stream(cp, stream)
except configparser.MissingSectionHeaderError:
stream.close()
data = textwrap.dedent(data)
stream = StringIO(data)
read_stream(cp, stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
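# Illustrative sketch (not part of the original module): this holds on any
# platform, since '/'-separated setup paths map to the native separator.
assert convert_path('foo/bar/baz') == os.path.join('foo', 'bar', 'baz')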
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise DistlibException if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self): # pragma: no cover
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException("Invalid specification "
"'%s'" % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
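# Illustrative sketch (not part of the original module): a full specification
# with a prefix:suffix callable path and one flag.
_entry = get_export_entry('foo = bar.baz:qux [flag1]')
assert (_entry.name, _entry.prefix, _entry.suffix, _entry.flags) == \
    ('foo', 'bar.baz', 'qux', ['flag1'])
del _entry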
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
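# Illustrative sketch (not part of the original module): on a POSIX system,
# path_to_cache_dir('/home/user/lib') returns '--home--user--lib.cache'.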
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
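# Illustrative sketch (not part of the original module):
assert parse_credentials('user:secret@example.com') == \
    ('user', 'secret', 'example.com')
assert parse_credentials('example.com') == (None, None, 'example.com')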
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
filename = unquote(filename).replace(' ', '-')
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
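# Illustrative sketch (not part of the original module): the trailing -pyX.Y
# marker is stripped first, then name and version are separated.
assert split_filename('sample-1.0-py2.7') == ('sample', '1.0', '2.7')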
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string,
e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
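# Illustrative sketch (not part of the original module):
assert parse_name_and_version('foo (1.0)') == ('foo', '1.0')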
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
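# Illustrative sketch (not part of the original module): '*' selects all
# available extras, and a '-' prefix subtracts one again.
assert get_extras(['*', '-tests'], ['docs', 'tests']) == {'docs'}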
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
ct = headers.get('Content-Type')
if not ct.startswith('application/json'):
logger.debug('Unexpected response for JSON request: %s', ct)
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
def get_project_data(name):
url = '%s/%s/project.json' % (name[0].upper(), name)
url = urljoin(_external_data_base_url, url)
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
url = urljoin(_external_data_base_url, url)
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system,
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base): # pragma: no cover
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish an event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
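# Illustrative usage sketch (not part of the original module): subscribers
# are called with the event name followed by the published arguments.
_demo = EventMixin()
_demo.add('ping', lambda event, who: 'pong:%s' % who)
assert _demo.publish('ping', 'tester') == ['pong:tester']
del _demo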
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
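# Illustrative usage sketch (not part of the original module): steps come
# back in dependency order, predecessors first.
_seq = Sequencer()
_seq.add('a', 'b')
_seq.add('b', 'c')
assert list(_seq.get_steps('c')) == ['a', 'b', 'c']
del _seq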
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else: # pragma: no cover
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G', 'T', 'P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
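# Illustrative sketch (not part of the original module); commented out
# because the results depend on the local filesystem:
#
#     list(iglob('src/**/*.py'))           # every .py under src/, recursively
#     list(iglob('docs/{api,guide}.rst'))  # brace set expands to both names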
if ssl:
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else: # pragma: no cover
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError: # pragma: no cover
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
if ssl:
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
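# Illustrative usage sketch (not part of the original module); commented out
# because it would spawn a process at import time:
#
#     mixin = SubprocessMixin(verbose=True)
#     mixin.run_command(['echo', 'hello'])  # output is streamed via reader()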
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
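# Illustrative sketch (not part of the original module):
assert normalize_name('Foo_Bar.baz') == 'foo-bar-baz'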
| mit |
opencord/voltha | ofagent/loxi/of13/instruction_id.py | 1 | 29700 | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of13']
class instruction_id(loxi.OFObject):
subtypes = {}
def __init__(self, type=None):
if type != None:
self.type = type
else:
self.type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 0)
subclass = instruction_id.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = instruction_id()
obj.type = reader.read("!H")[0]
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
return True
def pretty_print(self, q):
q.text("instruction_id {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
class apply_actions(instruction_id):
type = 4
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = apply_actions()
_type = reader.read("!H")[0]
assert(_type == 4)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("apply_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[4] = apply_actions
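# Illustrative sketch (added comment only; the generated code in this file
# must not be edited): pack() emits a type/length TLV header, so for
# apply_actions the wire form is the 4 bytes '\x00\x04\x00\x04'
# (type=4, length=4), and unpack() dispatches on type via the subtypes dict.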
class experimenter(instruction_id):
subtypes = {}
type = 65535
def __init__(self, experimenter=None):
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 4)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.experimenter = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.experimenter != other.experimenter: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[65535] = experimenter
class bsn(experimenter):
subtypes = {}
type = 65535
experimenter = 6035143
def __init__(self, subtype=None):
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 8)
subclass = bsn.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = bsn()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
obj.subtype = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("bsn {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
experimenter.subtypes[6035143] = bsn
class bsn_arp_offload(bsn):
type = 65535
experimenter = 6035143
subtype = 1
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_arp_offload()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 1)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_arp_offload {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[1] = bsn_arp_offload
class bsn_auto_negotiation(bsn):
type = 65535
experimenter = 6035143
subtype = 11
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_auto_negotiation()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 11)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_auto_negotiation {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[11] = bsn_auto_negotiation
class bsn_deny(bsn):
type = 65535
experimenter = 6035143
subtype = 5
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_deny()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 5)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_deny {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[5] = bsn_deny
class bsn_dhcp_offload(bsn):
type = 65535
experimenter = 6035143
subtype = 2
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_dhcp_offload()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_dhcp_offload {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[2] = bsn_dhcp_offload
class bsn_disable_l3(bsn):
type = 65535
experimenter = 6035143
subtype = 13
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_disable_l3()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 13)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_disable_l3 {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[13] = bsn_disable_l3
class bsn_disable_split_horizon_check(bsn):
type = 65535
experimenter = 6035143
subtype = 3
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_disable_split_horizon_check()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_disable_split_horizon_check {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[3] = bsn_disable_split_horizon_check
class bsn_disable_src_mac_check(bsn):
type = 65535
experimenter = 6035143
subtype = 0
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_disable_src_mac_check()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 0)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_disable_src_mac_check {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[0] = bsn_disable_src_mac_check
class bsn_disable_vlan_counters(bsn):
type = 65535
experimenter = 6035143
subtype = 9
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_disable_vlan_counters()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 9)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_disable_vlan_counters {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[9] = bsn_disable_vlan_counters
class bsn_internal_priority(bsn):
type = 65535
experimenter = 6035143
subtype = 12
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_internal_priority()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 12)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_internal_priority {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[12] = bsn_internal_priority
class bsn_packet_of_death(bsn):
type = 65535
experimenter = 6035143
subtype = 6
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_packet_of_death()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 6)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_packet_of_death {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[6] = bsn_packet_of_death
class bsn_permit(bsn):
type = 65535
experimenter = 6035143
subtype = 4
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_permit()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_permit {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[4] = bsn_permit
class bsn_prioritize_pdus(bsn):
type = 65535
experimenter = 6035143
subtype = 7
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_prioritize_pdus()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 7)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_prioritize_pdus {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[7] = bsn_prioritize_pdus
class bsn_require_vlan_xlate(bsn):
type = 65535
experimenter = 6035143
subtype = 8
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_require_vlan_xlate()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 8)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_require_vlan_xlate {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[8] = bsn_require_vlan_xlate
class bsn_span_destination(bsn):
type = 65535
experimenter = 6035143
subtype = 10
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_span_destination()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 10)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("bsn_span_destination {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
bsn.subtypes[10] = bsn_span_destination
class clear_actions(instruction_id):
type = 5
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = clear_actions()
_type = reader.read("!H")[0]
assert(_type == 5)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("clear_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[5] = clear_actions
class goto_table(instruction_id):
type = 1
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = goto_table()
_type = reader.read("!H")[0]
assert(_type == 1)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("goto_table {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[1] = goto_table
class meter(instruction_id):
type = 6
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = meter()
_type = reader.read("!H")[0]
assert(_type == 6)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("meter {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[6] = meter
class write_actions(instruction_id):
type = 3
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = write_actions()
_type = reader.read("!H")[0]
assert(_type == 3)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("write_actions {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[3] = write_actions
class write_metadata(instruction_id):
type = 2
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = write_metadata()
_type = reader.read("!H")[0]
assert(_type == 2)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("write_metadata {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
instruction_id.subtypes[2] = write_metadata
| apache-2.0 |
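# --- Illustrative usage sketch (not part of the generated file above) ---
# A minimal check of the pack() layout defined by the generated classes,
# assuming this module is importable as loxi.of13.instruction_id (the
# import path is an assumption from the 'ofp = sys.modules' line above).
# The byte layout (big-endian type, length, experimenter, subtype)
# follows directly from the struct formats used in pack().
import struct
from loxi.of13 import instruction_id

inst = instruction_id.bsn_arp_offload()
buf = inst.pack()
# 2-byte type + 2-byte length + 4-byte experimenter + 4-byte subtype = 12
assert len(buf) == 12
assert struct.unpack("!H", buf[0:2])[0] == 65535     # OFPIT_EXPERIMENTER
assert struct.unpack("!H", buf[2:4])[0] == 12        # total length
assert struct.unpack("!L", buf[4:8])[0] == 6035143   # Big Switch experimenter id
assert struct.unpack("!L", buf[8:12])[0] == 1        # bsn_arp_offload subtype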
mou4e/zirconium | tools/telemetry/telemetry/core/platform/ps_util.py | 34 | 1462 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
def GetChildPids(processes, pid):
"""Returns all child processes of |pid| from the given |processes| list.
Args:
processes: A tuple of (pid, ppid, state) as generated by ps.
pid: The pid for which to get children.
Returns:
A list of child pids.
"""
child_dict = defaultdict(list)
for curr_pid, curr_ppid, state in processes:
if 'Z' in state:
continue # Ignore zombie processes
child_dict[int(curr_ppid)].append(int(curr_pid))
queue = [pid]
child_ids = []
while queue:
parent = queue.pop()
if parent in child_dict:
children = child_dict[parent]
queue.extend(children)
child_ids.extend(children)
return child_ids
def GetPsOutputWithPlatformBackend(platform_backend, columns, pid):
"""Returns output of the 'ps' command as a list of lines.
Args:
platform_backend: The platform backend (LinuxBasedPlatformBackend or
PosixPlatformBackend).
columns: A list of required columns, e.g., ['pid', 'pss'].
pid: If not None, returns only the information of the process with the pid.
"""
args = ['ps']
args.extend(['-p', str(pid)] if pid != None else ['-e'])
for c in columns:
args.extend(['-o', c + '='])
return platform_backend.RunCommand(args).splitlines()
| bsd-3-clause |
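# --- Illustrative usage sketch (not part of the original file above) ---
# GetChildPids walks the (pid, ppid, state) tuples transitively and skips
# zombies ('Z' in state). The process list below is made up.
processes = [
    (1, 0, 'S'),    # init
    (10, 1, 'R'),   # child of 1
    (11, 1, 'Z'),   # zombie child of 1 -- ignored
    (20, 10, 'S'),  # grandchild of 1
]
assert sorted(GetChildPids(processes, 1)) == [10, 20]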
papados/ordersys | Lib/site-packages/django/contrib/comments/forms.py | 218 | 8080 | import time
from django import forms
from django.forms.util import ErrorDict
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.comments.models import Comment
from django.utils.crypto import salted_hmac, constant_time_compare
from django.utils.encoding import force_text
from django.utils.text import get_text_list
from django.utils import timezone
from django.utils.translation import ungettext, ugettext, ugettext_lazy as _
COMMENT_MAX_LENGTH = getattr(settings,'COMMENT_MAX_LENGTH', 3000)
class CommentSecurityForm(forms.Form):
"""
Handles the security aspects (anti-spoofing) for comment forms.
"""
content_type = forms.CharField(widget=forms.HiddenInput)
object_pk = forms.CharField(widget=forms.HiddenInput)
timestamp = forms.IntegerField(widget=forms.HiddenInput)
security_hash = forms.CharField(min_length=40, max_length=40, widget=forms.HiddenInput)
def __init__(self, target_object, data=None, initial=None):
self.target_object = target_object
if initial is None:
initial = {}
initial.update(self.generate_security_data())
super(CommentSecurityForm, self).__init__(data=data, initial=initial)
def security_errors(self):
"""Return just those errors associated with security"""
errors = ErrorDict()
for f in ["honeypot", "timestamp", "security_hash"]:
if f in self.errors:
errors[f] = self.errors[f]
return errors
def clean_security_hash(self):
"""Check the security hash."""
security_hash_dict = {
'content_type' : self.data.get("content_type", ""),
'object_pk' : self.data.get("object_pk", ""),
'timestamp' : self.data.get("timestamp", ""),
}
expected_hash = self.generate_security_hash(**security_hash_dict)
actual_hash = self.cleaned_data["security_hash"]
if not constant_time_compare(expected_hash, actual_hash):
raise forms.ValidationError("Security hash check failed.")
return actual_hash
def clean_timestamp(self):
"""Make sure the timestamp isn't too far (> 2 hours) in the past."""
ts = self.cleaned_data["timestamp"]
if time.time() - ts > (2 * 60 * 60):
raise forms.ValidationError("Timestamp check failed")
return ts
def generate_security_data(self):
"""Generate a dict of security data for "initial" data."""
timestamp = int(time.time())
security_dict = {
'content_type' : str(self.target_object._meta),
'object_pk' : str(self.target_object._get_pk_val()),
'timestamp' : str(timestamp),
'security_hash' : self.initial_security_hash(timestamp),
}
return security_dict
def initial_security_hash(self, timestamp):
"""
Generate the initial security hash from self.content_object
and a (unix) timestamp.
"""
initial_security_dict = {
'content_type' : str(self.target_object._meta),
'object_pk' : str(self.target_object._get_pk_val()),
'timestamp' : str(timestamp),
}
return self.generate_security_hash(**initial_security_dict)
def generate_security_hash(self, content_type, object_pk, timestamp):
"""
Generate an HMAC security hash from the provided info.
"""
info = (content_type, object_pk, timestamp)
key_salt = "django.contrib.forms.CommentSecurityForm"
value = "-".join(info)
return salted_hmac(key_salt, value).hexdigest()
class CommentDetailsForm(CommentSecurityForm):
"""
Handles the specific details of the comment (name, comment, etc.).
"""
name = forms.CharField(label=_("Name"), max_length=50)
email = forms.EmailField(label=_("Email address"))
url = forms.URLField(label=_("URL"), required=False)
comment = forms.CharField(label=_('Comment'), widget=forms.Textarea,
max_length=COMMENT_MAX_LENGTH)
def get_comment_object(self):
"""
Return a new (unsaved) comment object based on the information in this
form. Assumes that the form is already validated and will throw a
ValueError if not.
Does not set any of the fields that would come from a Request object
(i.e. ``user`` or ``ip_address``).
"""
if not self.is_valid():
raise ValueError("get_comment_object may only be called on valid forms")
CommentModel = self.get_comment_model()
new = CommentModel(**self.get_comment_create_data())
new = self.check_for_duplicate_comment(new)
return new
def get_comment_model(self):
"""
Get the comment model to create with this form. Subclasses in custom
comment apps should override this, get_comment_create_data, and perhaps
check_for_duplicate_comment to provide custom comment models.
"""
return Comment
def get_comment_create_data(self):
"""
Returns the dict of data to be used to create a comment. Subclasses in
custom comment apps that override get_comment_model can override this
method to add extra fields onto a custom comment model.
"""
return dict(
content_type = ContentType.objects.get_for_model(self.target_object),
object_pk = force_text(self.target_object._get_pk_val()),
user_name = self.cleaned_data["name"],
user_email = self.cleaned_data["email"],
user_url = self.cleaned_data["url"],
comment = self.cleaned_data["comment"],
submit_date = timezone.now(),
site_id = settings.SITE_ID,
is_public = True,
is_removed = False,
)
def check_for_duplicate_comment(self, new):
"""
Check that a submitted comment isn't a duplicate. This might be caused
by someone posting a comment twice. If it is a dup, silently return the *previous* comment.
"""
possible_duplicates = self.get_comment_model()._default_manager.using(
self.target_object._state.db
).filter(
content_type = new.content_type,
object_pk = new.object_pk,
user_name = new.user_name,
user_email = new.user_email,
user_url = new.user_url,
)
for old in possible_duplicates:
if old.submit_date.date() == new.submit_date.date() and old.comment == new.comment:
return old
return new
def clean_comment(self):
"""
If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't
contain anything in PROFANITIES_LIST.
"""
comment = self.cleaned_data["comment"]
if settings.COMMENTS_ALLOW_PROFANITIES == False:
bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()]
if bad_words:
raise forms.ValidationError(ungettext(
"Watch your mouth! The word %s is not allowed here.",
"Watch your mouth! The words %s are not allowed here.",
len(bad_words)) % get_text_list(
['"%s%s%s"' % (i[0], '-'*(len(i)-2), i[-1])
for i in bad_words], ugettext('and')))
return comment
class CommentForm(CommentDetailsForm):
honeypot = forms.CharField(required=False,
label=_('If you enter anything in this field '\
'your comment will be treated as spam'))
def clean_honeypot(self):
"""Check that nothing's been entered into the honeypot."""
value = self.cleaned_data["honeypot"]
if value:
raise forms.ValidationError(self.fields["honeypot"].label)
return value
| unlicense |
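# --- Illustrative sketch (not part of Django) ---
# clean_timestamp() above rejects form timestamps older than two hours;
# the check is plain arithmetic and needs no Django settings to reason about.
import time

now = int(time.time())
fresh = now - 60 * 60          # one hour old: passes the check
stale = now - 3 * 60 * 60      # three hours old: would raise ValidationError
assert time.time() - fresh <= 2 * 60 * 60
assert time.time() - stale > 2 * 60 * 60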
2014c2g2/2014c2 | w2/static/Brython2.0.0-20140209-164925/Lib/browser/slideshow.py | 49 | 1928 | from browser import doc,markdown,html
def keydown(ev,path,zone,page):
if ev.keyCode in [39,40]: # key right or down : next page
show(path,zone,page+1)
ev.preventDefault()
elif ev.keyCode in [37,38]: #key left or up: previous page
show(path,zone,page-1)
ev.preventDefault()
def move_to(ev,path,zone,nb_pages):
pc = (ev.x-ev.target.left)/ev.target.width
page = round(nb_pages*pc)
new_pos = '%spx' %(ev.x-ev.target.left-(doc['tl_pos'].width/2))
# show page at specified position
show(path,zone,page)
# set new cursor position
doc['tl_pos'].style.left = new_pos
def click_on_tl_pos(ev):
# don't move if user clicks on current timeline position
ev.stopPropagation()
def show(path,zone,page=0):
src = open(path).read()
title = ''
page_num = False
while src.startswith('@'):
line_end = src.find('\n')
key,value = src[:line_end].split(' ',1)
if key=='@title':
title = value
elif key=='@pagenum':
page_num = True
src = src[line_end+1:]
zone.html = ''
pages = src.split('../..\n')
if page<0:
page = 0
elif page >= len(pages):
page = len(pages)-1
doc.unbind('keydown')
doc.bind('keydown',lambda ev:keydown(ev,path,zone,page))
body = html.DIV()
body.html = markdown.mark(pages[page])[0]
footer = html.DIV(Id="footer")
if title:
footer <= html.DIV(title,style=dict(display='inline'))
if page_num:
footer <= html.SPAN(' (%s/%s)' %(page+1,len(pages)),
style=dict(display='inline'))
timeline = html.DIV(Id='timeline')
tl_pos = html.DIV(Id='tl_pos')
timeline <= tl_pos
timeline.bind('click',lambda ev:move_to(ev,path,zone,len(pages)))
tl_pos.bind('click',click_on_tl_pos)
zone <= body+footer+timeline
tl_pos.style.left = '%spx' %(timeline.width*page/len(pages))
| gpl-2.0 |
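# --- Illustrative slide source (not part of the original file above) ---
# show(path, zone) reads optional '@key value' header lines first
# ('@title', '@pagenum'), then splits pages on the '../..' separator.
# Note each '@' line must carry a value token because of split(' ', 1).
deck = """@title My Talk
@pagenum on
# Page one
Hello.
../..
# Page two
Goodbye.
"""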
GoogleCloudPlatform/PerfKitBenchmarker | tests/linux_benchmarks/mxnet_benchmark_test.py | 1 | 1272 | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mxnet_benchmark."""
import os
import unittest
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import mxnet_benchmark
class MxnetBenchmarkTestCase(unittest.TestCase,
test_util.SamplesTestMixin):
def setUp(self):
path = os.path.join(os.path.dirname(__file__), '..', 'data',
'mxnet_output.txt')
with open(path, 'r') as fp:
self.contents = fp.read()
def testParseSysbenchResult(self):
result = mxnet_benchmark._ExtractThroughput(self.contents)
self.assertEqual(result, 540.0266666666666)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
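# --- Illustrative runner sketch (not part of the original file above) ---
# Runs just this test case with the stdlib loader, assuming the repository
# root is on sys.path so the 'tests' package resolves.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName(
    'tests.linux_benchmarks.mxnet_benchmark_test.MxnetBenchmarkTestCase')
unittest.TextTestRunner(verbosity=2).run(suite)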
matthiasdiener/spack | var/spack/repos/builtin/packages/py-sphinx-rtd-theme/package.py | 5 | 1712 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PySphinxRtdTheme(PythonPackage):
"""ReadTheDocs.org theme for Sphinx."""
homepage = "https://github.com/rtfd/sphinx_rtd_theme/"
url = "https://pypi.io/packages/source/s/sphinx_rtd_theme/sphinx_rtd_theme-0.1.10a0.tar.gz"
import_modules = ['sphinx_rtd_theme']
version('0.2.5b1', '0923473a43bd2527f32151f195f2a521')
version('0.1.10a0', '83bd95cae55aa8b773a8cc3a41094282')
depends_on('py-setuptools', type='build')
| lgpl-2.1 |
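# --- Illustrative usage (not part of the package file above) ---
# Once this file is on Spack's package path, the pinned versions above are
# selectable with the usual '@version' spec syntax, e.g.:
#   spack info py-sphinx-rtd-theme
#   spack install py-sphinx-rtd-theme@0.2.5b1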
ethanhlc/streamlink | src/streamlink/plugins/crunchyroll.py | 2 | 11242 | import random
import re
import string
import datetime
from streamlink.plugin import Plugin, PluginError, PluginOptions
from streamlink.plugin.api import http, validate
from streamlink.stream import HLSStream
API_URL = "https://api.crunchyroll.com/{0}.0.json"
API_DEFAULT_LOCALE = "en_US"
API_USER_AGENT = "Mozilla/5.0 (iPhone; iPhone OS 8.3.0; {})"
API_HEADERS = {
"Host": "api.crunchyroll.com",
"Accept-Encoding": "gzip, deflate",
"Accept": "*/*",
"Content-Type": "application/x-www-form-urlencoded"
}
API_VERSION = "2313.8"
API_ACCESS_TOKEN = "QWjz212GspMHH9h"
API_DEVICE_TYPE = "com.crunchyroll.iphone"
STREAM_WEIGHTS = {
"low": 240,
"mid": 420,
"high": 720,
"ultra": 1080,
}
STREAM_NAMES = {
"120k": "low",
"328k": "mid",
"864k": "high"
}
def parse_timestamp(ts):
"""Takes ISO 8601 format(string) and converts into a utc datetime(naive)"""
return (
datetime.datetime.strptime(ts[:-7], "%Y-%m-%dT%H:%M:%S") +
datetime.timedelta(hours=int(ts[-5:-3]), minutes=int(ts[-2:])) *
int(ts[-6:-5] + "1")
)
_url_re = re.compile("""
http(s)?://(\w+\.)?crunchyroll\.
(?:
com|de|es|fr|co.jp
)
/[^/&?]+
/[^/&?]+-(?P<media_id>\d+)
""", re.VERBOSE)
_api_schema = validate.Schema({
"error": bool,
validate.optional("code"): validate.text,
validate.optional("message"): validate.text,
validate.optional("data"): object,
})
_media_schema = validate.Schema(
{
"stream_data": validate.any(
None,
{
"streams": validate.all(
[{
"quality": validate.any(validate.text, None),
"url": validate.url(
scheme="http",
path=validate.endswith(".m3u8")
),
validate.optional("video_encode_id"): validate.text
}]
)
}
)
},
validate.get("stream_data")
)
_login_schema = validate.Schema({
"auth": validate.text,
"expires": validate.all(
validate.text,
validate.transform(parse_timestamp)
),
"user": {
"username": validate.any(validate.text, None),
"email": validate.text
}
})
_session_schema = validate.Schema(
{
"session_id": validate.text
},
validate.get("session_id")
)
class CrunchyrollAPIError(Exception):
"""Exception thrown by the Crunchyroll API when an error occurs"""
def __init__(self, msg, code):
Exception.__init__(self, msg)
self.msg = msg
self.code = code
class CrunchyrollAPI(object):
def __init__(self, session_id=None, auth=None, locale=API_DEFAULT_LOCALE):
"""Abstract the API to access to Crunchyroll data.
Can take saved credentials to use on it's calls to the API.
"""
self.session_id = session_id
self.auth = auth
self.locale = locale
def _api_call(self, entrypoint, params, schema=None):
"""Makes a call against the api.
:param entrypoint: API method to call.
:param params: parameters to include in the request data.
:param schema: schema to use to validate the data
"""
url = API_URL.format(entrypoint)
# Default params
params = dict(params)
params.update({
"version": API_VERSION,
"locale": self.locale.replace('_', ''),
})
if self.session_id:
params["session_id"] = self.session_id
# Headers
headers = dict(API_HEADERS)
headers['User-Agent'] = API_USER_AGENT.format(self.locale)
# The certificate used by Crunchyroll cannot be verified in some environments.
res = http.get(url, params=params, headers=headers, verify=False)
json_res = http.json(res, schema=_api_schema)
if json_res["error"]:
err_msg = json_res.get("message", "Unknown error")
err_code = json_res.get("code", "unknown_error")
raise CrunchyrollAPIError(err_msg, err_code)
data = json_res.get("data")
if schema:
data = schema.validate(data, name="API response")
return data
def start_session(self, device_id, **kwargs):
"""Starts a session against Crunchyroll's server.
It is recommended that you call this method before making any other calls
to make sure you have a valid session against the server.
"""
params = {
"device_id": device_id,
"device_type": API_DEVICE_TYPE,
"access_token": API_ACCESS_TOKEN,
}
if self.auth:
params["auth"] = self.auth
return self._api_call("start_session", params, **kwargs)
def login(self, username, password, **kwargs):
"""Authenticates the session to be able to access restricted data from
the server (e.g. premium restricted videos).
"""
params = {
"account": username,
"password": password
}
return self._api_call("login", params, **kwargs)
def get_info(self, media_id, fields=None, **kwargs):
"""Returns the data for a certain media item.
:param media_id: id that identifies the media item to be accessed.
:param fields: list of the media"s field to be returned. By default the
API returns some fields, but others are not returned unless they are
explicitly asked for. I have no real documentation on the fields, but
they all seem to start with the "media." prefix (e.g. media.name,
media.stream_data).
"""
params = {
"media_id": media_id
}
if fields:
params["fields"] = ",".join(fields)
return self._api_call("info", params, **kwargs)
class Crunchyroll(Plugin):
options = PluginOptions({
"username": None,
"password": None,
"purge_credentials": None,
"locale": API_DEFAULT_LOCALE
})
@classmethod
def can_handle_url(self, url):
return _url_re.match(url)
@classmethod
def stream_weight(cls, key):
weight = STREAM_WEIGHTS.get(key)
if weight:
return weight, "crunchyroll"
return Plugin.stream_weight(key)
def _get_streams(self):
api = self._create_api()
match = _url_re.match(self.url)
media_id = int(match.group("media_id"))
try:
info = api.get_info(media_id, fields=["media.stream_data"],
schema=_media_schema)
except CrunchyrollAPIError as err:
raise PluginError(u"Media lookup error: {0}".format(err.msg))
if not info:
return
streams = {}
# The adaptive quality stream is sometimes a subset of all the other streams listed; ultra is not included
has_adaptive = any([s[u"quality"] == u"adaptive" for s in info[u"streams"]])
if has_adaptive:
self.logger.debug(u"Loading streams from adaptive playlist")
for stream in filter(lambda x: x[u"quality"] == u"adaptive", info[u"streams"]):
for q, s in HLSStream.parse_variant_playlist(self.session, stream[u"url"]).items():
# rename the bitrates to low, mid, or high. ultra doesn't seem to appear in the adaptive streams
name = STREAM_NAMES.get(q, q)
streams[name] = s
# If there is no adaptive quality stream then parse each individual result
for stream in info[u"streams"]:
if stream[u"quality"] != u"adaptive":
# the video_encode_id indicates that the stream is not a variant playlist
if u"video_encode_id" in stream:
streams[stream[u"quality"]] = HLSStream(self.session, stream[u"url"])
else:
# otherwise the stream url is actually a list of stream qualities
for q, s in HLSStream.parse_variant_playlist(self.session, stream[u"url"]).items():
# rename the bitrates to low, mid, or high. ultra doesn't seem to appear in the adaptive streams
name = STREAM_NAMES.get(q, q)
streams[name] = s
return streams
def _get_device_id(self):
"""Returns the saved device id or creates a new one and saves it."""
device_id = self.cache.get("device_id")
if not device_id:
# Create a random device id and cache it for a year
char_set = string.ascii_letters + string.digits
device_id = "".join(random.sample(char_set, 32))
self.cache.set("device_id", device_id, 365 * 24 * 60 * 60)
return device_id
def _create_api(self):
"""Creates a new CrunchyrollAPI object, initiates it's session and
tries to authenticate it either by using saved credentials or the
user's username and password.
"""
if self.options.get("purge_credentials"):
self.cache.set("session_id", None, 0)
self.cache.set("auth", None, 0)
current_time = datetime.datetime.utcnow()
device_id = self._get_device_id()
locale = self.options.get("locale")
api = CrunchyrollAPI(
self.cache.get("session_id"), self.cache.get("auth"), locale
)
self.logger.debug("Creating session")
try:
api.session_id = api.start_session(device_id, schema=_session_schema)
except CrunchyrollAPIError as err:
if err.code == "bad_session":
self.logger.debug("Current session has expired, creating a new one")
api = CrunchyrollAPI(locale=locale)
api.session_id = api.start_session(device_id, schema=_session_schema)
else:
raise err
# Save session and hope it lasts for a few hours
self.cache.set("session_id", api.session_id, 4 * 60 * 60)
self.logger.debug("Session created")
if api.auth:
self.logger.debug("Using saved credentials")
elif self.options.get("username"):
try:
self.logger.info("Attempting to login using username and password")
login = api.login(
self.options.get("username"),
self.options.get("password"),
schema=_login_schema
)
api.auth = login["auth"]
self.logger.info("Successfully logged in as '{0}'",
login["user"]["username"] or login["user"]["email"])
expires = (login["expires"] - current_time).total_seconds()
self.cache.set("auth", login["auth"], expires)
except CrunchyrollAPIError as err:
raise PluginError(u"Authentication error: {0}".format(err.msg))
else:
self.logger.warning(
"No authentication provided, you won't be able to access "
"premium restricted content"
)
return api
__plugin__ = Crunchyroll
| bsd-2-clause |
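# --- Illustrative sketch (not part of the plugin above) ---
# Variant-playlist bitrate labels are renamed via STREAM_NAMES and then
# ranked by stream_weight() through STREAM_WEIGHTS.
quality = "328k"                        # label from a variant playlist
name = STREAM_NAMES.get(quality, quality)
assert name == "mid"
assert Crunchyroll.stream_weight(name) == (420, "crunchyroll")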
ayumilong/rethinkdb | test/interface/resources.py | 29 | 3022 | #!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
from __future__ import print_function
import os, sys, time, urllib2
startTime = time.time()
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv))
print("Spinning up a server (%.2fs)" % (time.time() - startTime))
with driver.Process(output_folder='.', command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True) as server:
baseURL = 'http://%s:%d/' % (server.host, server.http_port)
print("Getting root (%.2fs)" % (time.time() - startTime))
fetchResult = urllib2.urlopen(baseURL, timeout=2)
fetchData = fetchResult.read()
assert fetchResult.getcode() == 200, 'Got a non 200 code when requesting the root: %s' % str(fetchResult.getcode())
assert fetchResult.headers['content-type'] == 'text/html'
assert '<html' in fetchData, 'Data from root did not include "html": %s' % fetchData
print("Getting invalid page (%.2fs)" % (time.time() - startTime))
# open a log file iterator and flush out the existing lines
logFile = utils.nonblocking_readline(server.logfile_path)
while next(logFile) is not None:
pass
try:
fetchResult = urllib2.urlopen(os.path.join(baseURL, 'foobar'), timeout=2)
except urllib2.HTTPError as e:
assert e.code == 403, 'Got a non 403 code when requesting bad url /foobar: %s' % str(e.code)
else:
assert False, "Did not raise a 403 error code when requesting a bad url"
print("Checking that the bad access was recorded (%.2fs)" % (time.time() - startTime))
deadline = time.time() + 2
foundIt = False
while time.time() < deadline:
thisEntry = next(logFile)
while thisEntry is not None:
if 'Someone asked for the nonwhitelisted file "/foobar"' in thisEntry:
foundIt = True
break
thisEntry = next(logFile)
if foundIt:
break
time.sleep(0.05)
else:
assert False, "Timed out waiting for the bad access marker to be written to the log"
print("Getting ajax/me (%.2fs)" % (time.time() - startTime))
fetchResult = urllib2.urlopen(os.path.join(baseURL, 'ajax/me'), timeout=2)
fetchData = fetchResult.read()
assert fetchResult.getcode() == 200, 'Got a non 200 code when requesting /me: %s' % str(fetchResult.getcode())
assert fetchResult.headers['content-type'] == 'application/json'
assert fetchData == '"%s"' % server.uuid, 'Data from ajax/me did not match the expected server uuid: %s vs %s' % (fetchData, server.uuid)
# -- ending
print("Cleaning up (%.2fs)" % (time.time() - startTime))
print("Done. (%.2fs)" % (time.time() - startTime))
| agpl-3.0 |
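# --- Illustrative sketch (not part of the test above) ---
# The deadline-polling idiom used above, factored out: keep consuming lines
# from the non-blocking reader (which yields None when idle) until a marker
# appears or time runs out.
import time

def wait_for_marker(line_iter, marker, timeout=2):
    deadline = time.time() + timeout
    while time.time() < deadline:
        line = next(line_iter)
        while line is not None:
            if marker in line:
                return True
            line = next(line_iter)
        time.sleep(0.05)
    return False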
vertcoin/electrum-vtc | gui/kivy/uix/menus.py | 2 | 2782 | from functools import partial
from kivy.animation import Animation
from kivy.core.window import Window
from kivy.clock import Clock
from kivy.uix.bubble import Bubble, BubbleButton
from kivy.properties import ListProperty
from kivy.uix.widget import Widget
from electrum_vtc_gui.i18n import _
class ContextMenuItem(Widget):
'''abstract class
'''
class ContextButton(ContextMenuItem, BubbleButton):
pass
class ContextMenu(Bubble):
buttons = ListProperty([_('ok'), _('cancel')])
'''List of Buttons to be displayed at the bottom'''
__events__ = ('on_press', 'on_release')
def __init__(self, **kwargs):
self._old_buttons = self.buttons
super(ContextMenu, self).__init__(**kwargs)
self.on_buttons(self, self.buttons)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
self.hide()
return
return super(ContextMenu, self).on_touch_down(touch)
def on_buttons(self, _menu, value):
if 'menu_content' not in self.ids.keys():
return
if value == self._old_buttons:
return
blayout = self.ids.menu_content
blayout.clear_widgets()
for btn in value:
ib = ContextButton(text=btn)
ib.bind(on_press=partial(self.dispatch, 'on_press'))
ib.bind(on_release=partial(self.dispatch, 'on_release'))
blayout.add_widget(ib)
self._old_buttons = value
def on_press(self, instance):
pass
def on_release(self, instance):
pass
def show(self, pos, duration=0):
Window.add_widget(self)
# wait for the bubble to adjust its size according to the text, then animate
Clock.schedule_once(lambda dt: self._show(pos, duration))
def _show(self, pos, duration):
def on_stop(*l):
if duration:
Clock.schedule_once(self.hide, duration + .5)
self.opacity = 0
arrow_pos = self.arrow_pos
if arrow_pos[0] in ('l', 'r'):
pos = pos[0], pos[1] - (self.height/2)
else:
pos = pos[0] - (self.width/2), pos[1]
self.limit_to = Window
anim = Animation(opacity=1, pos=pos, d=.32)
anim.bind(on_complete=on_stop)
anim.cancel_all(self)
anim.start(self)
def hide(self, *dt):
def on_stop(*l):
Window.remove_widget(self)
anim = Animation(opacity=0, d=.25)
anim.bind(on_complete=on_stop)
anim.cancel_all(self)
anim.start(self)
def add_widget(self, widget, index=0):
if not isinstance(widget, ContextMenuItem):
super(ContextMenu, self).add_widget(widget, index)
return
self.ids.menu_content.add_widget(widget, index)
| mit |
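# --- Illustrative usage sketch (not part of the original file above) ---
# Assumes a running Kivy app (show() needs Window). Handlers bound to the
# custom events receive the args passed to dispatch(), so the released
# ContextButton is taken from the end of *args here.
def on_choice(*args):
    button = args[-1]            # the ContextButton that was released
    chosen = button.text         # e.g. 'Copy'

menu = ContextMenu(buttons=['Copy', 'Paste', 'Cancel'])
menu.bind(on_release=on_choice)
menu.show((200, 300), duration=3)  # fades in at pos, auto-hides afterwards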
kgullikson88/TS23-Scripts | TrimData.py | 1 | 5641 | import sys
import numpy as np
import matplotlib.pyplot as plt
import FitsUtils
trimming = {
21: [0, 576.3],
22: [582.6, 9e9],
23: [583.45, 9e9],
37: [0, 680.15]}
class Trimmer:
def __init__(self, data=None):
if data != None:
self.data = data.copy()
self.clicks = []
logfile = open("trimlog.dat", "w")
logfile.close()
def InputData(self, data):
self.data = data.copy()
def Plot(self):
self.fig = plt.figure(1, figsize=(11, 10))
cid = self.fig.canvas.mpl_connect('key_press_event', self.keypress)
plt.plot(self.data.x, self.data.y)
plt.plot(self.data.x, self.data.cont)
plt.show()
return self.data.copy()
def keypress(self, event):
if event.key == "r":
print "Set to remove points. Click on the bounds"
self.clipmode = "remove"
self.clickid = self.fig.canvas.mpl_connect('button_press_event', self.mouseclick)
elif event.key == "i":
print "Set to interpolate between points. Click the bounds"
self.clipmode = "interpolate"
self.clickid = self.fig.canvas.mpl_connect('button_press_event', self.mouseclick)
def mouseclick(self, event):
self.clicks.append(event.xdata)
if len(self.clicks) == 2:
left = max(0, np.searchsorted(self.data.x, min(self.clicks)))
right = min(self.data.size() - 1, np.searchsorted(self.data.x, max(self.clicks)))
logfile = open("trimlog.dat", "a")
if self.clipmode == "remove":
logfile.write("Removing:\t%.3f to %.3f\n" % (min(self.clicks), max(self.clicks)))
self.data.x = np.delete(self.data.x, np.arange(left, right + 1))
self.data.y = np.delete(self.data.y, np.arange(left, right + 1))
self.data.cont = np.delete(self.data.cont, np.arange(left, right + 1))
self.data.err = np.delete(self.data.err, np.arange(left, right + 1))
elif self.clipmode == "interpolate":
logfile.write("Interpolating:\t%.3f to %.3f\n" % (min(self.clicks), max(self.clicks)))
x1, x2 = self.data.x[left], self.data.x[right]
y1, y2 = self.data.y[left], self.data.y[right]
m = (y2 - y1) / (x2 - x1)
self.data.y[left:right] = m * (self.data.x[left:right] - x1) + y1
self.data.cont[left:right] = m * (self.data.x[left:right] - x1) + y1
self.fig.clf()
cid = self.fig.canvas.mpl_connect('key_press_event', self.keypress)
plt.plot(self.data.x, self.data.y)
plt.plot(self.data.x, self.data.cont)
plt.draw()
self.fig.canvas.mpl_disconnect(self.clickid)
self.clicks = []
logfile.close()
def main1():
for fname in sys.argv[1:]:
if "-" in fname:
num = int(fname.split("-")[-1].split(".fits")[0])
outfilename = "%s-%i.fits" % (fname.split("-")[0], num + 1)
else:
outfilename = "%s-0.fits" % (fname.split(".fits")[0])
orders = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error",
cont="continuum")
for i, order in enumerate(orders):
if i in trimming.keys():
left = np.searchsorted(order.x, trimming[i][0])
right = np.searchsorted(order.x, trimming[i][1])
order.x = order.x[left:right]
order.y = order.y[left:right]
order.cont = order.cont[left:right]
order.err = order.err[left:right]
orders[i] = order.copy()
columns = {"wavelength": order.x,
"flux": order.y,
"continuum": order.cont,
"error": order.err}
if i == 0:
FitsUtils.OutputFitsFileExtensions(columns, fname, outfilename, mode="new")
else:
FitsUtils.OutputFitsFileExtensions(columns, outfilename, outfilename, mode="append")
if __name__ == "__main__":
trim = Trimmer()
for fname in sys.argv[1:]:
if "-" in fname:
num = int(fname.split("-")[-1].split(".fits")[0])
outfilename = "%s-%i.fits" % (fname.split("-")[0], num + 1)
else:
outfilename = "%s-0.fits" % (fname.split(".fits")[0])
orders = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error",
cont="continuum")
# trim = Trimmer(orders[0])
logfile = open("trimlog.dat", "a")
logfile.write("\n\n\n******************************************\n")
logfile.write("\nTrimming file %s\n\n" % (fname))
logfile.write("******************************************\n")
logfile.close()
for i, order in enumerate(orders):
logfile = open("trimlog.dat", "a")
logfile.write("******** Order %i ******************\n" % (i + 1))
logfile.close()
trim.InputData(order)
order = trim.Plot()
columns = {"wavelength": order.x,
"flux": order.y,
"continuum": order.cont,
"error": order.err}
if i == 0:
FitsUtils.OutputFitsFileExtensions(columns, fname, outfilename, mode="new")
else:
FitsUtils.OutputFitsFileExtensions(columns, outfilename, outfilename, mode="append")
| gpl-3.0 |
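# --- Illustrative sketch (not part of the original file above) ---
# What a `trimming` entry does: order 21 keeps only pixels with wavelength
# below 576.3; the bounds are wavelengths, and searchsorted turns them into
# array indices exactly as in main1() above.
import numpy as np

wave = np.linspace(570.0, 580.0, 11)   # fake wavelength grid
left = np.searchsorted(wave, 0)        # -> 0
right = np.searchsorted(wave, 576.3)   # first index with wave >= 576.3
trimmed = wave[left:right]
assert trimmed.max() < 576.3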
ericlink/adms-server | playframework-dist/1.1-src/python/Lib/idlelib/PyShell.py | 2 | 52614 | #! /usr/bin/env python
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import macosxSupport
import linecache
from code import InteractiveInterpreter
try:
from Tkinter import *
except ImportError:
print>>sys.__stderr__, "** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **"
sys.exit(1)
import tkMessageBox
from EditorWindow import EditorWindow, fixwordbreaks
from FileList import FileList
from ColorDelegator import ColorDelegator
from UndoDelegator import UndoDelegator
from OutputWindow import OutputWindow
from configHandler import idleConf
import idlever
import rpc
import Debugger
import RemoteDebugger
IDENTCHARS = string.ascii_letters + string.digits + "_"
LOCALHOST = '127.0.0.1'
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno):
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename, lineno))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
line = linecache.getline(filename, lineno).strip()
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(which destroys them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for filename in cache.keys():
if filename[:1] + filename[-1:] == '<>':
save[filename] = cache[filename]
orig_checkcache()
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
("Clear Breakpoint", "<<clear-breakpoint-here>>")]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
lines = open(self.breakpointPath,"r").readlines()
except IOError:
lines = []
new_file = open(self.breakpointPath,"w")
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
new_file.close()
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
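# eval() parses the stored list literal back, e.g. "[3, 17]" -> [3, 17];
# ast.literal_eval would arguably be a safer choice for an untrusted breakpoints file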
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
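# Tk returns tag ranges as a flat sequence of start/end text indices,
# e.g. ("3.0", "4.0", "9.0", "11.0"); each pair covers lines start..end-1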
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index]))
end = int(float(ranges[index+1]))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
None: idleConf.GetHighlight(theme, "normal"),
})
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = self.build_subprocess_arglist()
port = 8833
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
def build_subprocess_arglist(self):
w = ['-W' + s for s in sys.warnoptions]
if 1/2 > 0: # account for new division
w.append('-Qnew')
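# (1/2 > 0 is only true when this process itself runs under -Qnew,
# so new-division behavior is propagated to the subprocess)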
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
# handle embedded space in path by quoting the argument
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
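# example result (paths and options vary by platform):
# ['/usr/bin/python', '-c', "__import__('idlelib.run').run.main(False)", '8833']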
return [decorated_exec] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
# spawning first avoids passing a listening socket to the subprocess
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
addr = (LOCALHOST, self.port)
# Idle starts listening for connection on localhost
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error, err:
pass
else:
self.display_port_binding_error()
return None
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("stdin", self.tkconsole)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path()
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.transfer_path()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
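# e.g. an 80-column shell gets 32 '=' characters on each side of ' RESTART '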
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
def transfer_path(self):
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (sys.path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print >>console, repr(what)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print >>sys.__stderr__, errmsg, what
print >>console, errmsg, what
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
source = open(filename, "r").read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print>>tkerr, '*** Error in script or command!\n'
print>>tkerr, 'Traceback (most recent call last):'
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input\n")
return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
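# mimic the (size, mtime, lines, fullname) tuple format that linecache stores for real files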
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
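# keep only keys wrapped in angle brackets (IDLE's "<pyshell#n>" pseudo-files);
# real files can always be re-read from disk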
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
try:
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print >> self.tkconsole.stderr, \
"IDLE internal error in runcode()"
self.showtraceback()
if use_subprocess:
self.tkconsole.endexecuting()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind TCP/IP port 8833, which is necessary to "
"communicate with its Python execution server. Either "
"no networking is installed on this computer or another "
"process (another IDLE?) is using the port. Run IDLE with the -n "
"command line switch to start without a subprocess and refer to "
"Help/IDLE Help 'Running without a subprocess' for further "
"details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if macosxSupport.runningAsOSXApp():
del menu_specs[-3]
menu_specs[-2] = ("windows", "_Window")
# New classes
from IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<beginning-of-line>>", self.home_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
import IOBinding
self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
self.console = PseudoFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response == False:
return "cancel"
if self.reading:
self.top.quit()
self.canceled = True
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
firewallmessage = """
****************************************************************
Personal firewall software may warn about the connection IDLE
makes to its subprocess using this computer's internal loopback
interface. This connection is not visible on any external
interface and no data is sent to or received from the Internet.
****************************************************************
"""
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s\nIDLE %s %s\n" %
(sys.version, sys.platform, self.COPYRIGHT,
self.firewallmessage, idlever.IDLE_VERSION, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def home_callback(self, event):
if event.state != 0 and event.keysym == "Home":
return # <Modifier-Home>; fall back to class binding
if self.text.compare("iomark", "<=", "insert") and \
self.text.compare("insert linestart", "<=", "iomark"):
self.text.mark_set("insert", "iomark")
self.text.tag_remove("sel", "1.0", "end")
self.text.see("insert")
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if it is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
self.interp.restart_subprocess()
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
class PseudoFile(object):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self.encoding = encoding
def write(self, s):
self.shell.write(s, self.tags)
def writelines(self, l):
map(self.write, l)
def flush(self):
pass
def isatty(self):
return True
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error, msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print "No script file: ", script
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not edit_start
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args:
flist.open(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.runningAsOSXApp() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
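# note: the next condition parses as (shell and cmd) or script -- 'and' binds tighter than 'or'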
if shell and cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
root.mainloop()
root.destroy()
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
| mit |
oyiptong/up-headliner | up/headliner/aggregator_scheduler.py | 1 | 1270 | #!/usr/bin/env python
import argparse
from celery import Celery
from celery.bin.beat import beat
from celery import maybe_patch_concurrency
from up.headliner.utils import read_config_file, setup_basic_logger
from up.headliner import Application
def get_scheduler_config():
"""
Read configuration for the aggregator scheduler from multiple places.
Configuration is obtained with the following priority:
default config path > specified json config > up.headliner.settings
"""
parser = argparse.ArgumentParser(description="Headliner Scheduler is a program that sends tasks periodically. Tasks are configured in headliner's configuration file.")
parser.add_argument("--config", metavar="config", type=str, help="Specify a json configuration file", default=None)
options = parser.parse_args()
config = read_config_file(options)
return config
config = get_scheduler_config()
app = Application.instance(config)
aggregator = Celery("headliner", broker=app.message_broker_url, backend=app.task_results_backend_url)
def main():
setup_basic_logger()
aggregator.config_from_object(app.config.scheduler)
maybe_patch_concurrency()
beat(aggregator).execute_from_commandline()
if __name__ == "__main__":
main()
| mpl-2.0 |
DucQuang1/BuildingMachineLearningSystemsWithPython | ch04/blei_lda.py | 21 | 2601 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
from wordcloud import create_cloud
try:
from gensim import corpora, models, matutils
except:
print("import gensim failed.")
print()
print("Please install it")
raise
import matplotlib.pyplot as plt
import numpy as np
from os import path
NUM_TOPICS = 100
# Check that data exists
if not path.exists('./data/ap/ap.dat'):
print('Error: Expected data to be present at data/ap/')
print('Please cd into ./data & run ./download_ap.sh')
raise SystemExit(1)  # stop here rather than crash below on the missing corpus
# Load the data
corpus = corpora.BleiCorpus('./data/ap/ap.dat', './data/ap/vocab.txt')
# Build the topic model
model = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=None)
# Iterate over all the topics in the model
for ti in range(model.num_topics):
words = model.show_topic(ti, 64)
tf = sum(f for f, w in words)
with open('topics.txt', 'a' if ti else 'w') as output:  # plain 'w' truncated the file on every iteration, keeping only the last topic
output.write('\n'.join('{}:{}'.format(w, int(1000. * f / tf)) for f, w in words))
output.write("\n\n\n")
# We first identify the most discussed topic, i.e., the one with the
# highest total weight
topics = matutils.corpus2dense(model[corpus], num_terms=model.num_topics)
weight = topics.sum(1)
max_topic = weight.argmax()
# Get the top 64 words for this topic
# Without the argument, show_topic would return only 10 words
words = model.show_topic(max_topic, 64)
# This function will actually check for the presence of pytagcloud and is otherwise a no-op
create_cloud('cloud_blei_lda.png', words)
num_topics_used = [len(model[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist(num_topics_used, np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
fig.tight_layout()
fig.savefig('Figure_04_01.png')
# Now, repeat the same exercise using alpha=1.0
# You can edit the constant below to play around with this parameter
ALPHA = 1.0
model1 = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=ALPHA)
num_topics_used1 = [len(model1[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist([num_topics_used, num_topics_used1], np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
# The coordinates below were fit by trial and error to look good
ax.text(9, 223, r'default alpha')
ax.text(26, 156, 'alpha=1.0')
fig.tight_layout()
fig.savefig('Figure_04_02.png')
| mit |
friedrich420/Note-3-AEL-Kernel-NEW-NG2-2 | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
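# e.g. status value 421 with arg 1 selects the middle digit: 421 / 10**1 == 42, 42 % 10 == 2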
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
pyupio/pyup | tests/test_bot.py | 1 | 34297 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from unittest import TestCase
from pyup.bot import Bot
from .test_pullrequest import pullrequest_factory
from pyup.updates import RequirementUpdate, InitialUpdate
from pyup.requirements import RequirementFile
from pyup.errors import NoPermissionError, ConfigError
from pyup.config import RequirementConfig
from mock import Mock, patch
def bot_factory(repo="foo/foo", user_token="foo", bot_token=None,
bot_class=Bot, ignore_ssl=False, prs=list()):
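# Test helper: builds a Bot wired to a mocked provider and a canned config;
# _fetched_prs is pre-set so tests never fetch real pull requests.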
bot = bot_class(
repo=repo,
user_token=user_token,
bot_token=bot_token,
ignore_ssl=ignore_ssl,
)
bot._fetched_prs = True
bot.req_bundle.pull_requests = prs
bot.provider = Mock()
bot.config.update_config({
"close_prs": True,
"pin": True,
"branch": "base_branch",
"search": True
})
return bot
class BotUserRepoTest(TestCase):
def test_user_repo(self):
bot = bot_factory()
bot.provider.get_repo.return_value = "THE REPO"
self.assertEqual(bot.user_repo, "THE REPO")
class BotUserTest(TestCase):
def test_user(self):
bot = bot_factory()
bot.provider.get_user.return_value = "THE USER"
self.assertEqual(bot.user, "THE USER")
class BotBotTest(TestCase):
def test_bot_without_token(self):
bot = bot_factory()
bot.provider.get_user.return_value = "THE BOT"
self.assertEqual(bot.bot, "THE BOT")
def test_bot_with_token(self):
bot = bot_factory(bot_token="the foo")
bot.provider.get_user.return_value = "THE BOT"
self.assertEqual(bot.bot, "THE BOT")
class BotBotRepoTest(TestCase):
def test_bot_repo(self):
bot = bot_factory()
bot.provider.get_repo.return_value = "THE BOT REPO"
self.assertEqual(bot.bot_repo, "THE BOT REPO")
class BotPullRequestsTest(TestCase):
def test_iter_issues_called(self):
bot = bot_factory()
bot._fetched_prs = False
bot.provider.iter_issues = Mock(return_value=[])
bot.pull_requests
self.assertEqual(bot.provider.iter_issues.call_count, 1)
class BotRepoConfigTest(TestCase):
def test_fetches_file_success(self):
bot = bot_factory()
bot.provider.get_file.return_value = "foo: bar", None
self.assertEqual(bot.get_repo_config(bot.user_repo), {"foo": "bar"})
def test_yaml_error(self):
bot = bot_factory()
bot.provider.get_file.return_value = "foo: bar: baz: fii:", None
with self.assertRaises(ConfigError):
self.assertEqual(bot.get_repo_config(bot.user_repo), None)
def test_fetches_file_error(self):
bot = bot_factory()
bot.provider.get_file.return_value = None, None
self.assertEqual(bot.get_repo_config(bot.user_repo), None)
class BotConfigureTest(TestCase):
def test_kwargs(self):
bot = bot_factory()
bot.provider.get_file.return_value = None, None
bot.configure(branch="bogus-branch", pin="bogus-pin", close_prs="bogus-close")
self.assertEqual(bot.config.branch, "bogus-branch")
self.assertEqual(bot.config.pin, "bogus-pin")
self.assertEqual(bot.config.close_prs, "bogus-close")
def test_file(self):
bot = bot_factory()
bot.provider.get_file.return_value = "close_prs: bogus-close\nbranch: bogus-branch", None
bot.configure()
self.assertEqual(bot.config.branch, "bogus-branch")
self.assertEqual(bot.config.close_prs, "bogus-close")
def test_numeric_branch(self):
bot = bot_factory()
bot.provider.get_file.return_value = "branch: 2.0\n", None
bot.configure()
self.assertEqual(bot.config.branch, "2.0")
def test_write_config(self):
bot = bot_factory()
bot.provider.get_file.return_value = None, None
bot.configure(write_config={'branch': 'bogus-branch'})
self.assertEqual(bot.config.branch, "bogus-branch")
class BotUpdateTest(TestCase):
def test_branch_is_none(self):
bot = bot_factory()
bot.provider.get_default_branch.return_value = "the foo"
bot.provider.get_file.return_value = None, None
bot.get_all_requirements = Mock()
bot.apply_updates = Mock()
bot.update()
self.assertEqual(bot.config.branch, "the foo")
def test_branch_is_set(self):
bot = bot_factory()
bot.get_all_requirements = Mock()
bot.apply_updates = Mock()
bot.provider.get_file.return_value = None, None
bot.update(branch="the branch")
self.assertEqual(bot.config.branch, "the branch")
class BotApplyUpdateTest(TestCase):
def test_apply_update_pull_request_exists(self):
the_requirement = Mock()
the_pull = pullrequest_factory("The PR")
bot = bot_factory(prs=[the_pull])
bot.req_bundle.get_updates = Mock()
update = RequirementUpdate(
requirement_file="foo", requirement=the_requirement, commit_message="foo"
)
bot.req_bundle.get_updates.return_value = [
("The PR", "", "", [update])]
bot.apply_updates(initial=True, scheduled=False)
self.assertEqual(the_requirement.pull_request, the_pull)
def test_updates_empty(self):
bot = bot_factory()
bot.create_issue = Mock()
bot.req_bundle.get_updates = Mock(side_effect=IndexError)
bot.apply_updates(initial=True, scheduled=False)
bot.create_issue.assert_called_once_with(
title=InitialUpdate.get_title(),
body=InitialUpdate.get_empty_update_body()
)
def test_updates_empty_with_prefix(self):
bot = bot_factory()
bot.config.pr_prefix = "Some Prefix"
bot.create_issue = Mock()
bot.req_bundle.get_updates = Mock(side_effect=IndexError)
bot.apply_updates(initial=True, scheduled=False)
bot.create_issue.assert_called_once_with(
title="Some Prefix " + InitialUpdate.get_title(),
body=InitialUpdate.get_empty_update_body()
)
def test_updates_empty_with_write_config(self):
bot = bot_factory()
bot.write_config = {'foo': 'bar'}
bot.create_issue = Mock()
bot.req_bundle.get_updates = Mock(side_effect=IndexError)
bot.pull_config = Mock()
bot.apply_updates(initial=True, scheduled=False)
bot.create_issue.assert_called_once_with(
title=InitialUpdate.get_title(),
body=InitialUpdate.get_empty_update_body()
)
bot.pull_config.assert_called_once_with(
{'foo': 'bar'}
)
def test_apply_update_pull_request_new(self):
the_requirement = Mock()
the_pull = pullrequest_factory("The PR")
bot = bot_factory(prs=[the_pull])
bot.req_bundle.get_updates = Mock()
update = RequirementUpdate(
requirement_file="foo", requirement=the_requirement, commit_message="foo"
)
bot.req_bundle.get_updates.return_value = [("The PR", "", "", [update])]
bot.commit_and_pull = Mock()
bot.commit_and_pull.return_value = the_pull
bot.apply_updates(initial=True, scheduled=False)
self.assertEqual(the_requirement.pull_request, the_pull)
def test_apply_update_with_prefix_pull_request_new(self):
the_requirement = Mock()
the_pull = pullrequest_factory("The PR")
bot = bot_factory(prs=[the_pull])
bot.config.pr_prefix = "Some Prefix"
bot.req_bundle.get_updates = Mock()
update = RequirementUpdate(
requirement_file="foo", requirement=the_requirement, commit_message="foo"
)
bot.req_bundle.get_updates.return_value = [("The PR", "", "", [update])]
bot.commit_and_pull = Mock()
bot.commit_and_pull.return_value = the_pull
bot.apply_updates(initial=False, scheduled=False)
self.assertEqual(the_requirement.pull_request, the_pull)
bot.commit_and_pull.assert_called_once_with(
body='',
initial=False,
new_branch=u'pyup-',
title=u'Some Prefix The PR',
updates=[update]
)
def test_apply_update_with_write_config(self):
the_requirement = Mock()
the_pull = pullrequest_factory("The PR")
bot = bot_factory(prs=[the_pull])
bot.write_config = {'foo': 'bar'}
bot.pull_config = Mock()
bot.req_bundle.get_updates = Mock()
update = RequirementUpdate(
requirement_file="foo", requirement=the_requirement, commit_message="foo"
)
bot.req_bundle.get_updates.return_value = [("The PR", "", "", [update])]
bot.commit_and_pull = Mock()
bot.commit_and_pull.return_value = the_pull
bot.apply_updates(initial=True, scheduled=False)
self.assertEqual(the_requirement.pull_request, the_pull)
bot.pull_config.assert_called_once_with(
{'foo': 'bar'}
)
def test_close_stale_prs_called(self):
the_requirement = Mock()
the_pull = pullrequest_factory("The PR")
bot = bot_factory(prs=[])
bot.close_stale_prs = Mock()
bot.req_bundle.get_updates = Mock()
update = RequirementUpdate(
requirement_file="foo", requirement=the_requirement, commit_message="foo"
)
bot.req_bundle.get_updates.return_value = [("The PR", "", "", [update])]
bot.commit_and_pull = Mock()
bot.commit_and_pull.return_value = the_pull
bot.apply_updates(initial=False, scheduled=False)
self.assertEqual(the_requirement.pull_request, the_pull)
bot.close_stale_prs.assert_called_once_with(update=update, pull_request=the_pull,
scheduled=False)
def test_close_stale_prs_called_only_once_on_scheduled_run(self):
the_requirement = Mock()
the_pull = pullrequest_factory("Scheduled")
bot = bot_factory(prs=[])
bot.close_stale_prs = Mock()
bot.req_bundle.get_updates = Mock()
update = RequirementUpdate(
requirement_file="foo", requirement=the_requirement, commit_message="foo"
)
bot.req_bundle.get_updates.return_value = [("The PR", "", "", [update, update])]
bot.commit_and_pull = Mock()
bot.commit_and_pull.return_value = the_pull
bot.apply_updates(initial=False, scheduled=True)
self.assertEqual(the_requirement.pull_request, the_pull)
bot.close_stale_prs.assert_called_once_with(update=update, pull_request=the_pull,
scheduled=True)
def test_apply_update_initial_empty(self):
bot = bot_factory()
bot.req_bundle.get_updates = Mock()
bot.req_bundle.get_updates.return_value = [("", "", "", [])]
bot.provider.create_issue.return_value = None
bot.apply_updates(initial=True, scheduled=False)
create_issue_args_list = bot.provider.create_issue.call_args_list
self.assertEqual(len(create_issue_args_list), 1)
self.assertEqual(
create_issue_args_list[0][1]["body"],
InitialUpdate.get_empty_update_body()
)
self.assertEqual(
create_issue_args_list[0][1]["title"],
InitialUpdate.get_title()
)
def test_apply_update_initial_pr_still_open(self):
initial_pr = pullrequest_factory(
title=InitialUpdate.get_title(),
state="open",
)
bot = bot_factory(prs=[initial_pr])
the_requirement = Mock()
update = RequirementUpdate(
requirement_file="foo", requirement=the_requirement, commit_message="foo"
)
bot.req_bundle.get_updates = Mock()
bot.req_bundle.get_updates.return_value = [("The PR", "", "", [update])]
bot.apply_updates(initial=True, scheduled=False)
self.assertEqual(bot.provider.create_pull_request.called, False)
class BotCommitAndPullTest(TestCase):
def test_multiple_updates_in_file(self):
bot = bot_factory()
bot.provider.create_branch = Mock()
bot.provider.create_commit.side_effect = [
"sha1", "sha2", "sha3"
]
bot.create_pull_request = Mock()
requirement = Mock()
requirement.update_content.return_value = "new content"
updates = [
RequirementUpdate(
requirement_file=RequirementFile(
path="foo.txt",
content='',
sha='abcd'
),
requirement=requirement,
commit_message="foo"
),
RequirementUpdate(
requirement_file=RequirementFile(
path="foo.txt",
content='',
sha='abcd'
),
requirement=requirement,
commit_message="foo"
),
RequirementUpdate(
requirement_file=RequirementFile(
path="baz.txt",
content='',
sha='xyz'
),
requirement=requirement,
commit_message="foo"
)
]
bot.commit_and_pull(True, "new branch", "repo", "", updates)
self.assertEqual(bot.provider.create_commit.called, True)
self.assertEqual(bot.provider.create_commit.call_count, 2)
create_commit_calls = bot.provider.create_commit.call_args_list
# we're looking for the sha here. Make sure that the sha got updated with the new content
self.assertEqual(create_commit_calls[0][1]["sha"], "abcd")
self.assertEqual(create_commit_calls[1][1]["sha"], "xyz")
def test_create_branch_fails(self):
bot = bot_factory()
bot.create_branch = Mock(return_value=False)
self.assertEqual(bot.commit_and_pull(None, None, None, None, None), None)
class CreateBranchTest(TestCase):
def test_success(self):
bot = bot_factory()
self.assertEqual(bot.create_branch("new-branch", delete_empty=False), True)
bot.provider.create_branch.assert_called_once_with(
base_branch="base_branch", new_branch="new-branch", repo=bot.user_repo)
def test_error_dont_delete(self):
from pyup.errors import BranchExistsError
bot = bot_factory()
bot.provider.create_branch.side_effect = BranchExistsError
self.assertEqual(bot.create_branch("new-branch", delete_empty=False), False)
bot.provider.is_empty_branch.assert_not_called()
bot.provider.delete_branch.assert_not_called()
def test_error_delete(self):
from pyup.errors import BranchExistsError
bot = bot_factory()
bot.provider.create_branch.side_effect = BranchExistsError
bot.provider.is_empty_branch.return_value = True
bot.create_branch("new-branch", delete_empty=True)
self.assertEqual(bot.provider.is_empty_branch.call_count, 1)
self.assertEqual(bot.provider.delete_branch.call_count, 1)
self.assertEqual(len(bot.provider.create_branch.mock_calls), 2)
def test_branch_not_empty(self):
from pyup.errors import BranchExistsError
bot = bot_factory()
bot.provider.create_branch.side_effect = BranchExistsError
bot.provider.is_empty_branch.return_value = False
bot.create_branch("new-branch", delete_empty=True)
self.assertEqual(bot.provider.is_empty_branch.call_count, 1)
bot.provider.delete_branch.assert_not_called()
self.assertEqual(len(bot.provider.create_branch.mock_calls), 1)
class BotGetAllRequirementsTest(TestCase):
def test_non_matching_file_not_added(self):
bot = bot_factory()
bot.provider.iter_git_tree.return_value = ("blob", "foo.py"), # not added
bot.add_requirement_file = Mock()
bot.get_all_requirements()
self.assertEqual(bot.add_requirement_file.called, False)
def test_requirement_not_in_path(self):
bot = bot_factory()
bot.provider.iter_git_tree.return_value = ("blob", "this/that/bla/dev.pip"), # not added
bot.add_requirement_file = Mock()
bot.get_all_requirements()
self.assertEqual(bot.add_requirement_file.called, False)
def test_file_not_ending_with_txt_or_pip(self):
bot = bot_factory()
bot.provider.iter_git_tree.return_value = ("blob", "requirements/dev"), # not added
bot.add_requirement_file = Mock()
bot.get_all_requirements()
self.assertEqual(bot.add_requirement_file.called, False)
def test_matching_file_deep(self):
bot = bot_factory()
bot.provider.iter_git_tree.return_value = ("blob", "requirements/dev.txt"), # added
bot.add_requirement_file = Mock()
bot.get_all_requirements()
self.assertEqual(bot.add_requirement_file.called, True)
def test_matching_file(self):
bot = bot_factory()
bot.provider.iter_git_tree.return_value = ("blob", "requirements.txt"), # added
bot.add_requirement_file = Mock()
bot.get_all_requirements()
self.assertEqual(bot.add_requirement_file.called, True)
def test_matching_file_pip(self):
bot = bot_factory()
bot.provider.iter_git_tree.return_value = ("blob", "requirements.pip"), # added
bot.add_requirement_file = Mock()
bot.get_all_requirements()
self.assertEqual(bot.add_requirement_file.called, True)
def test_no_search(self):
bot = bot_factory()
bot.config.search = False
bot.provider.iter_git_tree.return_value = ("blob", "requirements.pip"), # added
bot.add_requirement_file = Mock()
bot.get_all_requirements()
self.assertEqual(bot.add_requirement_file.called, False)
def test_requirement_in_config(self):
bot = bot_factory()
bot.config.search = False
bot.config.requirements = [
RequirementConfig(path="foo.txt")
]
bot.add_requirement_file = Mock()
bot.get_all_requirements()
self.assertEqual(bot.add_requirement_file.called, True)
bot.add_requirement_file.assert_called_once_with("foo.txt", sha=None)
class BotAddRequirementFileTest(TestCase):
def test_file_is_in_path(self):
bot = bot_factory()
bot.req_bundle.has_file_in_path = Mock()
bot.req_bundle.append = Mock()
bot.req_bundle.has_file_in_path.return_value = True
bot.add_requirement_file("path",)
self.assertEqual(bot.provider.get_requirement_file.called, False)
self.assertEqual(bot.req_bundle.append.called, False)
def test_file_not_found(self):
bot = bot_factory()
bot.req_bundle.has_file_in_path = Mock()
bot.req_bundle.append = Mock()
bot.provider.get_requirement_file.return_value = None
bot.req_bundle.has_file_in_path.return_value = False
bot.add_requirement_file("path",)
self.assertEqual(bot.provider.get_requirement_file.called, True)
self.assertEqual(bot.req_bundle.append.called, False)
def test_file_found_single(self):
bot = bot_factory()
bot.req_bundle.has_file_in_path = Mock()
bot.req_bundle.append = Mock()
req_file = RequirementFile("path", "")
bot.provider.get_requirement_file.return_value = req_file
bot.req_bundle.has_file_in_path.return_value = False
bot.add_requirement_file("path",)
self.assertEqual(bot.provider.get_requirement_file.called, True)
self.assertEqual(bot.req_bundle.append.called, True)
def test_file_found_with_reference(self):
bot = bot_factory()
bot.req_bundle.has_file_in_path = Mock()
bot.req_bundle.append = Mock()
req_file = RequirementFile("path", "-r foo.txt")
bot.provider.get_requirement_file.side_effect = [req_file, None]
bot.req_bundle.has_file_in_path.return_value = False
bot.add_requirement_file("path")
self.assertEqual(bot.provider.get_requirement_file.called, True)
self.assertEqual(bot.req_bundle.append.called, True)
class BotCanPullTest(TestCase):
def test_valid_schedule_but_unscheduled_run(self):
bot = bot_factory(bot_token=None)
bot.config.is_valid_schedule = Mock()
bot.config.is_valid_schedule.return_value = True
self.assertFalse(bot.can_pull(False, False))
def test_valid_schedule_and_scheduled_run(self):
bot = bot_factory(bot_token=None)
bot.config.is_valid_schedule = Mock()
bot.config.is_valid_schedule.return_value = True
self.assertTrue(bot.can_pull(False, True))
def test_no_schedule(self):
bot = bot_factory(bot_token=None)
bot.config.is_valid_schedule = Mock()
bot.config.is_valid_schedule.return_value = False
self.assertTrue(bot.can_pull(False, False))
self.assertTrue(bot.can_pull(False, True))
def test_initial(self):
bot = bot_factory(bot_token=None)
bot.config.is_valid_schedule = Mock()
bot.config.is_valid_schedule.return_value = False
self.assertTrue(bot.can_pull(True, False))
self.assertTrue(bot.can_pull(True, True))
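# Illustrative sketch (an assumption inferred from BotCanPullTest, not the
# real bot method): can_pull(initial, scheduled) appears to gate runs on
# the configured schedule.
def _can_pull_sketch(has_valid_schedule, initial, scheduled):
    if initial:
        # initial runs always proceed
        return True
    if has_valid_schedule:
        # with a valid schedule, only the scheduled invocation runs
        return scheduled
    # without a schedule, every run is allowed
    return True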
class BotCreatePullRequestTest(TestCase):
def test_plain(self):
bot = bot_factory(bot_token=None)
bot._bot_repo = "BOT REPO"
bot._user_repo = "USER REPO"
bot.create_pull_request("title", "body", "new_branch")
self.assertEqual(bot.provider.create_pull_request.called, True)
self.assertEqual(bot.provider.create_pull_request.call_args_list[0][1], {
"base_branch": "base_branch",
"new_branch": "new_branch",
"repo": "USER REPO",
"body": "body",
"title": "title",
"pr_label": False,
"assignees": [],
"config": bot.config
})
def test_bot_no_errors(self):
bot = bot_factory(bot_token="foo")
bot._bot_repo = "BOT REPO"
bot._user_repo = "USER REPO"
bot.create_pull_request("title", "body", "new_branch")
self.assertEqual(bot.provider.create_pull_request.called, True)
self.assertEqual(bot.provider.create_pull_request.call_args_list[0][1], {
"base_branch": "base_branch",
"new_branch": "new_branch",
"repo": "BOT REPO",
"body": "body",
"title": "title",
"pr_label": False,
"assignees": [],
"config": bot.config
})
self.assertEqual(bot.provider.get_pull_request_permissions.called, False)
def test_bot_permission_error_resolved(self):
bot = bot_factory(bot_token="foo")
bot.provider.create_pull_request.side_effect = [NoPermissionError, "the foo"]
bot._bot_repo = "BOT REPO"
bot._user_repo = "USER REPO"
bot.create_pull_request("title", "body", "new_branch")
self.assertEqual(bot.provider.create_pull_request.called, True)
self.assertEqual(bot.provider.create_pull_request.call_args_list[0][1], {
"base_branch": "base_branch",
"new_branch": "new_branch",
"repo": "BOT REPO",
"body": "body",
"title": "title",
"pr_label": False,
"assignees": [],
"config": bot.config
})
self.assertEqual(bot.provider.create_pull_request.call_args_list[1][1], {
"base_branch": "base_branch",
"new_branch": "new_branch",
"repo": "BOT REPO",
"body": "body",
"title": "title",
"pr_label": False,
"assignees": [],
"config": bot.config
})
def test_bot_permission_error_not_resolved(self):
bot = bot_factory(bot_token="foo")
bot.provider.create_pull_request.side_effect = [NoPermissionError, NoPermissionError]
bot._bot_repo = "BOT REPO"
bot._user_repo = "USER REPO"
with self.assertRaises(NoPermissionError):
bot.create_pull_request("title", "body", "new_branch")
self.assertEqual(bot.provider.create_pull_request.called, True)
self.assertEqual(bot.provider.create_pull_request.call_args_list[0][1], {
"base_branch": "base_branch",
"new_branch": "new_branch",
"repo": "BOT REPO",
"body": "body",
"title": "title",
"pr_label": False,
"assignees": [],
"config": bot.config
})
self.assertEqual(bot.provider.create_pull_request.call_args_list[1][1], {
"base_branch": "base_branch",
"new_branch": "new_branch",
"repo": "BOT REPO",
"body": "body",
"title": "title",
"pr_label": False,
"assignees": [],
"config": bot.config
})
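# Illustrative sketch of the retry pattern the two permission-error tests
# above exercise (inferred from the tests; pyup's real code and the exact
# fix-up call are assumptions here): the first NoPermissionError triggers
# one permission fix-up and a single retry, a second failure propagates.
def _create_pr_with_retry_sketch(provider, user, repo, **kwargs):
    try:
        return provider.create_pull_request(**kwargs)
    except NoPermissionError:
        # hypothetical fix-up step; the tests only show that
        # get_pull_request_permissions is NOT called on the happy path
        provider.get_pull_request_permissions(user, repo)
        return provider.create_pull_request(**kwargs)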
class CloseStalePRsTestCase(TestCase):
def setUp(self):
self.pr = Mock()
self.pr.title = "First PR"
self.pr.canonical_title.return_value = "First PR"
self.pr.number = 100
self.pr.type = "update"
self.pr.is_update = True
self.pr.is_initial = False
self.update = Mock()
self.update.requirement.key = "some-req"
self.other_pr = Mock()
self.other_pr.type = "update"
self.other_pr.is_open = True
self.other_pr.title = "Second PR"
self.other_pr.canonical_title.return_value = "Second PR"
self.other_pr.get_requirement.return_value = "some-req"
self.other_pr.is_update = True
self.other_pr.is_initial = False
def test_scheduled_closing_scheduled(self):
self.pr.is_scheduled = True
self.other_pr.is_scheduled = True
bot = bot_factory(bot_token="foo", prs=[self.other_pr])
commiter = Mock()
bot.provider.get_pull_request_committer.return_value = [commiter]
bot.close_stale_prs(self.update, self.pr, True)
bot.provider.get_pull_request_committer.assert_called_once_with(bot.user_repo,
self.other_pr)
def test_scheduled_closing_update(self):
self.pr.is_scheduled = True
bot = bot_factory(bot_token="foo", prs=[self.other_pr])
commiter = Mock()
bot.provider.get_pull_request_committer.return_value = [commiter]
bot.close_stale_prs(self.update, self.pr, True)
bot.provider.get_pull_request_committer.assert_called_once_with(bot.user_repo,
self.other_pr)
def test_no_bot_token(self):
bot = bot_factory()
self.pr.type = Mock()
bot.close_stale_prs(self.update, self.pr, False)
self.assertEqual(self.pr.type.call_count, 0)
def test_no_pull_requests(self):
bot = bot_factory(bot_token="foo")
bot.close_stale_prs(self.update, self.pr, False)
bot.provider.get_pull_request_committer.assert_not_called()
def test_close_success(self):
bot = bot_factory(bot_token="foo", prs=[self.other_pr])
commiter = Mock()
bot.provider.get_pull_request_committer.return_value = [commiter]
bot.close_stale_prs(self.update, self.pr, False)
bot.provider.get_pull_request_committer.assert_called_once_with(bot.user_repo, self.other_pr)
bot.provider.close_pull_request.assert_called_once_with(
bot_repo=bot.bot_repo,
user_repo=bot.user_repo,
pull_request=self.other_pr,
comment="Closing this in favor of #100",
prefix="pyup-"
)
def test_close_integration(self):
bot = bot_factory(bot_token="foo", prs=[self.other_pr])
bot.integration = True
bot.provider.integration = True
commiter = Mock()
bot.provider.get_pull_request_committer.return_value = [commiter]
bot.close_stale_prs(self.update, self.pr, False)
bot.provider.get_pull_request_committer.assert_called_once_with(bot.user_repo, self.other_pr)
bot.provider.close_pull_request.assert_called_once_with(
bot_repo=bot.bot_repo,
user_repo=bot.user_repo,
pull_request=self.other_pr,
comment="Closing this in favor of #100",
prefix="pyup-"
)
def test_close_success_with_prefix(self):
bot = bot_factory(bot_token="foo", prs=[self.other_pr])
bot.config.pr_prefix = "Some Prefix"
commiter = Mock()
bot.provider.get_pull_request_committer.return_value = [commiter]
bot.close_stale_prs(self.update, self.pr, False)
bot.provider.get_pull_request_committer.assert_called_once_with(bot.user_repo, self.other_pr)
bot.provider.close_pull_request.assert_called_once_with(
bot_repo=bot.bot_repo,
user_repo=bot.user_repo,
pull_request=self.other_pr,
comment="Closing this in favor of #100",
prefix="pyup-"
)
def test_wrong_pr_type(self):
bot = bot_factory(bot_token="foo", prs=[self.other_pr])
self.other_pr.is_update = False
commiter = Mock()
bot.provider.get_pull_request_committer.return_value = [commiter]
bot.close_stale_prs(self.update, self.pr, False)
bot.provider.get_pull_request_committer.assert_not_called()
bot.provider.close_pull_request.assert_not_called()
def test_pr_closed(self):
bot = bot_factory(bot_token="foo", prs=[self.other_pr])
self.other_pr.is_open = False
commiter = Mock()
bot.provider.get_pull_request_committer.return_value = [commiter]
bot.close_stale_prs(self.update, self.pr, False)
bot.provider.get_pull_request_committer.assert_not_called()
bot.provider.close_pull_request.assert_not_called()
def test_same_title(self):
bot = bot_factory(bot_token="foo", prs=[self.other_pr])
self.other_pr.title = "First PR"
self.other_pr.canonical_title.return_value = "First PR"
commiter = Mock()
bot.provider.get_pull_request_committer.return_value = [commiter]
bot.close_stale_prs(self.update, self.pr, False)
bot.provider.get_pull_request_committer.assert_not_called()
bot.provider.close_pull_request.assert_not_called()
def test_requirement_doesnt_match(self):
bot = bot_factory(bot_token="foo", prs=[self.other_pr])
self.other_pr.get_requirement.return_value = "other-req"
commiter = Mock()
bot.provider.get_pull_request_committer.return_value = [commiter]
bot.close_stale_prs(self.update, self.pr, False)
bot.provider.get_pull_request_committer.assert_not_called()
bot.provider.close_pull_request.assert_not_called()
def test_more_than_one_committer(self):
bot = bot_factory(bot_token="foo", prs=[self.other_pr])
commiter, commiter1 = Mock(), Mock()
bot.provider.get_pull_request_committer.return_value = [commiter, commiter1]
bot.close_stale_prs(self.update, self.pr, False)
bot.provider.get_pull_request_committer.assert_called_once_with(bot.user_repo, self.other_pr)
bot.provider.close_pull_request.assert_not_called()
def test_committer_is_not_bot_user(self):
bot = bot_factory(bot_token="foo", prs=[self.other_pr])
commiter = Mock()
bot.provider.get_pull_request_committer.return_value = [commiter]
bot.provider.is_same_user.return_value = False
bot.close_stale_prs(self.update, self.pr, False)
bot.provider.get_pull_request_committer.assert_called_once_with(bot.user_repo, self.other_pr)
bot.provider.close_pull_request.assert_not_called()
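# Illustrative summary (reconstructed from the assertions in
# CloseStalePRsTestCase; pyup's implementation may differ): a competing PR
# is closed only when all of the following hold.
def _should_close_stale_pr_sketch(provider, bot_user, user_repo,
                                  new_pr, other_pr, update):
    if not (other_pr.is_update and other_pr.is_open):
        return False  # wrong PR type, or already closed
    if other_pr.canonical_title() == new_pr.canonical_title():
        return False  # same title: never close the new PR against itself
    if other_pr.get_requirement() != update.requirement.key:
        return False  # touches a different requirement
    committers = provider.get_pull_request_committer(user_repo, other_pr)
    # close only PRs authored solely by the bot account itself
    return len(committers) == 1 and provider.is_same_user(bot_user, committers[0])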
class ConflictingUpdateTest(TestCase):
def test_no_conflict(self):
bot = bot_factory()
update1 = Mock()
update1.requirement.key = "pkg"
update1.requirement.latest_version_within_specs = "1.0"
update2 = Mock()
update2.requirement.key = "other-pkg"
update1.requirement.latest_version_within_specs = "1.0"
bot.iter_updates = Mock(return_value=[
[None, None, None, [update1]],
[None, None, None, [update2]]
])
self.assertFalse(
bot.has_conflicting_update(update1)
)
def test_has_conflict(self):
bot = bot_factory()
update1 = Mock()
update1.requirement.key = "pkg"
update1.requirement.latest_version_within_specs = "1.0"
update2 = Mock()
update2.requirement.key = "pkg"
update1.requirement.latest_version_within_specs = "1.4"
bot.iter_updates = Mock(return_value=[
[None, None, None, [update1]],
[None, None, None, [update2]]
])
self.assertTrue(
bot.has_conflicting_update(update1)
)
def test_fool_loop(self):
bot = bot_factory()
update1 = Mock()
update1.requirement.key = "google-api-python-client"
update1.requirement.latest_version_within_specs = "1.5.3"
update1.commit_message = "Update google-api-python-client from 1.5.1 to 1.5.3"
update2 = Mock()
update2.requirement.key = "google-api-python-client"
update2.requirement.latest_version_within_specs = "1.5.3"
update2.commit_message = "Pin google-api-python-client to latest version 1.5.3"
bot.iter_updates = Mock(return_value=[
[None, None, None, [update1]],
[None, None, None, [update2]]
])
self.assertTrue(
bot.has_conflicting_update(update1)
)
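# Illustrative sketch (inferred from ConflictingUpdateTest; the real
# has_conflicting_update may compare versions or commit messages too):
# two queued updates conflict when another pending update touches the same
# requirement key, which also covers the "fool loop" case above where two
# different commit messages pin the same package.
def _has_conflicting_update_sketch(bot, update):
    for _, _, _, updates in bot.iter_updates():
        for other in updates:
            if other is update:
                continue
            if other.requirement.key == update.requirement.key:
                return True
    return False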
class IgnoreSslTest(TestCase):
def test_ignore_ssl_default_false(self):
bot = Bot(repo='foo/foo', user_token='foo')
self.assertFalse(bot.provider.ignore_ssl)
def test_ignore_ssl_true(self):
bot = Bot(repo='foo/foo', user_token='foo', ignore_ssl=True)
self.assertTrue(bot.provider.ignore_ssl)
| mit |
kaeff/pixelated-user-agent | service/test/functional/features/steps/mail_list.py | 5 | 4426 | #
# Copyright (c) 2014 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
from common import *
from selenium.common.exceptions import NoSuchElementException
def find_current_mail(context):
print 'searching for mail [%s]' % context.current_mail_id
return find_element_by_id(context, '%s' % context.current_mail_id)
def check_current_mail_is_visible(context):
find_current_mail(context)
def open_current_mail(context):
e = find_current_mail(context)
e.click()
def get_first_email(context):
return wait_until_elements_are_visible_by_locator(context, (By.CSS_SELECTOR, '#mail-list li span a'))[0]
@then('I see that mail under the \'{tag}\' tag')
def impl(context, tag):
context.execute_steps("when I select the tag '%s'" % tag)
context.execute_steps(u'When I open the first mail in the mail list')
@when('I open that mail')
def impl(context):
find_current_mail(context).click()
@when('I open the first mail in the mail list')
def impl(context):
# the page is often still loading at this point, so staleness exceptions are common
context.current_mail_id = 'mail-' + execute_ignoring_staleness(lambda: get_first_email(context).get_attribute('href').split('/')[-1])
execute_ignoring_staleness(lambda: get_first_email(context).click())
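# The retry-on-staleness helper used above comes from `common`; a minimal
# sketch consistent with how it is called here (an illustrative assumption,
# not the project's actual helper) could be:
def _execute_ignoring_staleness_sketch(action, retries=5):
    from selenium.common.exceptions import StaleElementReferenceException
    for _ in range(retries):
        try:
            return action()
        except StaleElementReferenceException:
            pass  # the list re-rendered; re-locate the element and retry
    return action()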
@when('I open the first mail in the \'{tag}\'')
def impl(context, tag):
context.execute_steps(u"When I select the tag '%s'" % tag)
context.execute_steps(u'When I open the first mail in the mail list')
@when('I open the mail I previously tagged')
def impl(context):
open_current_mail(context)
@then('I see the mail I sent')
def impl(context):
src = context.browser.page_source
assert context.reply_subject in src
@then('the deleted mail is there')
def impl(context):
mail_list_with_subject_exists(context, context.last_subject)
@given('I have mails')
def impl(context):
emails = wait_until_elements_are_visible_by_locator(context, (By.CSS_SELECTOR, '#mail-list li span a'))
assert len(emails) > 0
@when('I mark the first unread email as read')
def impl(context):
emails = wait_until_elements_are_visible_by_locator(context, (By.CSS_SELECTOR, '#mail-list li'))
for email in emails:
if 'status-read' not in email.get_attribute('class'):
context.current_mail_id = email.get_attribute('id') # we need to get the mail id before manipulating the page
email.find_element_by_tag_name('input').click()
find_element_by_id(context, 'mark-selected-as-read').click()
break
wait_until_elements_are_visible_by_locator(context, (By.CSS_SELECTOR, '#%s.status-read' % context.current_mail_id))
@when('I delete the email')
def impl(context):
def last_email():
return wait_until_element_is_visible_by_locator(context, (By.CSS_SELECTOR, '#mail-list li'))
mail = last_email()
context.current_mail_id = mail.get_attribute('id')
mail.find_element_by_tag_name('input').click()
find_element_by_id(context, 'delete-selected').click()
_wait_for_mail_list_to_be_empty(context)
def _wait_for_mail_list_to_be_empty(context):
wait_for_loading_to_finish(context)
def mail_list_is_empty(_):
with ImplicitWait(context, timeout=0.1):
try:
return 0 == len(context.browser.find_elements_by_css_selector('#mail-list li'))
except TimeoutException:
return False
wait_for_condition(context, mail_list_is_empty)
@when('I check all emails')
def impl(context):
find_element_by_id(context, 'toggle-check-all-emails').click()
@when('I delete them permanently')
def impl(context):
find_element_by_id(context, 'delete-selected').click()
@then('I should not see any email')
def impl(context):
_wait_for_mail_list_to_be_empty(context)
| agpl-3.0 |
ahojjati/grr | lib/flows/general/file_finder.py | 6 | 12587 | #!/usr/bin/env python
"""Search for certain files, filter them by given criteria and do something."""
import stat
from grr.lib import aff4
from grr.lib import flow
from grr.lib import utils
from grr.lib.flows.general import filesystem
from grr.lib.flows.general import fingerprint
from grr.lib.flows.general import transfer
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import flows_pb2
class FileFinderModificationTimeCondition(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.FileFinderModificationTimeCondition
class FileFinderAccessTimeCondition(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.FileFinderAccessTimeCondition
class FileFinderInodeChangeTimeCondition(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.FileFinderInodeChangeTimeCondition
class FileFinderSizeCondition(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.FileFinderSizeCondition
class FileFinderContentsRegexMatchCondition(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.FileFinderContentsRegexMatchCondition
class FileFinderContentsLiteralMatchCondition(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.FileFinderContentsLiteralMatchCondition
class FileFinderCondition(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.FileFinderCondition
class FileFinderDownloadActionOptions(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.FileFinderDownloadActionOptions
class FileFinderAction(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.FileFinderAction
class FileFinderArgs(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.FileFinderArgs
class FileFinderResult(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.FileFinderResult
class FileFinder(transfer.MultiGetFileMixin,
fingerprint.FingerprintFileMixin,
filesystem.GlobMixin,
flow.GRRFlow):
"""This flow looks for files matching given criteria and acts on them.
FileFinder searches for files that match glob expressions. The "action"
(e.g. Download) is applied to files that match all given "conditions".
Matches are then written to the results collection. If there are no
"conditions" specified, "action" is just applied to all found files.
FileFinder replaces these deprecated flows: FetchFiles, FingerprintFile
and SearchFileContent.
"""
friendly_name = "File Finder"
category = "/Filesystem/"
args_type = FileFinderArgs
behaviours = flow.GRRFlow.behaviours + "BASIC"
@classmethod
def GetDefaultArgs(cls, token=None):
_ = token
return cls.args_type(paths=[r"c:\windows\system32\notepad.*"])
def Initialize(self):
super(FileFinder, self).Initialize()
type_enum = FileFinderCondition.Type
# For every condition type we specify a tuple (handle, weight).
# Conditions will be sorted by weight, so that the ones with the minimal
# weight will be executed earlier.
self.condition_handlers = {
type_enum.MODIFICATION_TIME: (self.ModificationTimeCondition, 0),
type_enum.ACCESS_TIME: (self.AccessTimeCondition, 0),
type_enum.INODE_CHANGE_TIME: (self.InodeChangeTimeCondition, 0),
type_enum.SIZE: (self.SizeCondition, 0),
type_enum.CONTENTS_REGEX_MATCH: (self.ContentsRegexMatchCondition, 1),
type_enum.CONTENTS_LITERAL_MATCH: (
self.ContentsLiteralMatchCondition, 1)
}
def _ConditionWeight(self, condition_options):
_, condition_weight = self.condition_handlers[
condition_options.condition_type]
return condition_weight
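# Worked example of the weight ordering (illustrative): for
# conditions = [CONTENTS_REGEX_MATCH, SIZE, MODIFICATION_TIME] the weights
# are [1, 0, 0], so sorted(conditions, key=self._ConditionWeight) yields
# [SIZE, MODIFICATION_TIME, CONTENTS_REGEX_MATCH] -- Python's sort is
# stable, so equal-weight conditions keep their original order. Cheap
# stat-based checks thus filter files before any client-side grep runs.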
@flow.StateHandler()
def Start(self):
"""Issue the find request."""
super(FileFinder, self).Start()
if not self.args.paths:
# Nothing to do.
return
self.state.Register("files_found", 0)
self.state.Register("sorted_conditions",
sorted(self.args.conditions, key=self._ConditionWeight))
self.state.file_size = self.args.file_size
if self.args.pathtype in (rdf_paths.PathSpec.PathType.MEMORY,
rdf_paths.PathSpec.PathType.REGISTRY):
# Memory and Registry StatEntries won't pass the file type check.
self.args.no_file_type_check = True
if self.args.pathtype == rdf_paths.PathSpec.PathType.MEMORY:
# If pathtype is MEMORY, we're treating provided paths not as globs,
# but as paths to memory devices.
for path in self.args.paths:
pathspec = rdf_paths.PathSpec(
path=utils.SmartUnicode(path),
pathtype=rdf_paths.PathSpec.PathType.MEMORY)
aff4path = aff4.AFF4Object.VFSGRRClient.PathspecToURN(
pathspec, self.client_id)
stat_entry = rdf_client.StatEntry(aff4path=aff4path, pathspec=pathspec)
self.ApplyCondition(FileFinderResult(stat_entry=stat_entry),
condition_index=0)
else:
self.GlobForPaths(self.args.paths, pathtype=self.args.pathtype,
no_file_type_check=self.args.no_file_type_check)
def GlobReportMatch(self, response):
"""This method is called by the glob mixin when there is a match."""
super(FileFinder, self).GlobReportMatch(response)
self.ApplyCondition(FileFinderResult(stat_entry=response),
condition_index=0)
def ModificationTimeCondition(self, response, condition_options,
condition_index):
"""Applies modification time condition to responses."""
settings = condition_options.modification_time
if (settings.min_last_modified_time.AsSecondsFromEpoch() <=
response.stat_entry.st_mtime <=
settings.max_last_modified_time.AsSecondsFromEpoch()):
self.ApplyCondition(response, condition_index + 1)
def AccessTimeCondition(self, response, condition_options, condition_index):
"""Applies access time condition to responses."""
settings = condition_options.access_time
if (settings.min_last_access_time.AsSecondsFromEpoch() <=
response.stat_entry.st_atime <=
settings.max_last_access_time.AsSecondsFromEpoch()):
self.ApplyCondition(response, condition_index + 1)
def InodeChangeTimeCondition(self, response, condition_options,
condition_index):
"""Applies inode change time condition to responses."""
settings = condition_options.inode_change_time
if (settings.min_last_inode_change_time.AsSecondsFromEpoch() <=
response.stat_entry.st_ctime <=
settings.max_last_inode_change_time.AsSecondsFromEpoch()):
self.ApplyCondition(response, condition_index + 1)
def SizeCondition(self, response, condition_options, condition_index):
"""Applies size condition to responses."""
if not (self.args.no_file_type_check or
stat.S_ISREG(response.stat_entry.st_mode)):
return
if (condition_options.size.min_file_size <=
response.stat_entry.st_size <=
condition_options.size.max_file_size):
self.ApplyCondition(response, condition_index + 1)
def ContentsRegexMatchCondition(self, response, condition_options,
condition_index):
"""Applies contents regex condition to responses."""
if not (self.args.no_file_type_check or
stat.S_ISREG(response.stat_entry.st_mode)):
return
options = condition_options.contents_regex_match
grep_spec = rdf_client.GrepSpec(
target=response.stat_entry.pathspec,
regex=options.regex,
mode=options.mode,
start_offset=options.start_offset,
length=options.length,
bytes_before=options.bytes_before,
bytes_after=options.bytes_after)
self.CallClient(
"Grep", request=grep_spec, next_state="ProcessGrep",
request_data=dict(
original_result=response,
condition_index=condition_index + 1))
def ContentsLiteralMatchCondition(self, response, condition_options,
condition_index):
"""Applies literal match condition to responses."""
if not (self.args.no_file_type_check or
stat.S_ISREG(response.stat_entry.st_mode)):
return
options = condition_options.contents_literal_match
grep_spec = rdf_client.GrepSpec(
target=response.stat_entry.pathspec,
literal=options.literal,
mode=options.mode,
start_offset=options.start_offset,
length=options.length,
bytes_before=options.bytes_before,
bytes_after=options.bytes_after,
xor_in_key=options.xor_in_key,
xor_out_key=options.xor_out_key)
self.CallClient(
"Grep", request=grep_spec, next_state="ProcessGrep",
request_data=dict(
original_result=response,
condition_index=condition_index + 1))
@flow.StateHandler()
def ProcessGrep(self, responses):
for response in responses:
if "original_result" not in responses.request_data:
raise RuntimeError("Got a buffer reference, but original result "
"is missing")
condition_index = responses.request_data["condition_index"]
original_result = responses.request_data["original_result"]
original_result.matches.append(response)
self.ApplyCondition(original_result, condition_index)
def ApplyCondition(self, response, condition_index):
"""Applies next condition to responses."""
if condition_index >= len(self.state.sorted_conditions):
# All conditions satisfied, do the action now.
self.ProcessAction(response)
else:
# Apply the next condition handler.
condition_options = self.state.sorted_conditions[condition_index]
condition_handler, _ = self.condition_handlers[
condition_options.condition_type]
condition_handler(response, condition_options, condition_index)
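# Illustrative trace of the chain above: with
# sorted_conditions = [SIZE, CONTENTS_LITERAL_MATCH] and a result R,
#   ApplyCondition(R, 0) -> SizeCondition -> ApplyCondition(R, 1)
#     -> ContentsLiteralMatchCondition -> CallClient("Grep", ...)
#     -> ProcessGrep -> ApplyCondition(R, 2) -> ProcessAction(R).
# A condition that is not satisfied simply never calls ApplyCondition
# again, which silently drops the file from the results.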
def ProcessAction(self, response):
"""Applies action specified by user to responses."""
action = self.state.args.action.action_type
if action == FileFinderAction.Action.STAT:
# If action is STAT, we already have all the data we need to send the
# response.
self.state.files_found += 1
self.SendReply(response)
elif (self.args.no_file_type_check or
stat.S_ISREG(response.stat_entry.st_mode)):
# Hashing and downloading only makes sense for regular files. Reply is
# sent only when we get file's hash.
self.state.files_found += 1
if action == FileFinderAction.Action.HASH:
self.FingerprintFile(response.stat_entry.pathspec,
request_data=dict(original_result=response))
elif action == FileFinderAction.Action.DOWNLOAD:
# If the binary is too large we don't download it, but take a
# fingerprint instead.
file_size = response.stat_entry.st_size
if file_size > self.args.action.download.max_size:
self.Log("%s too large to fetch. Size=%d",
response.stat_entry.pathspec.CollapsePath(), file_size)
self.FingerprintFile(response.stat_entry.pathspec,
request_data=dict(original_result=response))
else:
self.StartFileFetch(response.stat_entry.pathspec,
request_data=dict(original_result=response))
def ReceiveFileFingerprint(self, urn, hash_obj, request_data=None):
"""Handle hash results from the FingerprintFileMixin."""
if "original_result" in request_data:
result = request_data["original_result"]
result.hash_entry = hash_obj
self.SendReply(result)
else:
raise RuntimeError("Got a fingerprintfileresult, but original result "
"is missing")
def ReceiveFetchedFile(self, unused_stat_entry, file_hash,
request_data=None):
"""Handle downloaded file from MultiGetFileMixin."""
if "original_result" not in request_data:
raise RuntimeError("Got fetched file data, but original result "
"is missing")
result = request_data["original_result"]
result.hash_entry = file_hash
self.SendReply(result)
@flow.StateHandler()
def End(self, responses):
super(FileFinder, self).End()
self.Log("Found and processed %d files.", self.state.files_found)
if self.runner.output is not None:
urn = self.runner.output.urn
else:
urn = self.client_id
self.Notify("ViewObject", urn,
"Found and processed %d files." % self.state.files_found)
| apache-2.0 |
spring-week-topos/horizon-week | openstack_dashboard/dashboards/project/containers/browsers.py | 8 | 1215 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import browsers
from openstack_dashboard.dashboards.project.containers import tables
class ContainerBrowser(browsers.ResourceBrowser):
name = "swift"
verbose_name = _("Swift")
navigation_table_class = tables.ContainersTable
content_table_class = tables.ObjectsTable
navigable_item_name = _("Container")
navigation_kwarg_name = "container_name"
content_kwarg_name = "subfolder_path"
has_breadcrumb = True
breadcrumb_url = "horizon:project:containers:index"
| apache-2.0 |
joshuajan/odoo | addons/account_sequence/account_sequence_installer.py | 39 | 3904 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_sequence_installer(osv.osv_memory):
_name = 'account.sequence.installer'
_inherit = 'res.config.installer'
_columns = {
'name': fields.char('Name',size=64, required=True),
'prefix': fields.char('Prefix',size=64, help="Prefix value of the record for the sequence"),
'suffix': fields.char('Suffix',size=64, help="Suffix value of the record for the sequence"),
'number_next': fields.integer('Next Number', required=True, help="Next number of this sequence"),
'number_increment': fields.integer('Increment Number', required=True, help="The next number of the sequence will be incremented by this number"),
'padding' : fields.integer('Number padding', required=True, help="OpenERP will automatically add some '0' on the left of the 'Next Number' to get the required padding size."),
'company_id': fields.many2one('res.company', 'Company'),
}
_defaults = {
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.sequence', context=c),
'number_increment': 1,
'number_next': 1,
'padding' : 0,
'name': 'Internal Sequence Journal',
}
def execute(self, cr, uid, ids, context=None):
if context is None:
context = {}
record = self.browse(cr, uid, ids, context=context)[0]
j_ids = []
if record.company_id:
company_id = record.company_id.id
search_criteria = [('company_id', '=', company_id)]
else:
company_id = False
search_criteria = []
vals = {
'id': 'internal_sequence_journal',
'code': 'account.journal',
'name': record.name,
'prefix': record.prefix,
'suffix': record.suffix,
'number_next': record.number_next,
'number_increment': record.number_increment,
'padding' : record.padding,
'company_id': company_id,
}
obj_sequence = self.pool.get('ir.sequence')
ir_seq = obj_sequence.create(cr, uid, vals, context)
res = super(account_sequence_installer, self).execute(cr, uid, ids, context=context)
jou_obj = self.pool.get('account.journal')
journal_ids = jou_obj.search(cr, uid, search_criteria, context=context)
for journal in jou_obj.browse(cr, uid, journal_ids, context=context):
if not journal.internal_sequence_id:
j_ids.append(journal.id)
if j_ids:
jou_obj.write(cr, uid, j_ids, {'internal_sequence_id': ir_seq})
ir_values_obj = self.pool.get('ir.values')
ir_values_obj.set(cr, uid, key='default', key2=False, name='internal_sequence_id', models =[('account.journal', False)], value=ir_seq)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AustereCuriosity/numpy | numpy/linalg/linalg.py | 2 | 75877 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
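# Illustrative helper (not part of the original module; the name is ours):
# demonstrates the promotion performed by _commonType above. The lite
# wrappers always compute in double/cdouble, while the result type tracks
# the highest input precision.
def _common_type_demo():
    a32 = zeros((2, 2), dtype=single)
    a64 = zeros((2, 2), dtype=double)
    # float32 alone: computed in double, result cast back to single
    assert _commonType(a32) == (double, single)
    # mixing float32 and float64 promotes the result type to double
    assert _commonType(a32, a64) == (double, double)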
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
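# Illustrative example (not part of the original module; the name is ours):
# solve() broadcasts over stacked matrices, as noted in its docstring.
# Each 2x2 system below is solved independently.
def _solve_broadcast_demo():
    a = array([[[3., 1.], [1., 2.]],
               [[2., 0.], [0., 4.]]])  # shape (2, 2, 2): two systems
    b = array([[9., 8.],
               [2., 8.]])              # shape (2, 2): one rhs per system
    x = solve(a, b)                    # shape (2, 2): x[0] = [2, 3], x[1] = [1, 2]
    assert x.shape == (2, 2)
    return x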
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
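# Illustrative example (not part of the original module; the name is ours):
# the two-step solve described in the cholesky Notes above. solve() is
# used for both triangular systems for simplicity; a dedicated triangular
# solver would exploit the structure of L.
def _cholesky_solve_demo():
    A = array([[4., 2.], [2., 3.]])    # symmetric positive-definite
    b = array([2., 1.])
    L = cholesky(A)                    # A = L L.H
    y = solve(L, b)                    # forward step:  L y = b
    x = solve(transpose(L).conj(), y)  # backward step: L.H x = y
    assert abs(dot(A, x) - b).max() < 1e-10
    return x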
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Same as `lower`, with 'L' for lower and 'U' for upper triangular.
Deprecated.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
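As a sketch of the distinction, transposing ``dot(y.T, a) = z * y.T``
gives ``dot(a.T, y) = z * y``, so for a real matrix the left eigenvectors
of `a` can be obtained as the right eigenvectors of ``a.T`` (illustrative;
e.g. via ``LA.eig(a.T)``).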
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
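# Pick the gufunc specialized for the matrix orientation (m < n vs.
# m >= n); the _f/_s suffixes correspond to full vs. reduced factors.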
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
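# svd returns the singular values in descending order, so the 2-norm
# condition number is the ratio of the largest to the smallest.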
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
"""
Return the matrix rank of an array using the SVD method.
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
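A tolerance can also be supplied explicitly. As a sketch (the threshold
value is illustrative), the rank-deficient matrix above keeps rank 3 for
any tolerance below 1:
>>> matrix_rank(I, tol=0.5)
3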
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
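As a sketch, `B` can also be built directly from the SVD (assuming, as is
almost surely the case here, that no singular value falls below the
`rcond` cutoff):
>>> u, s, vt = np.linalg.svd(a, full_matrices=False)
>>> np.allclose(B, np.dot(vt.T, np.dot(np.diag(1.0/s), u.T)))
True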
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
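# Integer workspace for LAPACK xGELSD; the size follows the documented
# bound 3*MINMN*NLVL + 11*MINMN, with NLVL estimated from log(MINMN).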
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
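As a sketch, both can be computed directly: ``sqrt((abs(A)**2).sum())``
gives the Frobenius norm and ``svd(A, compute_uv=False).sum()`` the
nuclear norm.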
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
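As a sketch, these numbers can be reproduced with the ``cost`` helper
defined above (shapes are illustrative):
>>> A, B, C = np.ones((10, 100)), np.ones((100, 5)), np.ones((5, 50))
>>> cost(A, B) + cost(np.dot(A, B), C)
7500
>>> cost(B, C) + cost(A, np.dot(B, C))
75000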
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
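# B's shape is (a1b0, b1c0) by conformability, so it never needs to be
# inspected; both candidate costs depend only on these four dimensions.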
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| bsd-3-clause |
htzy/bigfour | common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py | 113 | 6432 | import dogstats_wrapper as dog_stats_api
import logging
from .grading_service_module import GradingService
log = logging.getLogger(__name__)
class ControllerQueryService(GradingService):
"""
Interface to controller query backend.
"""
METRIC_NAME = 'edxapp.open_ended_grading.controller_query_service'
def __init__(self, config, render_template):
config['render_template'] = render_template
super(ControllerQueryService, self).__init__(config)
self.url = config['url'] + config['grading_controller']
self.login_url = self.url + '/login/'
self.check_eta_url = self.url + '/get_submission_eta/'
self.combined_notifications_url = self.url + '/combined_notifications/'
self.grading_status_list_url = self.url + '/get_grading_status_list/'
self.flagged_problem_list_url = self.url + '/get_flagged_problem_list/'
self.take_action_on_flags_url = self.url + '/take_action_on_flags/'
def check_for_eta(self, location):
params = {
'location': location,
}
data = self.get(self.check_eta_url, params)
self._record_result('check_for_eta', data)
dog_stats_api.histogram(self._metric_name('check_for_eta.eta'), data.get('eta', 0))
return data
def check_combined_notifications(self, course_id, student_id, user_is_staff, last_time_viewed):
params = {
'student_id': student_id,
'course_id': course_id.to_deprecated_string(),
'user_is_staff': user_is_staff,
'last_time_viewed': last_time_viewed,
}
log.debug(self.combined_notifications_url)
data = self.get(self.combined_notifications_url, params)
tags = [u'course_id:{}'.format(course_id.to_deprecated_string()), u'user_is_staff:{}'.format(user_is_staff)]
tags.extend(
u'{}:{}'.format(key, value)
for key, value in data.items()
if key not in ('success', 'version', 'error')
)
self._record_result('check_combined_notifications', data, tags)
return data
def get_grading_status_list(self, course_id, student_id):
params = {
'student_id': student_id,
'course_id': course_id.to_deprecated_string(),
}
data = self.get(self.grading_status_list_url, params)
tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
self._record_result('get_grading_status_list', data, tags)
dog_stats_api.histogram(
self._metric_name('get_grading_status_list.length'),
len(data.get('problem_list', [])),
tags=tags
)
return data
def get_flagged_problem_list(self, course_id):
params = {
'course_id': course_id.to_deprecated_string(),
}
data = self.get(self.flagged_problem_list_url, params)
tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
self._record_result('get_flagged_problem_list', data, tags)
dog_stats_api.histogram(
self._metric_name('get_flagged_problem_list.length'),
len(data.get('flagged_submissions', []))
)
return data
def take_action_on_flags(self, course_id, student_id, submission_id, action_type):
params = {
'course_id': course_id.to_deprecated_string(),
'student_id': student_id,
'submission_id': submission_id,
'action_type': action_type
}
data = self.post(self.take_action_on_flags_url, params)
tags = [u'course_id:{}'.format(course_id.to_deprecated_string()), u'action_type:{}'.format(action_type)]
self._record_result('take_action_on_flags', data, tags)
return data
class MockControllerQueryService(object):
"""
Mock controller query service for testing
"""
def __init__(self, config, render_template):
pass
def check_for_eta(self, *args, **kwargs):
"""
Mock later if needed. Stub function for now.
@param params:
@return:
"""
pass
def check_combined_notifications(self, *args, **kwargs):
combined_notifications = {
"flagged_submissions_exist": False,
"version": 1,
"new_student_grading_to_view": False,
"success": True,
"staff_needs_to_grade": False,
"student_needs_to_peer_grade": True,
"overall_need_to_check": True
}
return combined_notifications
def get_grading_status_list(self, *args, **kwargs):
grading_status_list = {
"version": 1,
"problem_list": [
{
"problem_name": "Science Question -- Machine Assessed",
"grader_type": "NA",
"eta_available": True,
"state": "Waiting to be Graded",
"eta": 259200,
"location": "i4x://MITx/oe101x/combinedopenended/Science_SA_ML"
}, {
"problem_name": "Humanities Question -- Peer Assessed",
"grader_type": "NA",
"eta_available": True,
"state": "Waiting to be Graded",
"eta": 259200,
"location": "i4x://MITx/oe101x/combinedopenended/Humanities_SA_Peer"
}
],
"success": True
}
return grading_status_list
def get_flagged_problem_list(self, *args, **kwargs):
flagged_problem_list = {
"version": 1,
"success": False,
"error": "No flagged submissions exist for course: MITx/oe101x/2012_Fall"
}
return flagged_problem_list
def take_action_on_flags(self, *args, **kwargs):
"""
Mock later if needed. Stub function for now.
@param params:
@return:
"""
pass
def convert_seconds_to_human_readable(seconds):
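"""Render a duration in seconds as a coarse human-readable string,
e.g. ``convert_seconds_to_human_readable(259200)`` -> "3.0 days"."""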
if seconds < 60:
human_string = "{0} seconds".format(seconds)
elif seconds < 60 * 60:
human_string = "{0} minutes".format(round(seconds / 60, 1))
elif seconds < (24 * 60 * 60):
human_string = "{0} hours".format(round(seconds / (60 * 60), 1))
else:
human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1))
return human_string
| agpl-3.0 |
camptocamp/ngo-addons-backport | addons/account/project/report/inverted_analytic_balance.py | 56 | 5710 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import pooler
from openerp.report import report_sxw
class account_inverted_analytic_balance(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_inverted_analytic_balance, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
'lines_g': self._lines_g,
'lines_a': self._lines_a,
'sum_debit': self._sum_debit,
'sum_credit': self._sum_credit,
'sum_balance': self._sum_balance,
'sum_quantity': self._sum_quantity,
})
def _lines_g(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT aa.name AS name, aa.code AS code, "
"sum(aal.amount) AS balance, "
"sum(aal.unit_amount) AS quantity, aa.id AS id \
FROM account_analytic_line AS aal, account_account AS aa \
WHERE (aal.general_account_id=aa.id) "
"AND (aal.account_id IN %s) "
"AND (date>=%s) AND (date<=%s) AND aa.active \
GROUP BY aal.general_account_id, aa.name, aa.code, aal.code, aa.id "
"ORDER BY aal.code",
(tuple(ids), date1, date2))
res = self.cr.dictfetchall()
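# Split each signed analytic balance into separate debit (positive) and
# credit (negative) columns for the report.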
for r in res:
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
r['debit'] = 0.0
r['credit'] = 0.0
return res
def _lines_a(self, accounts, general_account_id, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(aal.amount) AS balance, "
"sum(aal.unit_amount) AS quantity, "
"aaa.code AS code, aaa.name AS name, account_id \
FROM account_analytic_line AS aal, "
"account_analytic_account AS aaa \
WHERE aal.account_id=aaa.id AND aal.account_id IN %s "
"AND aal.general_account_id=%s AND aal.date>=%s "
"AND aal.date<=%s \
GROUP BY aal.account_id, general_account_id, aaa.code, aaa.name "
"ORDER BY aal.account_id",
(tuple(ids), general_account_id, date1, date2))
res = self.cr.dictfetchall()
aaa_obj = self.pool.get('account.analytic.account')
res2 = aaa_obj.read(self.cr, self.uid, ids, ['complete_name'])
complete_name = {}
for r in res2:
complete_name[r['id']] = r['complete_name']
for r in res:
r['complete_name'] = complete_name[r['account_id']]
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
r['debit'] = 0.0
r['credit'] = 0.0
return res
def _sum_debit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount>0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_credit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT -sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_balance(self, accounts, date1, date2):
debit = self._sum_debit(accounts, date1, date2)
credit = self._sum_credit(accounts, date1, date2)
return (debit-credit)
def _sum_quantity(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(unit_amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
report_sxw.report_sxw('report.account.analytic.account.inverted.balance', 'account.analytic.account', 'addons/account/project/report/inverted_analytic_balance.rml',parser=account_inverted_analytic_balance, header="internal")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
camsong/node-gyp | gyp/pylib/gyp/generator/gypd.py | 1824 | 3474 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'CONFIGURATION_NAME',
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'LIB_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
'SHARED_LIB_DIR',
'SHARED_LIB_PREFIX',
'SHARED_LIB_SUFFIX',
'STATIC_LIB_PREFIX',
'STATIC_LIB_SUFFIX',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
| mit |
streamlink/streamlink | tests/test_cli_main.py | 3 | 24369 | import datetime
import os
import sys
import tempfile
import unittest
from pathlib import Path, PosixPath, WindowsPath
from unittest.mock import Mock, call, patch
import freezegun
import streamlink_cli.main
import tests.resources
from streamlink.plugin.plugin import Plugin
from streamlink.session import Streamlink
from streamlink_cli.compat import DeprecatedPath, is_win32
from streamlink_cli.main import (
NoPluginError,
check_file_output,
create_output,
format_valid_streams,
handle_stream,
handle_url,
log_current_arguments,
resolve_stream_name,
setup_config_args
)
from streamlink_cli.output import FileOutput, PlayerOutput
class FakePlugin:
@classmethod
def stream_weight(cls, stream):
return Plugin.stream_weight(stream)
class TestCLIMain(unittest.TestCase):
def test_check_file_output(self):
streamlink_cli.main.console = Mock()
self.assertIsInstance(check_file_output("test", False), FileOutput)
def test_check_file_output_exists(self):
tmpfile = tempfile.NamedTemporaryFile()
try:
streamlink_cli.main.console = console = Mock()
streamlink_cli.main.sys.stdin = stdin = Mock()
stdin.isatty.return_value = True
console.ask.return_value = "y"
self.assertTrue(os.path.exists(tmpfile.name))
self.assertIsInstance(check_file_output(tmpfile.name, False), FileOutput)
finally:
tmpfile.close()
def test_check_file_output_exists_notty(self):
tmpfile = tempfile.NamedTemporaryFile()
try:
streamlink_cli.main.console = Mock()
streamlink_cli.main.sys.stdin = stdin = Mock()
stdin.isatty.return_value = False
self.assertTrue(os.path.exists(tmpfile.name))
self.assertRaises(SystemExit, check_file_output, tmpfile.name, False)
finally:
tmpfile.close()
def test_check_file_output_exists_force(self):
tmpfile = tempfile.NamedTemporaryFile()
try:
streamlink_cli.main.console = Mock()
self.assertTrue(os.path.exists(tmpfile.name))
self.assertIsInstance(check_file_output(tmpfile.name, True), FileOutput)
finally:
tmpfile.close()
@patch('sys.exit')
def test_check_file_output_exists_no(self, sys_exit):
tmpfile = tempfile.NamedTemporaryFile()
try:
streamlink_cli.main.console = console = Mock()
console.ask.return_value = "n"
self.assertTrue(os.path.exists(tmpfile.name))
check_file_output(tmpfile.name, False)
sys_exit.assert_called_with()
finally:
tmpfile.close()
def test_resolve_stream_name(self):
a = Mock()
b = Mock()
c = Mock()
d = Mock()
e = Mock()
streams = {
"160p": a,
"360p": b,
"480p": c,
"720p": d,
"1080p": e,
"worst": b,
"best": d,
"worst-unfiltered": a,
"best-unfiltered": e
}
self.assertEqual(resolve_stream_name(streams, "unknown"), "unknown")
self.assertEqual(resolve_stream_name(streams, "160p"), "160p")
self.assertEqual(resolve_stream_name(streams, "360p"), "360p")
self.assertEqual(resolve_stream_name(streams, "480p"), "480p")
self.assertEqual(resolve_stream_name(streams, "720p"), "720p")
self.assertEqual(resolve_stream_name(streams, "1080p"), "1080p")
self.assertEqual(resolve_stream_name(streams, "worst"), "360p")
self.assertEqual(resolve_stream_name(streams, "best"), "720p")
self.assertEqual(resolve_stream_name(streams, "worst-unfiltered"), "160p")
self.assertEqual(resolve_stream_name(streams, "best-unfiltered"), "1080p")
def test_format_valid_streams(self):
a = Mock()
b = Mock()
c = Mock()
streams = {
"audio": a,
"720p": b,
"1080p": c,
"worst": b,
"best": c
}
self.assertEqual(
format_valid_streams(FakePlugin, streams),
", ".join([
"audio",
"720p (worst)",
"1080p (best)"
])
)
streams = {
"audio": a,
"720p": b,
"1080p": c,
"worst-unfiltered": b,
"best-unfiltered": c
}
self.assertEqual(
format_valid_streams(FakePlugin, streams),
", ".join([
"audio",
"720p (worst-unfiltered)",
"1080p (best-unfiltered)"
])
)
@patch("streamlink_cli.main.args", stream_url=True, subprocess_cmdline=False)
@patch("streamlink_cli.main.console", json=True)
def test_handle_stream_with_json_and_stream_url(self, console, args):
stream = Mock()
streams = dict(best=stream)
plugin = Mock(FakePlugin(), module="fake", arguments=[], streams=Mock(return_value=streams))
handle_stream(plugin, streams, "best")
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [call(stream)])
self.assertEqual(console.error.mock_calls, [])
console.msg_json.mock_calls.clear()
console.json = False
handle_stream(plugin, streams, "best")
self.assertEqual(console.msg.mock_calls, [call("{0}", stream.to_url())])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.error.mock_calls, [])
console.msg.mock_calls.clear()
stream.to_url.side_effect = TypeError()
handle_stream(plugin, streams, "best")
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.exit.mock_calls, [call("The stream specified cannot be translated to a URL")])
@patch("streamlink_cli.main.args", stream_url=True, stream=[], default_stream=[], retry_max=0, retry_streams=0)
@patch("streamlink_cli.main.console", json=True)
def test_handle_url_with_json_and_stream_url(self, console, args):
stream = Mock()
streams = dict(worst=Mock(), best=stream)
plugin = Mock(FakePlugin(), module="fake", arguments=[], streams=Mock(return_value=streams))
with patch("streamlink_cli.main.streamlink", resolve_url=Mock(return_value=plugin)):
handle_url()
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [call(dict(plugin="fake", streams=streams))])
self.assertEqual(console.error.mock_calls, [])
console.msg_json.mock_calls.clear()
console.json = False
handle_url()
self.assertEqual(console.msg.mock_calls, [call("{0}", stream.to_manifest_url())])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.error.mock_calls, [])
console.msg.mock_calls.clear()
stream.to_manifest_url.side_effect = TypeError()
handle_url()
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.exit.mock_calls, [call("The stream specified cannot be translated to a URL")])
console.exit.mock_calls.clear()
def test_create_output_no_file_output_options(self):
streamlink_cli.main.console = Mock()
streamlink_cli.main.args = args = Mock()
args.output = None
args.stdout = None
args.record = None
args.record_and_pipe = None
args.title = None
args.player = "mpv"
args.player_args = ""
self.assertIsInstance(create_output(FakePlugin), PlayerOutput)
def test_create_output_file_output(self):
tmpfile = tempfile.NamedTemporaryFile()
try:
streamlink_cli.main.args = args = Mock()
streamlink_cli.main.console = Mock()
args.output = tmpfile.name
self.assertTrue(os.path.exists(tmpfile.name))
self.assertIsInstance(create_output(FakePlugin), FileOutput)
finally:
tmpfile.close()
def test_create_output_stdout(self):
streamlink_cli.main.console = Mock()
streamlink_cli.main.args = args = Mock()
args.output = None
args.stdout = True
self.assertIsInstance(create_output(FakePlugin), FileOutput)
def test_create_output_record_and_pipe(self):
tmpfile = tempfile.NamedTemporaryFile()
try:
streamlink_cli.main.console = Mock()
streamlink_cli.main.args = args = Mock()
args.output = None
args.stdout = None
args.record_and_pipe = tmpfile.name
self.assertIsInstance(create_output(FakePlugin), FileOutput)
finally:
tmpfile.close()
def test_create_output_record(self):
tmpfile = tempfile.NamedTemporaryFile()
try:
streamlink_cli.main.console = Mock()
streamlink_cli.main.args = args = Mock()
args.output = None
args.stdout = None
args.record = tmpfile.name
args.record_and_pipe = None
args.title = None
args.player = "mpv"
args.player_args = ""
args.player_fifo = None
self.assertIsInstance(create_output(FakePlugin), PlayerOutput)
finally:
tmpfile.close()
def test_create_output_record_and_other_file_output(self):
streamlink_cli.main.console = console = Mock()
streamlink_cli.main.args = args = Mock()
console.exit = Mock()
args.output = None
args.stdout = True
args.record_and_pipe = True
create_output(FakePlugin)
console.exit.assert_called_with("Cannot use record options with other file output options.")
@patch("streamlink_cli.main.log")
class TestCLIMainSetupConfigArgs(unittest.TestCase):
configdir = Path(tests.resources.__path__[0], "cli", "config")
parser = Mock()
@classmethod
def subject(cls, config_files, **args):
def resolve_url(name):
if name == "noplugin":
raise NoPluginError()
return Mock(module="testplugin")
session = Mock()
session.resolve_url.side_effect = resolve_url
args.setdefault("url", "testplugin")
with patch("streamlink_cli.main.setup_args") as mock_setup_args, \
patch("streamlink_cli.main.args", **args), \
patch("streamlink_cli.main.streamlink", session), \
patch("streamlink_cli.main.CONFIG_FILES", config_files):
setup_config_args(cls.parser)
return mock_setup_args
def test_no_plugin(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=None,
url="noplugin"
)
expected = [self.configdir / "primary"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
def test_default_primary(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=None
)
expected = [self.configdir / "primary", self.configdir / "primary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
def test_default_secondary_deprecated(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "non-existent", DeprecatedPath(self.configdir / "secondary")],
config=None
)
expected = [self.configdir / "secondary", self.configdir / "secondary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [
call(f"Loaded config from deprecated path, see CLI docs for how to migrate: {expected[0]}"),
call(f"Loaded plugin config from deprecated path, see CLI docs for how to migrate: {expected[1]}")
])
def test_custom_with_primary_plugin(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=[str(self.configdir / "custom")]
)
expected = [self.configdir / "custom", self.configdir / "primary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
def test_custom_with_deprecated_plugin(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "non-existent", DeprecatedPath(self.configdir / "secondary")],
config=[str(self.configdir / "custom")]
)
expected = [self.configdir / "custom", DeprecatedPath(self.configdir / "secondary.testplugin")]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [
call(f"Loaded plugin config from deprecated path, see CLI docs for how to migrate: {expected[1]}")
])
def test_custom_multiple(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=[str(self.configdir / "non-existent"), str(self.configdir / "primary"), str(self.configdir / "secondary")]
)
expected = [self.configdir / "secondary", self.configdir / "primary", self.configdir / "primary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
class _TestCLIMainLogging(unittest.TestCase):
@classmethod
def subject(cls, argv):
session = Streamlink()
session.load_plugins(os.path.join(os.path.dirname(__file__), "plugin"))
def _log_current_arguments(*args, **kwargs):
log_current_arguments(*args, **kwargs)
raise SystemExit
with patch("streamlink_cli.main.streamlink", session), \
patch("streamlink_cli.main.log_current_arguments", side_effect=_log_current_arguments), \
patch("streamlink_cli.main.CONFIG_FILES", []), \
patch("streamlink_cli.main.setup_signals"), \
patch("streamlink_cli.main.setup_streamlink"), \
patch("streamlink_cli.main.setup_plugins"), \
patch("streamlink_cli.main.setup_http_session"), \
patch("streamlink.session.Streamlink.load_builtin_plugins"), \
patch("sys.argv") as mock_argv:
mock_argv.__getitem__.side_effect = lambda x: argv[x]
try:
streamlink_cli.main.main()
except SystemExit:
pass
def tearDown(self):
streamlink_cli.main.logger.root.handlers.clear()
# python >=3.7.2: https://bugs.python.org/issue35046
_write_calls = (
([call("[cli][info] foo\n")]
if sys.version_info >= (3, 7, 2)
else [call("[cli][info] foo"), call("\n")])
+ [call("bar\n")]
)
def write_file_and_assert(self, mock_mkdir: Mock, mock_write: Mock, mock_stdout: Mock):
streamlink_cli.main.log.info("foo")
streamlink_cli.main.console.msg("bar")
self.assertEqual(mock_mkdir.mock_calls, [call(parents=True, exist_ok=True)])
self.assertEqual(mock_write.mock_calls, self._write_calls)
self.assertFalse(mock_stdout.write.called)
class TestCLIMainLogging(_TestCLIMainLogging):
@unittest.skipIf(is_win32, "test only applicable on a POSIX OS")
@patch("streamlink_cli.main.log")
@patch("streamlink_cli.main.os.geteuid", Mock(return_value=0))
def test_log_root_warning(self, mock_log):
self.subject(["streamlink"])
self.assertEqual(mock_log.info.mock_calls, [call("streamlink is running as root! Be careful!")])
@patch("streamlink_cli.main.log")
@patch("streamlink_cli.main.streamlink_version", "streamlink")
@patch("streamlink_cli.main.requests.__version__", "requests")
@patch("streamlink_cli.main.socks_version", "socks")
@patch("streamlink_cli.main.websocket_version", "websocket")
@patch("platform.python_version", Mock(return_value="python"))
def test_log_current_versions(self, mock_log):
self.subject(["streamlink", "--loglevel", "info"])
self.assertEqual(mock_log.debug.mock_calls, [], "Doesn't log anything if not debug logging")
with patch("sys.platform", "linux"), \
patch("platform.platform", Mock(return_value="linux")):
self.subject(["streamlink", "--loglevel", "debug"])
self.assertEqual(
mock_log.debug.mock_calls[:4],
[
call("OS: linux"),
call("Python: python"),
call("Streamlink: streamlink"),
call("Requests(requests), Socks(socks), Websocket(websocket)")
]
)
mock_log.debug.reset_mock()
with patch("sys.platform", "darwin"), \
patch("platform.mac_ver", Mock(return_value=["0.0.0"])):
self.subject(["streamlink", "--loglevel", "debug"])
self.assertEqual(
mock_log.debug.mock_calls[:4],
[
call("OS: macOS 0.0.0"),
call("Python: python"),
call("Streamlink: streamlink"),
call("Requests(requests), Socks(socks), Websocket(websocket)")
]
)
mock_log.debug.reset_mock()
with patch("sys.platform", "win32"), \
patch("platform.system", Mock(return_value="Windows")), \
patch("platform.release", Mock(return_value="0.0.0")):
self.subject(["streamlink", "--loglevel", "debug"])
self.assertEqual(
mock_log.debug.mock_calls[:4],
[
call("OS: Windows 0.0.0"),
call("Python: python"),
call("Streamlink: streamlink"),
call("Requests(requests), Socks(socks), Websocket(websocket)")
]
)
mock_log.debug.reset_mock()
@patch("streamlink_cli.main.log")
def test_log_current_arguments(self, mock_log):
self.subject([
"streamlink",
"--loglevel", "info"
])
self.assertEqual(mock_log.debug.mock_calls, [], "Doesn't log anything if not debug logging")
self.subject([
"streamlink",
"--loglevel", "debug",
"-p", "custom",
"--testplugin-bool",
"--testplugin-password=secret",
"website.tld/channel",
"best,worst"
])
self.assertEqual(
mock_log.debug.mock_calls[-7:],
[
call("Arguments:"),
call(" url=website.tld/channel"),
call(" stream=['best', 'worst']"),
call(" --loglevel=debug"),
call(" --player=custom"),
call(" --testplugin-bool=True"),
call(" --testplugin-password=********")
]
)
class TestCLIMainLoggingLogfile(_TestCLIMainLogging):
@patch("sys.stdout")
@patch("builtins.open")
def test_logfile_no_logfile(self, mock_open, mock_stdout):
self.subject(["streamlink"])
streamlink_cli.main.log.info("foo")
streamlink_cli.main.console.msg("bar")
self.assertEqual(streamlink_cli.main.console.output, sys.stdout)
self.assertFalse(mock_open.called)
self.assertEqual(mock_stdout.write.mock_calls, self._write_calls)
@patch("sys.stdout")
@patch("builtins.open")
def test_logfile_loglevel_none(self, mock_open, mock_stdout):
self.subject(["streamlink", "--loglevel", "none", "--logfile", "foo"])
streamlink_cli.main.log.info("foo")
streamlink_cli.main.console.msg("bar")
self.assertEqual(streamlink_cli.main.console.output, sys.stdout)
self.assertFalse(mock_open.called)
self.assertEqual(mock_stdout.write.mock_calls, [call("bar\n")])
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_relative(self, mock_open, mock_stdout):
path = Path("foo").resolve()
self.subject(["streamlink", "--logfile", "foo"])
self.write_file_and_assert(
mock_mkdir=path.mkdir,
mock_write=mock_open(str(path), "a").write,
mock_stdout=mock_stdout
)
@unittest.skipIf(is_win32, "test only applicable on a POSIX OS")
class TestCLIMainLoggingLogfilePosix(_TestCLIMainLogging):
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_absolute(self, mock_open, mock_stdout):
self.subject(["streamlink", "--logfile", "/foo/bar"])
self.write_file_and_assert(
mock_mkdir=PosixPath("/foo").mkdir,
mock_write=mock_open("/foo/bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_expanduser(self, mock_open, mock_stdout):
with patch.dict(os.environ, {"HOME": "/foo"}):
self.subject(["streamlink", "--logfile", "~/bar"])
self.write_file_and_assert(
mock_mkdir=PosixPath("/foo").mkdir,
mock_write=mock_open("/foo/bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
@freezegun.freeze_time(datetime.datetime(2000, 1, 2, 3, 4, 5))
def test_logfile_path_auto(self, mock_open, mock_stdout):
with patch("streamlink_cli.constants.LOG_DIR", PosixPath("/foo")):
self.subject(["streamlink", "--logfile", "-"])
self.write_file_and_assert(
mock_mkdir=PosixPath("/foo").mkdir,
mock_write=mock_open("/foo/2000-01-02_03-04-05.log", "a").write,
mock_stdout=mock_stdout
)
@unittest.skipIf(not is_win32, "test only applicable on Windows")
class TestCLIMainLoggingLogfileWindows(_TestCLIMainLogging):
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_absolute(self, mock_open, mock_stdout):
self.subject(["streamlink", "--logfile", "C:\\foo\\bar"])
self.write_file_and_assert(
mock_mkdir=WindowsPath("C:\\foo").mkdir,
mock_write=mock_open("C:\\foo\\bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_expanduser(self, mock_open, mock_stdout):
with patch.dict(os.environ, {"USERPROFILE": "C:\\foo"}):
self.subject(["streamlink", "--logfile", "~\\bar"])
self.write_file_and_assert(
mock_mkdir=WindowsPath("C:\\foo").mkdir,
mock_write=mock_open("C:\\foo\\bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
@freezegun.freeze_time(datetime.datetime(2000, 1, 2, 3, 4, 5))
def test_logfile_path_auto(self, mock_open, mock_stdout):
with patch("streamlink_cli.constants.LOG_DIR", WindowsPath("C:\\foo")):
self.subject(["streamlink", "--logfile", "-"])
self.write_file_and_assert(
mock_mkdir=WindowsPath("C:\\foo").mkdir,
mock_write=mock_open("C:\\foo\\2000-01-02_03-04-05.log", "a").write,
mock_stdout=mock_stdout
)
| bsd-2-clause |
CCI-MOC/GUI-Backend | core/models/application_version.py | 1 | 15022 | """
ApplicationVersion models for atmosphere.
"""
import uuid
from django.db import models, IntegrityError
from django.db.models import Q
from django.utils import timezone
from django.contrib.auth.models import AnonymousUser
from threepio import logger
from django.core.exceptions import ObjectDoesNotExist as DoesNotExist
from core.models.provider import AccountProvider
from core.models.license import License
from core.models.identity import Identity
from core.query import only_current_source, only_current, only_current_machines_in_version
class ApplicationVersion(models.Model):
"""
As an Application is Updated/Forked, it may be replicated
across several different provider machines/volumes.
When creating the request the author will usually
create/change 'common information'
Things like:
- Description
- Installed Software
- Excluded Files
This is a container for that information.
NOTE: Using this as the 'model' for DB moving to ID==UUID format.
"""
# Required
id = models.UUIDField(primary_key=True, default=uuid.uuid4, unique=True, editable=False)
application = models.ForeignKey("Application", related_name="versions")
# NOTE: Parent is 'null' when this version was created by a STAFF user
# (For Ex: imported an image, etc.)
parent = models.ForeignKey("ApplicationVersion", blank=True, null=True)
name = models.CharField(max_length=256)
# Optional/default available
change_log = models.TextField(null=True, blank=True)
allow_imaging = models.BooleanField(default=True)
start_date = models.DateTimeField(default=timezone.now)
end_date = models.DateTimeField(null=True, blank=True)
# User/Identity that created the version object
created_by = models.ForeignKey('AtmosphereUser')
created_by_identity = models.ForeignKey(Identity, null=True)
# TODO: Decide if we want to enable this information.. Is it useful?
# As it stands now, we collect this information on the request, but
# this would allow users to edit/interact/view?
system_files = models.TextField(default='', null=True, blank=True)
installed_software = models.TextField(default='', null=True, blank=True)
excluded_files = models.TextField(default='', null=True, blank=True)
licenses = models.ManyToManyField(License,
blank=True, related_name='application_versions')
boot_scripts = models.ManyToManyField(
"BootScript",
blank=True,
related_name='application_versions')
membership = models.ManyToManyField('Group',
related_name='application_versions',
through='ApplicationVersionMembership',
blank=True)
class Meta:
db_table = 'application_version'
app_label = 'core'
unique_together = ('application', 'name')
# NOTE: Created_by, created_by_ident will be == Application (EVERY TIME!)
def __unicode__(self):
return "%s - %s - %s" % (self.application.name,
self.name,
self.start_date if not self.end_date else "END-DATED")
def get_threshold(self):
# TODO: except ObjectDoesNotExist to avoid core import loop
from core.models.application import ApplicationThreshold
try:
return self.threshold
except ApplicationThreshold.DoesNotExist:
return None
def end_date_all(self, now=None):
if not now:
now = timezone.now()
for machine in self.machines.all():
if not machine.end_date:
machine.end_date = now
machine.save()
if not self.end_date:
self.end_date = now
self.save()
def active_machines(self):
"""
Show machines that are from an active provider and non-end-dated.
"""
return self.machines.filter(only_current_source())
def _split_mail(self, email, unknown_str='unknown'):
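# e.g. "alice@cs.example.edu" -> "edu" (last domain component, used as a rough per-TLD bucket)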
return email.split('@')[1].split('.')[-1] if email else unknown_str
def get_metrics(self, now_time=None):
"""
# TODO: Consider how this question could be answered
# with 'allocation' and the engine/routines used inside it..
"""
if not now_time:
now_time = timezone.now()
machines = self.machines.all()
provider_map = {}
user_domain_map = {}
for prov_machine in machines:
instance_mgr = prov_machine.instance_source.instances
total_time = timezone.timedelta(0)
count = instance_mgr.count()
for instance in instance_mgr.all():
iid = instance.id
end_at = instance.end_date if instance.end_date else now_time
start_at = instance.start_date
user = instance.created_by
email_str = self._split_mail(user.email)
# use a distinct name so the per-machine instance count computed above is not clobbered
domain_count = user_domain_map.get(email_str, 0)
domain_count += 1
user_domain_map[email_str] = domain_count
diff = max(end_at - start_at, timezone.timedelta(0)) # Guarantee positive results
total_time += diff
avg_time = total_time / count if count else timezone.timedelta(0)
key = prov_machine.provider.location
metrics = {
'count': count,
'total': total_time,
'avg_time': avg_time,
}
provider_map[key] = metrics
return {
'domains': user_domain_map,
'providers': provider_map
}
@classmethod
def get_admin_image_versions(cls, user):
"""
TODO: This 'just works' and is probably very slow... Look for a better way?
"""
provider_id_list = user.identity_set.values_list('provider', flat=True)
account_providers_list = AccountProvider.objects.filter(
provider__id__in=provider_id_list)
admin_users = [ap.identity.created_by for ap in account_providers_list]
version_ids = []
for user in admin_users:
version_ids.extend(
user.applicationversion_set.values_list('id', flat=True))
admin_list = ApplicationVersion.objects.filter(
id__in=version_ids)
return admin_list
@classmethod
def current_machines(cls, request_user):
# Showing non-end dated, public ApplicationVersions
public_set = ApplicationVersion.objects.filter(
only_current(),
only_current_machines_in_version(),
application__private=False)
if not isinstance(request_user, AnonymousUser):
# NOTE: Showing 'my' provider machines EVEN if they are end-dated.
my_set = ApplicationVersion.objects.filter(
Q(created_by=request_user) |
Q(application__created_by=request_user) |
Q(machines__instance_source__created_by=request_user))
all_group_ids = request_user.group_set.values('id')
# Showing non-end dated, shared ApplicationVersions
shared_set = ApplicationVersion.objects.filter(
only_current(), only_current_machines_in_version(), Q(
membership=all_group_ids) | Q(
machines__members__in=all_group_ids))
if request_user.is_staff:
admin_set = cls.get_admin_image_versions(request_user)
else:
admin_set = ApplicationVersion.objects.none()
else:
admin_set = shared_set = my_set = ApplicationVersion.objects.none()
# Make sure no dupes.
all_versions = (public_set | shared_set | my_set | admin_set).distinct()
return all_versions
@property
def machine_ids(self):
return self.machines.values_list(
'instance_source__identifier',
flat=True)
@property
def str_id(self):
return str(self.id)
@property
def icon_url(self):
return self.icon.url if self.icon else None
def is_owner(self, atmo_user):
return (self.created_by == atmo_user or
self.application.created_by == atmo_user)
def change_owner(self, identity, user=None, propagate=True):
if not user:
user = identity.created_by
self.created_by = user
self.created_by_identity = identity
self.save()
if propagate:
[m.instance_source.change_owner(identity, user) for m in self.machines.all()]
class ApplicationVersionMembership(models.Model):
"""
Members of a specific ApplicationVersion
Members can view & launch respective machines.
If the can_share flag is set, then members also have ownership--
they can give membership to other users.
The unique_together field ensures just one of those states is true.
NOTE: There IS underlying cloud implementation 9/10 times.
That should be 'hooked' in here!
"""
image_version = models.ForeignKey(ApplicationVersion,
db_column='application_version_id')
group = models.ForeignKey('Group')
can_share = models.BooleanField(default=False)
def __unicode__(self):
return "(ApplicationVersion:%s - Member:%s) " %\
(self.image_version, self.group.name)
class Meta:
db_table = 'application_version_membership'
app_label = 'core'
unique_together = ('image_version', 'group')
def get_version_for_machine(provider_uuid, identifier, fuzzy=False):
"""
Search for a matching version based on the identifier.
fuzzy - When replicating images across providers, use a 'fuzzy search'
to ensure provider machines are appropriately mapped to the
original 'parent' machine's version
"""
if fuzzy:
query = Q(machines__instance_source__identifier=identifier)
else:
query = Q(machines__instance_source__provider__uuid=provider_uuid,
machines__instance_source__identifier=identifier)
try:
return ApplicationVersion.objects.filter(query).distinct().first()
except ApplicationVersion.DoesNotExist:
return None
def get_app_version(app, version, created_by=None, created_by_identity=None):
try:
app_version = ApplicationVersion.objects.get(
name=version,
application=app)
return app_version
except ApplicationVersion.DoesNotExist:
app_version = create_app_version(
app,
version,
created_by,
created_by_identity)
return app_version
def test_machine_in_version(app, version_name, new_machine_id):
"""
Returns 'app_version' IF:
a version exists for this app with the version_name
and it is EMPTY OR it includes the machine
Otherwise, return None.
"""
try:
app_version = ApplicationVersion.objects.get(
application=app,
name=version_name)
if app_version.machines.count() == 0 or app_version.machines.filter(
instance_source__identifier=new_machine_id).count() > 0:
return app_version
except DoesNotExist:
return None
def create_unique_version(app, version, created_by, created_by_identity):
while True:
try:
app_version = ApplicationVersion.objects.create(
application=app,
name=version,
created_by=created_by,
created_by_identity=created_by_identity,
)
return app_version
except IntegrityError:
# duplicate_found
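# e.g. a taken name "2.0" is retried as "2.0.0", then "2.0.0.0", until the (application, name) pair is unique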
logger.warn(
"Version %s is taken for Application %s" %
(version, app))
if not version:
version = "1"
version += ".0"
def merge_duplicated_app_versions(
master_version,
copy_versions=[],
delete_copies=True):
"""
This function will merge together versions
that were created by the 'convert_esh_machine' process.
"""
for version in copy_versions:
if master_version.name not in version.name:
continue
for machine in version.machines.all():
machine.application_version = master_version
machine.save()
if delete_copies:
for version in copy_versions:
if master_version.name not in version.name:
continue
version.delete()
def create_app_version(
app,
version_str,
created_by=None,
created_by_identity=None,
change_log=None, allow_imaging=None, provider_machine_id=None):
if not created_by:
created_by = app.created_by
if not created_by_identity:
created_by_identity = app.created_by_identity
if provider_machine_id:
app_version = test_machine_in_version(app, version_str, provider_machine_id)
if app_version:
app_version.created_by = created_by
app_version.created_by_identity = created_by_identity
app_version.save()
else:
app_version = create_unique_version(
app,
version_str,
created_by,
created_by_identity)
last_version = app.latest_version
if last_version:
# DEFAULT: Use kwargs.. Otherwise: Inherit information from last
if change_log is not None:
app_version.change_log = change_log
else:
app_version.change_log = last_version.change_log
if allow_imaging is not None:
app_version.allow_imaging = allow_imaging
else:
app_version.allow_imaging = last_version.allow_imaging
app_version.save()
transfer_licenses(last_version, app_version)
transfer_membership(last_version, app_version)
else:
if change_log is None:
change_log = "New Application %s - Version %s" % (app.name, app_version.name)
if allow_imaging is None:
allow_imaging = True
app_version.change_log = change_log
app_version.allow_imaging = allow_imaging
app_version.save()
return app_version
def transfer_licenses(parent_version, new_version):
if parent_version.licenses.count():
for license in parent_version.licenses.all():
new_version.licenses.add(license)
def transfer_membership(parent_version, new_version):
if parent_version.membership.count():
for member in parent_version.membership.all():
old_membership = ApplicationVersionMembership.objects.get(
group=member, image_version=parent_version)
membership, _ = ApplicationVersionMembership.objects.get_or_create(
image_version=new_version,
group=old_membership.group,
can_share=old_membership.can_share)
| apache-2.0 |
kidmillions/Stino | stino/pyarduino/base/pyserial/serialwin32.py | 18 | 16466 | #! python
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# serial driver for win32
# see __init__.py
#
# (C) 2001-2011 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# Initial patch to use ctypes by Giovanni Bajo <rasky@develer.com>
import ctypes
import sys  # used by the self-test block at the bottom of the file
from . import win32
from .serialutil import *
def device(portnum):
"""Turn a port number into a device name"""
return 'COM%d' % (portnum+1) # numbers are transformed to a string
class Win32Serial(SerialBase):
"""Serial port implementation for Win32 based on ctypes."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def __init__(self, *args, **kwargs):
self.hComPort = None
self._rtsToggle = False
SerialBase.__init__(self, *args, **kwargs)
def open(self):
"""Open port with current settings. This may throw a SerialException
if the port cannot be opened."""
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self._isOpen:
raise SerialException("Port is already open.")
# the "\\.\COMx" format is required for devices other than COM1-COM8
# not all versions of windows seem to support this properly
# so that the first few ports are used with the DOS device name
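# e.g. 'COM12' is opened as '\\.\COM12', while 'COM1' keeps its plain DOS name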
port = self.portstr
try:
if port.upper().startswith('COM') and int(port[3:]) > 8:
port = '\\\\.\\' + port
except ValueError:
# for like COMnotanumber
pass
self.hComPort = win32.CreateFile(port,
win32.GENERIC_READ | win32.GENERIC_WRITE,
0, # exclusive access
None, # no security
win32.OPEN_EXISTING,
win32.FILE_ATTRIBUTE_NORMAL | win32.FILE_FLAG_OVERLAPPED,
0)
if self.hComPort == win32.INVALID_HANDLE_VALUE:
self.hComPort = None # 'cause __del__ is called anyway
raise SerialException("could not open port %s: %s" % (self.portstr, ctypes.WinError()))
# Setup a 4k buffer
win32.SetupComm(self.hComPort, 4096, 4096)
# Save original timeout values:
self._orgTimeouts = win32.COMMTIMEOUTS()
win32.GetCommTimeouts(self.hComPort, ctypes.byref(self._orgTimeouts))
self._rtsState = win32.RTS_CONTROL_ENABLE
self._dtrState = win32.DTR_CONTROL_ENABLE
self._reconfigurePort()
# Clear buffers:
# Remove anything that was there
win32.PurgeComm(self.hComPort,
win32.PURGE_TXCLEAR | win32.PURGE_TXABORT |
win32.PURGE_RXCLEAR | win32.PURGE_RXABORT)
self._overlappedRead = win32.OVERLAPPED()
self._overlappedRead.hEvent = win32.CreateEvent(None, 1, 0, None)
self._overlappedWrite = win32.OVERLAPPED()
#~ self._overlappedWrite.hEvent = win32.CreateEvent(None, 1, 0, None)
self._overlappedWrite.hEvent = win32.CreateEvent(None, 0, 0, None)
self._isOpen = True
def _reconfigurePort(self):
"""Set communication parameters on opened port."""
if not self.hComPort:
raise SerialException("Can only operate on a valid port handle")
# Set Windows timeout values
# timeouts is a tuple with the following items:
# (ReadIntervalTimeout,ReadTotalTimeoutMultiplier,
# ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier,
# WriteTotalTimeoutConstant)
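# e.g. self._timeout == 1.5 yields (0, 0, 1500, 0, 0): a 1500 ms total read timeout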
if self._timeout is None:
timeouts = (0, 0, 0, 0, 0)
elif self._timeout == 0:
timeouts = (win32.MAXDWORD, 0, 0, 0, 0)
else:
timeouts = (0, 0, int(self._timeout*1000), 0, 0)
if self._timeout != 0 and self._interCharTimeout is not None:
timeouts = (int(self._interCharTimeout * 1000),) + timeouts[1:]
if self._writeTimeout is None:
pass
elif self._writeTimeout == 0:
timeouts = timeouts[:-2] + (0, win32.MAXDWORD)
else:
timeouts = timeouts[:-2] + (0, int(self._writeTimeout*1000))
win32.SetCommTimeouts(self.hComPort, ctypes.byref(win32.COMMTIMEOUTS(*timeouts)))
win32.SetCommMask(self.hComPort, win32.EV_ERR)
# Setup the connection info.
# Get state and modify it:
comDCB = win32.DCB()
win32.GetCommState(self.hComPort, ctypes.byref(comDCB))
comDCB.BaudRate = self._baudrate
if self._bytesize == FIVEBITS:
comDCB.ByteSize = 5
elif self._bytesize == SIXBITS:
comDCB.ByteSize = 6
elif self._bytesize == SEVENBITS:
comDCB.ByteSize = 7
elif self._bytesize == EIGHTBITS:
comDCB.ByteSize = 8
else:
raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
if self._parity == PARITY_NONE:
comDCB.Parity = win32.NOPARITY
comDCB.fParity = 0 # Disable Parity Check
elif self._parity == PARITY_EVEN:
comDCB.Parity = win32.EVENPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == PARITY_ODD:
comDCB.Parity = win32.ODDPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == PARITY_MARK:
comDCB.Parity = win32.MARKPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == PARITY_SPACE:
comDCB.Parity = win32.SPACEPARITY
comDCB.fParity = 1 # Enable Parity Check
else:
raise ValueError("Unsupported parity mode: %r" % self._parity)
if self._stopbits == STOPBITS_ONE:
comDCB.StopBits = win32.ONESTOPBIT
elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
comDCB.StopBits = win32.ONE5STOPBITS
elif self._stopbits == STOPBITS_TWO:
comDCB.StopBits = win32.TWOSTOPBITS
else:
raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
comDCB.fBinary = 1 # Enable Binary Transmission
# Char. w/ Parity-Err are replaced with 0xff (if fErrorChar is set to TRUE)
if self._rtscts:
comDCB.fRtsControl = win32.RTS_CONTROL_HANDSHAKE
elif self._rtsToggle:
comDCB.fRtsControl = win32.RTS_CONTROL_TOGGLE
else:
comDCB.fRtsControl = self._rtsState
if self._dsrdtr:
comDCB.fDtrControl = win32.DTR_CONTROL_HANDSHAKE
else:
comDCB.fDtrControl = self._dtrState
if self._rtsToggle:
comDCB.fOutxCtsFlow = 0
else:
comDCB.fOutxCtsFlow = self._rtscts
comDCB.fOutxDsrFlow = self._dsrdtr
comDCB.fOutX = self._xonxoff
comDCB.fInX = self._xonxoff
comDCB.fNull = 0
comDCB.fErrorChar = 0
comDCB.fAbortOnError = 0
comDCB.XonChar = XON
comDCB.XoffChar = XOFF
if not win32.SetCommState(self.hComPort, ctypes.byref(comDCB)):
raise ValueError("Cannot configure port, some setting was wrong. Original message: %s" % ctypes.WinError())
#~ def __del__(self):
#~ self.close()
def close(self):
"""Close port"""
if self._isOpen:
if self.hComPort:
# Restore original timeout values:
win32.SetCommTimeouts(self.hComPort, self._orgTimeouts)
# Close COM-Port:
win32.CloseHandle(self.hComPort)
win32.CloseHandle(self._overlappedRead.hEvent)
win32.CloseHandle(self._overlappedWrite.hEvent)
self.hComPort = None
self._isOpen = False
def makeDeviceName(self, port):
return device(port)
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
flags = win32.DWORD()
comstat = win32.COMSTAT()
if not win32.ClearCommError(self.hComPort, ctypes.byref(flags), ctypes.byref(comstat)):
raise SerialException('call to ClearCommError failed')
return comstat.cbInQue
def read(self, size=1):
"""Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read."""
if not self.hComPort: raise portNotOpenError
if size > 0:
win32.ResetEvent(self._overlappedRead.hEvent)
flags = win32.DWORD()
comstat = win32.COMSTAT()
if not win32.ClearCommError(self.hComPort, ctypes.byref(flags), ctypes.byref(comstat)):
raise SerialException('call to ClearCommError failed')
if self.timeout == 0:
n = min(comstat.cbInQue, size)
if n > 0:
buf = ctypes.create_string_buffer(n)
rc = win32.DWORD()
err = win32.ReadFile(self.hComPort, buf, n, ctypes.byref(rc), ctypes.byref(self._overlappedRead))
if not err and win32.GetLastError() != win32.ERROR_IO_PENDING:
raise SerialException("ReadFile failed (%s)" % ctypes.WinError())
err = win32.WaitForSingleObject(self._overlappedRead.hEvent, win32.INFINITE)
read = buf.raw[:rc.value]
else:
read = bytes()
else:
buf = ctypes.create_string_buffer(size)
rc = win32.DWORD()
err = win32.ReadFile(self.hComPort, buf, size, ctypes.byref(rc), ctypes.byref(self._overlappedRead))
if not err and win32.GetLastError() != win32.ERROR_IO_PENDING:
raise SerialException("ReadFile failed (%s)" % ctypes.WinError())
err = win32.GetOverlappedResult(self.hComPort, ctypes.byref(self._overlappedRead), ctypes.byref(rc), True)
read = buf.raw[:rc.value]
else:
read = bytes()
return bytes(read)
def write(self, data):
"""Output the given string over the serial port."""
if not self.hComPort: raise portNotOpenError
#~ if not isinstance(data, (bytes, bytearray)):
#~ raise TypeError('expected %s or bytearray, got %s' % (bytes, type(data)))
# convert data (needed in case of memoryview instance: Py 3.1 io lib), ctypes doesn't like memoryview
data = bytes(data)
if data:
#~ win32event.ResetEvent(self._overlappedWrite.hEvent)
n = win32.DWORD()
err = win32.WriteFile(self.hComPort, data, len(data), ctypes.byref(n), self._overlappedWrite)
if not err and win32.GetLastError() != win32.ERROR_IO_PENDING:
raise SerialException("WriteFile failed (%s)" % ctypes.WinError())
if self._writeTimeout != 0: # if blocking (None) or w/ write timeout (>0)
# Wait for the write to complete.
#~ win32.WaitForSingleObject(self._overlappedWrite.hEvent, win32.INFINITE)
err = win32.GetOverlappedResult(self.hComPort, self._overlappedWrite, ctypes.byref(n), True)
if n.value != len(data):
raise writeTimeoutError
return n.value
else:
return 0
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self.hComPort: raise portNotOpenError
win32.PurgeComm(self.hComPort, win32.PURGE_RXCLEAR | win32.PURGE_RXABORT)
def flushOutput(self):
"""Clear output buffer, aborting the current output and
discarding all that is in the buffer."""
if not self.hComPort: raise portNotOpenError
win32.PurgeComm(self.hComPort, win32.PURGE_TXCLEAR | win32.PURGE_TXABORT)
def sendBreak(self, duration=0.25):
"""Send break condition. Timed, returns to idle state after given duration."""
if not self.hComPort: raise portNotOpenError
import time
win32.SetCommBreak(self.hComPort)
time.sleep(duration)
win32.ClearCommBreak(self.hComPort)
def setBreak(self, level=1):
"""Set break: Controls TXD. When active, to transmitting is possible."""
if not self.hComPort: raise portNotOpenError
if level:
win32.SetCommBreak(self.hComPort)
else:
win32.ClearCommBreak(self.hComPort)
def setRTS(self, level=1):
"""Set terminal status line: Request To Send"""
if not self.hComPort: raise portNotOpenError
if level:
self._rtsState = win32.RTS_CONTROL_ENABLE
win32.EscapeCommFunction(self.hComPort, win32.SETRTS)
else:
self._rtsState = win32.RTS_CONTROL_DISABLE
win32.EscapeCommFunction(self.hComPort, win32.CLRRTS)
def setDTR(self, level=1):
"""Set terminal status line: Data Terminal Ready"""
if not self.hComPort: raise portNotOpenError
if level:
self._dtrState = win32.DTR_CONTROL_ENABLE
win32.EscapeCommFunction(self.hComPort, win32.SETDTR)
else:
self._dtrState = win32.DTR_CONTROL_DISABLE
win32.EscapeCommFunction(self.hComPort, win32.CLRDTR)
def _GetCommModemStatus(self):
stat = win32.DWORD()
win32.GetCommModemStatus(self.hComPort, ctypes.byref(stat))
return stat.value
def getCTS(self):
"""Read terminal status line: Clear To Send"""
if not self.hComPort: raise portNotOpenError
return win32.MS_CTS_ON & self._GetCommModemStatus() != 0
def getDSR(self):
"""Read terminal status line: Data Set Ready"""
if not self.hComPort: raise portNotOpenError
return win32.MS_DSR_ON & self._GetCommModemStatus() != 0
def getRI(self):
"""Read terminal status line: Ring Indicator"""
if not self.hComPort: raise portNotOpenError
return win32.MS_RING_ON & self._GetCommModemStatus() != 0
def getCD(self):
"""Read terminal status line: Carrier Detect"""
if not self.hComPort: raise portNotOpenError
return win32.MS_RLSD_ON & self._GetCommModemStatus() != 0
# - - platform specific - - - -
def setXON(self, level=True):
"""Platform specific - set flow state."""
if not self.hComPort: raise portNotOpenError
if level:
win32.EscapeCommFunction(self.hComPort, win32.SETXON)
else:
win32.EscapeCommFunction(self.hComPort, win32.SETXOFF)
def outWaiting(self):
"""return how many characters the in the outgoing buffer"""
flags = win32.DWORD()
comstat = win32.COMSTAT()
if not win32.ClearCommError(self.hComPort, ctypes.byref(flags), ctypes.byref(comstat)):
raise SerialException('call to ClearCommError failed')
return comstat.cbOutQue
# functions useful for RS-485 adapters
def setRtsToggle(self, rtsToggle):
"""Change RTS toggle control setting."""
self._rtsToggle = rtsToggle
if self._isOpen: self._reconfigurePort()
def getRtsToggle(self):
"""Get the current RTS toggle control setting."""
return self._rtsToggle
rtsToggle = property(getRtsToggle, setRtsToggle, doc="RTS toggle control setting")
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, that provide the new I/O
# library, derive from io.RawIOBase
try:
import io
except ImportError:
# classic version with our own file-like emulation
class Serial(Win32Serial, FileLike):
pass
else:
# io library present
class Serial(Win32Serial, io.RawIOBase):
pass
# Test function only!!
if __name__ == '__main__':
s = Serial(0)
sys.stdout.write("%s\n" % s)
s = Serial()
sys.stdout.write("%s\n" % s)
s.baudrate = 19200
s.databits = 7
s.close()
s.port = 0
s.open()
sys.stdout.write("%s\n" % s)
| mit |
elena/django | tests/forms_tests/widget_tests/test_checkboxselectmultiple.py | 20 | 8278 | import datetime
from django import forms
from django.forms import CheckboxSelectMultiple
from django.test import override_settings
from .base import WidgetTest
class CheckboxSelectMultipleTest(WidgetTest):
widget = CheckboxSelectMultiple
def test_render_value(self):
self.check_html(self.widget(choices=self.beatles), 'beatles', ['J'], html=(
"""<ul>
<li><label><input checked type="checkbox" name="beatles" value="J"> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P"> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G"> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R"> Ringo</label></li>
</ul>"""
))
def test_render_value_multiple(self):
self.check_html(self.widget(choices=self.beatles), 'beatles', ['J', 'P'], html=(
"""<ul>
<li><label><input checked type="checkbox" name="beatles" value="J"> John</label></li>
<li><label><input checked type="checkbox" name="beatles" value="P"> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G"> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R"> Ringo</label></li>
</ul>"""
))
def test_render_none(self):
"""
If the value is None, none of the options are selected, even if the
choices have an empty option.
"""
self.check_html(self.widget(choices=(('', 'Unknown'),) + self.beatles), 'beatles', None, html=(
"""<ul>
<li><label><input type="checkbox" name="beatles" value=""> Unknown</label></li>
<li><label><input type="checkbox" name="beatles" value="J"> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P"> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G"> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R"> Ringo</label></li>
</ul>"""
))
def test_nested_choices(self):
nested_choices = (
('unknown', 'Unknown'),
('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
('Video', (('vhs', 'VHS'), ('dvd', 'DVD'))),
)
html = """
<ul id="media">
<li>
<label for="media_0"><input id="media_0" name="nestchoice" type="checkbox" value="unknown"> Unknown</label>
</li>
<li>Audio<ul id="media_1">
<li>
<label for="media_1_0">
<input checked id="media_1_0" name="nestchoice" type="checkbox" value="vinyl"> Vinyl
</label>
</li>
<li>
<label for="media_1_1"><input id="media_1_1" name="nestchoice" type="checkbox" value="cd"> CD</label>
</li>
</ul></li>
<li>Video<ul id="media_2">
<li>
<label for="media_2_0"><input id="media_2_0" name="nestchoice" type="checkbox" value="vhs"> VHS</label>
</li>
<li>
<label for="media_2_1">
<input checked id="media_2_1" name="nestchoice" type="checkbox" value="dvd"> DVD
</label>
</li>
</ul></li>
</ul>
"""
self.check_html(
self.widget(choices=nested_choices), 'nestchoice', ('vinyl', 'dvd'),
attrs={'id': 'media'}, html=html,
)
def test_nested_choices_without_id(self):
nested_choices = (
('unknown', 'Unknown'),
('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
('Video', (('vhs', 'VHS'), ('dvd', 'DVD'))),
)
html = """
<ul>
<li>
<label><input name="nestchoice" type="checkbox" value="unknown"> Unknown</label>
</li>
<li>Audio<ul>
<li>
<label>
<input checked name="nestchoice" type="checkbox" value="vinyl"> Vinyl
</label>
</li>
<li>
<label><input name="nestchoice" type="checkbox" value="cd"> CD</label>
</li>
</ul></li>
<li>Video<ul>
<li>
<label><input name="nestchoice" type="checkbox" value="vhs"> VHS</label>
</li>
<li>
<label>
<input checked name="nestchoice" type="checkbox" value="dvd"> DVD
</label>
</li>
</ul></li>
</ul>
"""
self.check_html(self.widget(choices=nested_choices), 'nestchoice', ('vinyl', 'dvd'), html=html)
def test_separate_ids(self):
"""
Each input gets a separate ID.
"""
choices = [('a', 'A'), ('b', 'B'), ('c', 'C')]
html = """
<ul id="abc">
<li>
<label for="abc_0"><input checked type="checkbox" name="letters" value="a" id="abc_0"> A</label>
</li>
<li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1"> B</label></li>
<li>
<label for="abc_2"><input checked type="checkbox" name="letters" value="c" id="abc_2"> C</label>
</li>
</ul>
"""
self.check_html(self.widget(choices=choices), 'letters', ['a', 'c'], attrs={'id': 'abc'}, html=html)
def test_separate_ids_constructor(self):
"""
Each input gets a separate ID when the ID is passed to the constructor.
"""
widget = CheckboxSelectMultiple(attrs={'id': 'abc'}, choices=[('a', 'A'), ('b', 'B'), ('c', 'C')])
html = """
<ul id="abc">
<li>
<label for="abc_0"><input checked type="checkbox" name="letters" value="a" id="abc_0"> A</label>
</li>
<li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1"> B</label></li>
<li>
<label for="abc_2"><input checked type="checkbox" name="letters" value="c" id="abc_2"> C</label>
</li>
</ul>
"""
self.check_html(widget, 'letters', ['a', 'c'], html=html)
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_doesnt_localize_input_value(self):
choices = [
(1, 'One'),
(1000, 'One thousand'),
(1000000, 'One million'),
]
html = """
<ul>
<li><label><input type="checkbox" name="numbers" value="1"> One</label></li>
<li><label><input type="checkbox" name="numbers" value="1000"> One thousand</label></li>
<li><label><input type="checkbox" name="numbers" value="1000000"> One million</label></li>
</ul>
"""
self.check_html(self.widget(choices=choices), 'numbers', None, html=html)
choices = [
(datetime.time(0, 0), 'midnight'),
(datetime.time(12, 0), 'noon'),
]
html = """
<ul>
<li><label><input type="checkbox" name="times" value="00:00:00"> midnight</label></li>
<li><label><input type="checkbox" name="times" value="12:00:00"> noon</label></li>
</ul>
"""
self.check_html(self.widget(choices=choices), 'times', None, html=html)
def test_use_required_attribute(self):
widget = self.widget(choices=self.beatles)
# Always False because browser validation would require all checkboxes
# to be checked instead of at least one.
self.assertIs(widget.use_required_attribute(None), False)
self.assertIs(widget.use_required_attribute([]), False)
self.assertIs(widget.use_required_attribute(['J', 'P']), False)
def test_value_omitted_from_data(self):
widget = self.widget(choices=self.beatles)
self.assertIs(widget.value_omitted_from_data({}, {}, 'field'), False)
self.assertIs(widget.value_omitted_from_data({'field': 'value'}, {}, 'field'), False)
def test_label(self):
"""
CheckboxSelectMultiple doesn't contain 'for="field_0"' in the <label>
because clicking that would toggle the first checkbox.
"""
class TestForm(forms.Form):
f = forms.MultipleChoiceField(widget=CheckboxSelectMultiple)
bound_field = TestForm()['f']
self.assertEqual(bound_field.field.widget.id_for_label('id'), '')
self.assertEqual(bound_field.label_tag(), '<label>F:</label>')
| bsd-3-clause |
bfontaine/web-pp | pp/unidecode/x09b.py | 252 | 4655 | data = (
'Ti ', # 0x00
'Li ', # 0x01
'Bin ', # 0x02
'Zong ', # 0x03
'Ti ', # 0x04
'Peng ', # 0x05
'Song ', # 0x06
'Zheng ', # 0x07
'Quan ', # 0x08
'Zong ', # 0x09
'Shun ', # 0x0a
'Jian ', # 0x0b
'Duo ', # 0x0c
'Hu ', # 0x0d
'La ', # 0x0e
'Jiu ', # 0x0f
'Qi ', # 0x10
'Lian ', # 0x11
'Zhen ', # 0x12
'Bin ', # 0x13
'Peng ', # 0x14
'Mo ', # 0x15
'San ', # 0x16
'Man ', # 0x17
'Man ', # 0x18
'Seng ', # 0x19
'Xu ', # 0x1a
'Lie ', # 0x1b
'Qian ', # 0x1c
'Qian ', # 0x1d
'Nong ', # 0x1e
'Huan ', # 0x1f
'Kuai ', # 0x20
'Ning ', # 0x21
'Bin ', # 0x22
'Lie ', # 0x23
'Rang ', # 0x24
'Dou ', # 0x25
'Dou ', # 0x26
'Nao ', # 0x27
'Hong ', # 0x28
'Xi ', # 0x29
'Dou ', # 0x2a
'Han ', # 0x2b
'Dou ', # 0x2c
'Dou ', # 0x2d
'Jiu ', # 0x2e
'Chang ', # 0x2f
'Yu ', # 0x30
'Yu ', # 0x31
'Li ', # 0x32
'Juan ', # 0x33
'Fu ', # 0x34
'Qian ', # 0x35
'Gui ', # 0x36
'Zong ', # 0x37
'Liu ', # 0x38
'Gui ', # 0x39
'Shang ', # 0x3a
'Yu ', # 0x3b
'Gui ', # 0x3c
'Mei ', # 0x3d
'Ji ', # 0x3e
'Qi ', # 0x3f
'Jie ', # 0x40
'Kui ', # 0x41
'Hun ', # 0x42
'Ba ', # 0x43
'Po ', # 0x44
'Mei ', # 0x45
'Xu ', # 0x46
'Yan ', # 0x47
'Xiao ', # 0x48
'Liang ', # 0x49
'Yu ', # 0x4a
'Tui ', # 0x4b
'Qi ', # 0x4c
'Wang ', # 0x4d
'Liang ', # 0x4e
'Wei ', # 0x4f
'Jian ', # 0x50
'Chi ', # 0x51
'Piao ', # 0x52
'Bi ', # 0x53
'Mo ', # 0x54
'Ji ', # 0x55
'Xu ', # 0x56
'Chou ', # 0x57
'Yan ', # 0x58
'Zhan ', # 0x59
'Yu ', # 0x5a
'Dao ', # 0x5b
'Ren ', # 0x5c
'Ji ', # 0x5d
'Eri ', # 0x5e
'Gong ', # 0x5f
'Tuo ', # 0x60
'Diao ', # 0x61
'Ji ', # 0x62
'Xu ', # 0x63
'E ', # 0x64
'E ', # 0x65
'Sha ', # 0x66
'Hang ', # 0x67
'Tun ', # 0x68
'Mo ', # 0x69
'Jie ', # 0x6a
'Shen ', # 0x6b
'Fan ', # 0x6c
'Yuan ', # 0x6d
'Bi ', # 0x6e
'Lu ', # 0x6f
'Wen ', # 0x70
'Hu ', # 0x71
'Lu ', # 0x72
'Za ', # 0x73
'Fang ', # 0x74
'Fen ', # 0x75
'Na ', # 0x76
'You ', # 0x77
'Namazu ', # 0x78
'Todo ', # 0x79
'He ', # 0x7a
'Xia ', # 0x7b
'Qu ', # 0x7c
'Han ', # 0x7d
'Pi ', # 0x7e
'Ling ', # 0x7f
'Tuo ', # 0x80
'Bo ', # 0x81
'Qiu ', # 0x82
'Ping ', # 0x83
'Fu ', # 0x84
'Bi ', # 0x85
'Ji ', # 0x86
'Wei ', # 0x87
'Ju ', # 0x88
'Diao ', # 0x89
'Bo ', # 0x8a
'You ', # 0x8b
'Gun ', # 0x8c
'Pi ', # 0x8d
'Nian ', # 0x8e
'Xing ', # 0x8f
'Tai ', # 0x90
'Bao ', # 0x91
'Fu ', # 0x92
'Zha ', # 0x93
'Ju ', # 0x94
'Gu ', # 0x95
'Kajika ', # 0x96
'Tong ', # 0x97
'[?] ', # 0x98
'Ta ', # 0x99
'Jie ', # 0x9a
'Shu ', # 0x9b
'Hou ', # 0x9c
'Xiang ', # 0x9d
'Er ', # 0x9e
'An ', # 0x9f
'Wei ', # 0xa0
'Tiao ', # 0xa1
'Zhu ', # 0xa2
'Yin ', # 0xa3
'Lie ', # 0xa4
'Luo ', # 0xa5
'Tong ', # 0xa6
'Yi ', # 0xa7
'Qi ', # 0xa8
'Bing ', # 0xa9
'Wei ', # 0xaa
'Jiao ', # 0xab
'Bu ', # 0xac
'Gui ', # 0xad
'Xian ', # 0xae
'Ge ', # 0xaf
'Hui ', # 0xb0
'Bora ', # 0xb1
'Mate ', # 0xb2
'Kao ', # 0xb3
'Gori ', # 0xb4
'Duo ', # 0xb5
'Jun ', # 0xb6
'Ti ', # 0xb7
'Man ', # 0xb8
'Xiao ', # 0xb9
'Za ', # 0xba
'Sha ', # 0xbb
'Qin ', # 0xbc
'Yu ', # 0xbd
'Nei ', # 0xbe
'Zhe ', # 0xbf
'Gun ', # 0xc0
'Geng ', # 0xc1
'Su ', # 0xc2
'Wu ', # 0xc3
'Qiu ', # 0xc4
'Ting ', # 0xc5
'Fu ', # 0xc6
'Wan ', # 0xc7
'You ', # 0xc8
'Li ', # 0xc9
'Sha ', # 0xca
'Sha ', # 0xcb
'Gao ', # 0xcc
'Meng ', # 0xcd
'Ugui ', # 0xce
'Asari ', # 0xcf
'Subashiri ', # 0xd0
'Kazunoko ', # 0xd1
'Yong ', # 0xd2
'Ni ', # 0xd3
'Zi ', # 0xd4
'Qi ', # 0xd5
'Qing ', # 0xd6
'Xiang ', # 0xd7
'Nei ', # 0xd8
'Chun ', # 0xd9
'Ji ', # 0xda
'Diao ', # 0xdb
'Qie ', # 0xdc
'Gu ', # 0xdd
'Zhou ', # 0xde
'Dong ', # 0xdf
'Lai ', # 0xe0
'Fei ', # 0xe1
'Ni ', # 0xe2
'Yi ', # 0xe3
'Kun ', # 0xe4
'Lu ', # 0xe5
'Jiu ', # 0xe6
'Chang ', # 0xe7
'Jing ', # 0xe8
'Lun ', # 0xe9
'Ling ', # 0xea
'Zou ', # 0xeb
'Li ', # 0xec
'Meng ', # 0xed
'Zong ', # 0xee
'Zhi ', # 0xef
'Nian ', # 0xf0
'Shachi ', # 0xf1
'Dojou ', # 0xf2
'Sukesou ', # 0xf3
'Shi ', # 0xf4
'Shen ', # 0xf5
'Hun ', # 0xf6
'Shi ', # 0xf7
'Hou ', # 0xf8
'Xing ', # 0xf9
'Zhu ', # 0xfa
'La ', # 0xfb
'Zong ', # 0xfc
'Ji ', # 0xfd
'Bian ', # 0xfe
'Bian ', # 0xff
)
| mit |
eugene7646/autopsy | InternalPythonModules/android/googlemaplocation.py | 5 | 6752 | """
Autopsy Forensic Browser
Copyright 2016-2018 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Double
from java.lang import Long
from java.sql import Connection
from java.sql import DriverManager
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from java.util import ArrayList
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import Blackboard
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel.Blackboard import BlackboardException
from org.sleuthkit.datamodel.blackboardutils import GeoArtifactsHelper
from org.sleuthkit.datamodel.blackboardutils.attributes import GeoWaypoints
from org.sleuthkit.datamodel.blackboardutils.attributes.GeoWaypoints import Waypoint
import traceback
import general
"""
Finds and parses the Google Maps database.
"""
class GoogleMapLocationAnalyzer(general.AndroidComponentAnalyzer):
def __init__(self):
self._logger = Logger.getLogger(self.__class__.__name__)
self.current_case = None
self.PROGRAM_NAME = "Google Maps History"
self.CAT_DESTINATION = "Destination"
def analyze(self, dataSource, fileManager, context):
try:
self.current_case = Case.getCurrentCaseThrows()
except NoCurrentCaseException as ex:
self._logger.log(Level.WARNING, "No case currently open.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
return
try:
absFiles = fileManager.findFiles(dataSource, "da_destination_history")
if absFiles.isEmpty():
return
for abstractFile in absFiles:
try:
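# copy the database out of the data source into a temp file so the SQLite JDBC driver can open it from disk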
jFile = File(self.current_case.getTempDirectory(), str(abstractFile.getId()) + abstractFile.getName())
ContentUtils.writeToFile(abstractFile, jFile, context.dataSourceIngestIsCancelled)
self.__findGeoLocationsInDB(jFile.toString(), abstractFile)
except Exception as ex:
self._logger.log(Level.SEVERE, "Error parsing Google map locations", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except TskCoreException as ex:
# Error finding Google map locations.
pass
def __findGeoLocationsInDB(self, databasePath, abstractFile):
if not databasePath:
return
try:
artifactHelper = GeoArtifactsHelper(self.current_case.getSleuthkitCase(),
general.MODULE_NAME, self.PROGRAM_NAME, abstractFile)
Class.forName("org.sqlite.JDBC") # load JDBC driver
connection = DriverManager.getConnection("jdbc:sqlite:" + databasePath)
statement = connection.createStatement()
except (ClassNotFoundException) as ex:
self._logger.log(Level.SEVERE, "Error loading JDBC driver", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
return
except (SQLException) as ex:
# Error opening database.
return
resultSet = None
try:
resultSet = statement.executeQuery(
"SELECT time, dest_lat, dest_lng, dest_title, dest_address, source_lat, source_lng FROM destination_history;")
while resultSet.next():
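# the time column appears to be epoch milliseconds; convert to seconds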
time = Long.valueOf(resultSet.getString("time")) / 1000
dest_title = resultSet.getString("dest_title")
dest_address = resultSet.getString("dest_address")
dest_lat = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("dest_lat"))
dest_lng = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("dest_lng"))
source_lat = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("source_lat"))
source_lng = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("source_lng"))
waypointlist = GeoWaypoints()
waypointlist.addPoint(Waypoint(source_lat, source_lng, None, None))
waypointlist.addPoint(Waypoint(dest_lat, dest_lng, None, dest_address))
artifactHelper.addRoute(dest_title, time, waypointlist, None)
except SQLException as ex:
# Unable to execute Google map locations SQL query against database.
pass
except TskCoreException as ex:
self._logger.log(Level.SEVERE, "Failed to add route artifacts.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
self._logger.log(Level.WARNING, "Failed to post artifacts.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except Exception as ex:
self._logger.log(Level.SEVERE, "Error processing google maps history.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
finally:
try:
if resultSet is not None:
resultSet.close()
statement.close()
connection.close()
except Exception as ex:
# Error closing the database.
pass
# Insert a decimal point six places from the end of the string.
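# e.g. "40741895" -> 40.741895 (coordinates appear to be stored as E6 fixed-point integers)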
@staticmethod
def convertGeo(s):
length = len(s)
if length > 6:
return Double.valueOf(s[0 : length-6] + "." + s[length-6 : length])
else:
return Double.valueOf(s)
| apache-2.0 |
stutivarshney/Bal-Aveksha | WebServer/BalAvekshaEnv/lib/python3.5/site-packages/django/contrib/sessions/backends/signed_cookies.py | 82 | 2949 | from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase
from django.core import signing
class SessionStore(SessionBase):
def load(self):
"""
We load the data from the key itself instead of fetching from
some external data store. Opposite of _get_session_key(),
raises BadSignature if signature fails.
"""
try:
return signing.loads(
self.session_key,
serializer=self.serializer,
# This doesn't handle non-default expiry dates, see #19201
max_age=settings.SESSION_COOKIE_AGE,
salt='django.contrib.sessions.backends.signed_cookies',
)
except Exception:
# BadSignature, ValueError, or unpickling exceptions. If any of
# these happen, reset the session.
self.create()
return {}
def create(self):
"""
To create a new key, we simply make sure that the modified flag is set
so that the cookie is set on the client for the current request.
"""
self.modified = True
def save(self, must_create=False):
"""
To save, we get the session key as a securely signed string and then
set the modified flag so that the cookie is set on the client for the
current request.
"""
self._session_key = self._get_session_key()
self.modified = True
def exists(self, session_key=None):
"""
This method makes sense when you're talking to a shared resource, but
it doesn't matter when you're storing the information in the client's
cookie.
"""
return False
def delete(self, session_key=None):
"""
To delete, we clear the session key and the underlying data structure
and set the modified flag so that the cookie is set on the client for
the current request.
"""
self._session_key = ''
self._session_cache = {}
self.modified = True
def cycle_key(self):
"""
Keeps the same data but with a new key. To do this, we just have to
call ``save()`` and it will automatically save a cookie with a new key
at the end of the request.
"""
self.save()
def _get_session_key(self):
"""
Most session backends don't need to override this method, but we do,
because instead of generating a random string, we want to actually
generate a secure url-safe Base64-encoded string of data as our
session key.
"""
session_cache = getattr(self, '_session_cache', {})
return signing.dumps(
session_cache, compress=True,
salt='django.contrib.sessions.backends.signed_cookies',
serializer=self.serializer,
)
@classmethod
def clear_expired(cls):
pass
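# Illustrative sketch (not part of Django): because the session "key" is
# itself the signed payload, a round trip needs no storage backend.
# Assuming a project with SECRET_KEY configured:
# from django.core import signing
# cookie = signing.dumps({'user': 1}, compress=True,
# salt='django.contrib.sessions.backends.signed_cookies')
# signing.loads(cookie, salt='django.contrib.sessions.backends.signed_cookies')
# # -> {'user': 1}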
| gpl-3.0 |
nonabelian/tda_dionysus | scripts/pca_demo.py | 1 | 1605 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import topology.data as td
def run_coffee_mug_pca_example():
X, y = td.coffee_mug(bottom_label=0, side_label=0, handle_label=1)
c = ['r' if l else 'b' for l in y]
# Nontrivial rotation around the x-axis
angle = np.pi / 4.0
rotation_matrix = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
X = rotation_matrix.dot(X.T).T
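# Sanity note (an added check, not in the original script): a proper
# rotation matrix is orthogonal with determinant +1, so distances between
# points are preserved:
# np.allclose(rotation_matrix.dot(rotation_matrix.T), np.eye(3)) # -> True
# np.isclose(np.linalg.det(rotation_matrix), 1.0) # -> True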
# Perform PCA 3D down to 2D
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X_pca[:,0], X_pca[:,1], c=c)
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
plt.savefig('images/coffee_mug_pca.png')
plt.show()
def run_pail_pca_example():
X, y = td.pail(bottom_label=0, side_label=0, handle_label=1)
c = ['r' if l else 'b' for l in y]
# Nontrivial rotation around the x-axis
angle = np.pi / 4.0
rotation_matrix = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
X = rotation_matrix.dot(X.T).T
# Perform PCA 3D down to 2D:
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X_pca[:,0], X_pca[:,1], c=c)
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
plt.savefig('images/pail_pca.png')
plt.show()
if __name__ == '__main__':
run_coffee_mug_pca_example()
run_pail_pca_example()
| gpl-3.0 |
sumanthha/kannadaflix | django/contrib/gis/geos/tests/test_geos.py | 22 | 43714 | import ctypes
import random
import unittest
from django.contrib.gis.geos import *
from django.contrib.gis.geos.base import gdal, numpy, GEOSBase
from django.contrib.gis.geos.libgeos import GEOS_PREPARE
from django.contrib.gis.geometry.test_data import TestDataMixin
class GEOSTest(unittest.TestCase, TestDataMixin):
@property
def null_srid(self):
"""
Returns the proper null SRID depending on the GEOS version.
See the comments in `test15_srid` for more details.
"""
info = geos_version_info()
if info['version'] == '3.0.0' and info['release_candidate']:
return -1
else:
return None
def test00_base(self):
"Tests out the GEOSBase class."
# Testing out GEOSBase class, which provides a `ptr` property
# that abstracts out access to underlying C pointers.
class FakeGeom1(GEOSBase):
pass
# This one only accepts pointers to floats
c_float_p = ctypes.POINTER(ctypes.c_float)
class FakeGeom2(GEOSBase):
ptr_type = c_float_p
# Default ptr_type is `c_void_p`.
fg1 = FakeGeom1()
# Default ptr_type is C float pointer
fg2 = FakeGeom2()
# These assignments are OK -- None is allowed because
# it's equivalent to the NULL pointer.
fg1.ptr = ctypes.c_void_p()
fg1.ptr = None
fg2.ptr = c_float_p(ctypes.c_float(5.23))
fg2.ptr = None
# Because pointers have been set to NULL, an exception should be
# raised when we try to access it. Raising an exception is
# preferable to a segmentation fault that commonly occurs when
# a C method is given a NULL memory reference.
for fg in (fg1, fg2):
# Equivalent to `fg.ptr`
self.assertRaises(GEOSException, fg._get_ptr)
# Anything that is neither None nor the acceptable pointer type will
# result in a TypeError when trying to assign it to the `ptr` property.
# Thus, memory addresses (integers) and pointers of the incorrect type
# (in `bad_ptrs`) will not be allowed.
bad_ptrs = (5, ctypes.c_char_p('foobar'))
for bad_ptr in bad_ptrs:
# Equivalent to `fg.ptr = bad_ptr`
self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
self.assertEqual(g.ewkt, geom.wkt)
def test01b_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex)
def test01b_hexewkb(self):
"Testing (HEX)EWKB output."
from binascii import a2b_hex
# For testing HEX(EWKB).
ogc_hex = '01010000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = '0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = '01010000A0E61000000000000000000000000000000000F03F0000000000000040'
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID nor Z value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex, pnt_3d.hex)
# HEXEWKB should be appropriate for its dimension -- have to use
# a WKBWriter with the dimension set accordingly, else GEOS will insert
# garbage into the 3D coordinate if there is none. Also, GEOS has
# a bug in versions prior to 3.1 that puts the X coordinate in
# place of Z; an exception should be raised on those versions.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
if GEOS_PREPARE:
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
else:
try:
hexewkb = pnt_3d.hexewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException.')
# Same for EWKB.
self.assertEqual(buffer(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
if GEOS_PREPARE:
self.assertEqual(buffer(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
else:
try:
ewkb = pnt_3d.ewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException')
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test01c_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml: self.assertEqual(kml, geom.kml)
def test01d_errors(self):
"Testing the Error handlers."
# string-based
print "\nBEGIN - expecting GEOS_ERROR; safe to ignore.\n"
for err in self.geometries.errors:
try:
g = fromstr(err.wkt)
except (GEOSException, ValueError):
pass
# Bad WKB
self.assertRaises(GEOSException, GEOSGeometry, buffer('0'))
print "\nEND - expecting GEOS_ERROR; safe to ignore.\n"
class NotAGeometry(object):
pass
# Some other object
self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
# None
self.assertRaises(TypeError, GEOSGeometry, None)
def test01e_wkb(self):
"Testing WKB output."
from binascii import b2a_hex
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
def test01f_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01g_create_wkb(self):
"Testing creation from WKB."
from binascii import a2b_hex
for g in self.geometries.hex_wkt:
wkb = buffer(a2b_hex(g.hex))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01h_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
def test01i_json(self):
"Testing GeoJSON input/output (via GDAL)."
if not gdal or not gdal.GEOJSON: return
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
self.assertEqual(g.json, geom.json)
self.assertEqual(g.json, geom.geojson)
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test01k_fromfile(self):
"Testing the fromfile() factory."
from StringIO import StringIO
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = StringIO()
wkt_f.write(ref_pnt.wkt)
wkb_f = StringIO()
wkb_f.write(str(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test01k_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# An error shouldn't be raised on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo' : 'bar'})
self.assertNotEqual(g, False)
def test02a_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertAlmostEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test02b_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
def test03a_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertEqual(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(True, ls == fromstr(l.wkt))
self.assertEqual(False, ls == prev)
self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt) # Point individual arguments
if numpy: self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
def test03b_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(True, ml == fromstr(l.wkt))
self.assertEqual(False, ml == prev)
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test04_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy: self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test05a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox( bbox )
self.assertEqual(bbox, p.extent)
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.empty, False)
self.assertEqual(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(True, poly == fromstr(p.wkt))
self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
self.assertEqual(True, poly != prev)
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
self.assertRaises(TypeError, Polygon.__init__, 0, [1, 2, 3])
self.assertRaises(TypeError, Polygon.__init__, 'foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test05b_multipolygons(self):
"Testing MultiPolygon objects."
print "\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n"
prev = fromstr('POINT (0 0)')
for mp in self.geometries.multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertEqual(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
print "\nEND - expecting GEOS_NOTICE; safe to ignore.\n"
def test06a_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
#### Memory issues with rings and polygons
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(self.geometries.polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
s1, s2 = str(ring1), str(ring2)
def test08_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in xrange(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2: tset = (5, 23)
else: tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test09_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test10_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test11_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test12_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test13_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test14_buffer(self):
"Testing buffer()."
for bg in self.geometries.buffer_geoms:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
quadsegs = bg.quadsegs
width = bg.width
# Can't use a floating-point for the number of quadsegs.
self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
# Constructing our buffer
buf = g.buffer(width, quadsegs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now assuring that each point in the buffer is almost equal
for j in xrange(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in xrange(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test15_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly: self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)): self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
# In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
# the SRID information is lost and set to -1 -- this is not a
# problem on the 3.0.0 version (another reason to upgrade).
exp_srid = self.null_srid
p2 = fromstr(p1.hex)
self.assertEqual(exp_srid, p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
def test16_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
s = str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(21, 100), random.randint(21, 100))
# Testing the assignment
mp[i] = new
s = str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in xrange(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
# Offsetting each ring in the polygon by 500.
for j in xrange(len(poly)):
r = poly[j]
for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
s = str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
#mpoly[0][0][0] = (3.14, 2.71)
#self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
#self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
#del mpoly
def test17_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2.,3.,8.), pnt.coords)
self.assertRaises(TypeError, pnt.set_coords, (1.,2.))
pnt.coords = (1.,2.,3.)
self.assertEqual((1.,2.,3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2.,3.,8.), (50.,250.,-117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1.,2.))
ls[0] = (1.,2.,3.)
self.assertEqual((1.,2.,3.), ls[0])
def test18_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test19_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be the circumference of the Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test20a_emptyCollections(self):
"Testing empty geometries and collections."
gc1 = GeometryCollection([])
gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
pnt = fromstr('POINT EMPTY')
ls = fromstr('LINESTRING EMPTY')
poly = fromstr('POLYGON EMPTY')
mls = fromstr('MULTILINESTRING EMPTY')
mpoly1 = fromstr('MULTIPOLYGON EMPTY')
mpoly2 = MultiPolygon(())
for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
self.assertEqual(True, g.empty)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
self.assertRaises(GEOSIndexError, g.get_x)
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertEqual(True, lr.empty)
self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
else:
self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test20b_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend([mls.wkt for mls in self.geometries.multilinestrings])
coll.extend([p.wkt for p in self.geometries.polygons])
coll.extend([mp.wkt for mp in self.geometries.multipoints])
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
def test21_test_gdal(self):
"Testing `ogr` and `srs` properties."
if not gdal.HAS_GDAL: return
g1 = fromstr('POINT(5 23)')
self.assertEqual(True, isinstance(g1.ogr, gdal.OGRGeometry))
self.assertEqual(g1.srs, None)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertEqual(True, isinstance(g2.ogr, gdal.OGRGeometry))
self.assertEqual(True, isinstance(g2.srs, gdal.SpatialReference))
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test22_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
def test23_transform(self):
"Testing `transform` method."
if not gdal.HAS_GDAL: return
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(gdal.SpatialReference('EPSG:2774'))
ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test23_transform_noop(self):
""" Testing `transform` method (SRID match) """
# transform() should no-op if source & dest SRIDs match,
# regardless of whether GDAL is available.
if gdal.HAS_GDAL:
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertTrue(g1 is not g, "Clone didn't happen")
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertTrue(g1 is not g, "Clone didn't happen")
finally:
gdal.HAS_GDAL = old_has_gdal
def test23_transform_nosrid(self):
""" Testing `transform` method (no SRID) """
# Raise a warning if SRID <0/None.
import warnings
print "\nBEGIN - expecting Warnings; safe to ignore.\n"
# Test for do-nothing behavior.
try:
# Keeping line-noise down by only printing the relevant
# warnings once.
warnings.simplefilter('once', UserWarning)
warnings.simplefilter('once', FutureWarning)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
g.transform(2774)
self.assertEqual(g.tuple, (-104.609, 38.255))
self.assertEqual(g.srid, None)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
g1 = g.transform(2774, clone=True)
self.assertTrue(g1 is None)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
g.transform(2774)
self.assertEqual(g.tuple, (-104.609, 38.255))
self.assertEqual(g.srid, -1)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
g1 = g.transform(2774, clone=True)
self.assertTrue(g1 is None)
finally:
warnings.simplefilter('default', UserWarning)
warnings.simplefilter('default', FutureWarning)
print "\nEND - expecting Warnings; safe to ignore.\n"
# test warning is raised
try:
warnings.simplefilter('error', FutureWarning)
warnings.simplefilter('ignore', UserWarning)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(FutureWarning, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(FutureWarning, g.transform, 2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(FutureWarning, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(FutureWarning, g.transform, 2774, clone=True)
finally:
warnings.simplefilter('default', FutureWarning)
warnings.simplefilter('default', UserWarning)
def test23_transform_nogdal(self):
""" Testing `transform` method (GDAL not available) """
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
finally:
gdal.HAS_GDAL = old_has_gdal
def test24_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test25_pickle(self):
"Testing pickling and unpickling support."
# Using both pickle and cPickle -- just 'cause.
import pickle, cPickle
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(self.geometries.points)
tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
tgeoms.extend(get_geoms(self.geometries.multipolygons, 900913))
# The SRID won't be exported in GEOS 3.0 release candidates.
no_srid = self.null_srid == -1
for geom in tgeoms:
s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
g1, g2 = cPickle.loads(s1), pickle.loads(s2)
for tmpg in (g1, g2):
self.assertEqual(geom, tmpg)
if not no_srid: self.assertEqual(geom.srid, tmpg.srid)
def test26_prepared(self):
"Testing PreparedGeometry support."
if not GEOS_PREPARE: return
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
covers = [True, True, False] # No `covers` op for regular GEOS geoms.
for pnt, c in zip(pnts, covers):
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(c, prep.covers(pnt))
def test26_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
def test27_valid_reason(self):
"Testing IsValidReason support"
# Skipping tests if GEOS < v3.1.
if not GEOS_PREPARE: return
g = GEOSGeometry("POINT(0 0)")
self.assertTrue(g.valid)
self.assertTrue(isinstance(g.valid_reason, basestring))
self.assertEqual(g.valid_reason, "Valid Geometry")
print "\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n"
g = GEOSGeometry("LINESTRING(0 0, 0 0)")
self.assertTrue(not g.valid)
self.assertTrue(isinstance(g.valid_reason, basestring))
self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
print "\nEND - expecting GEOS_NOTICE; safe to ignore.\n"
def test28_geos_version(self):
"Testing the GEOS version regular expression."
from django.contrib.gis.geos.libgeos import version_regex
versions = [ ('3.0.0rc4-CAPI-1.3.3', '3.0.0'),
('3.0.0-CAPI-1.4.1', '3.0.0'),
('3.4.0dev-CAPI-1.8.0', '3.4.0') ]
for v, expected in versions:
m = version_regex.match(v)
self.assertTrue(m)
self.assertEqual(m.group('version'), expected)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GEOSTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| bsd-3-clause |
GitYiheng/reinforcement_learning_test | test03_monte_carlo/t35_rlvps01.py | 1 | 7662 | import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
start_time = time.time()
MAX_TEST = 10
for test_num in range(3,MAX_TEST+1):
# Hyperparameters
RANDOM_NUMBER_SEED = test_num
ENVIRONMENT1 = "morph-v0"
MAX_EPISODES = 20000 # number of episodes
EPISODE_LENGTH = 2000 # single episode length
HIDDEN_SIZE = 16
DISPLAY_WEIGHTS = False # Help debug weight update
gamma = 0.99 # Discount per step
RENDER = False # Render the cart-pole system
VIDEO_INTERVAL = 100 # Generate a video at this interval
CONSECUTIVE_TARGET = 100 # Average over the previous 100 returns
CONST_LR = True # Constant or decaying learning rate
# Constant learning rate
const_learning_rate_in = 0.001
# Decay learning rate
start_learning_rate_in = 0.003
decay_steps_in = 100
decay_rate_in = 0.95
DIR_PATH_SAVEFIG = "/root/cartpole_plot/"
if CONST_LR:
learning_rate = const_learning_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_clr" + str(learning_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
else:
start_learning_rate = start_learning_rate_in
decay_steps = decay_steps_in
decay_rate = decay_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_dlr_slr" + str(start_learning_rate).replace(".", "p") \
+ "_ds" + str(decay_steps) \
+ "_dr" + str(decay_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
env = gym.make(ENVIRONMENT1)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
# Input and output sizes
input_size = 4
output_size = 2
# input_size = env.observation_space.shape[0]
# try:
# output_size = env.action_space.shape[0]
# except AttributeError:
# output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
if not CONST_LR:
# decay learning rate
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, decay_steps, decay_rate, staircase=False)
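# With staircase=False this is a smooth decay:
# lr(step) = start_learning_rate * decay_rate ** (step / decay_steps)
# e.g. with the values above, lr(100) = 0.003 * 0.95 = 0.00285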
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
# Xavier (2010) weights initializer for uniform distribution:
# x = sqrt(6. / (in + out)); [-x, x]
w_init = tf.contrib.layers.xavier_initializer()
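# e.g. for the input->hidden weights below (in=4, out=16):
# x = sqrt(6 / 20) ~= 0.548, so W1 is drawn from U(-0.548, 0.548)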
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
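# Descriptive note: the first log-softmax column is fed to a Bernoulli
# distribution over the binary action, and log_pi is the log-probability
# of the actions actually taken (used in the policy-gradient loss below).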
if CONST_LR:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
else:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi, global_step=global_step)
# saver = tf.train.Saver()
# Create and initialize a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, ep, render=False):
raw_reward = 0
discounted_reward = 0
cumulative_reward = []
discount = 1.0
states = []
actions = []
obs = environment.reset()
done = False
while not done:
states.append(obs)
cumulative_reward.append(discounted_reward)
if render and ((ep % VIDEO_INTERVAL) == 0):
environment.render()
action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
actions.append(action)
obs, reward, done, info = env.step(action[0])
raw_reward += reward
if reward > 0:
discounted_reward += reward * discount
else:
discounted_reward += reward
discount *= gamma
return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
returns = []
mean_returns = []
for ep in range(MAX_EPISODES):
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, ep, RENDER)
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
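# i.e. the target for each visited state is the episode's total discounted
# return minus the return accumulated before reaching that state -- the
# "reward to go" from that state onward.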
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
returns.append(raw_G)
running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
mean_return = np.mean(running_returns)
mean_returns.append(mean_return)
if CONST_LR:
msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), learning_rate, raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
else:
msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), sess.run(learning_rate), raw_G, CONSECUTIVE_TARGET, mean_return)
print(msg)
env.close() # close openai gym environment
tf.reset_default_graph() # clear tensorflow graph
# Plot
# plt.style.use('ggplot')
plt.style.use('dark_background')
episodes_plot = np.arange(MAX_EPISODES)
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
if CONST_LR:
ax.set_title("The Cart-Pole Problem Test %i \n \
Episode Length: %i \
Discount Factor: %.2f \n \
Number of Hidden Neuron: %i \
Constant Learning Rate: %.5f" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, learning_rate))
else:
ax.set_title("The Cart-Pole Problem Test %i \n \
EpisodeLength: %i DiscountFactor: %.2f NumHiddenNeuron: %i \n \
Decay Learning Rate: (start: %.5f, steps: %i, rate: %.2f)" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, start_learning_rate, decay_steps, decay_rate))
ax.set_xlabel("Episode")
ax.set_ylabel("Return")
ax.set_ylim((0, EPISODE_LENGTH))
ax.grid(linestyle='--')
ax.plot(episodes_plot, returns, label='Instant return')
ax.plot(episodes_plot, mean_returns, label='Averaged return')
legend = ax.legend(loc='best', shadow=True)
fig.savefig(DIR_PATH_SAVEFIG + file_name_savefig, dpi=500)
# plt.show()
| mit |
sinbazhou/odoo | addons/website_sale/models/product.py | 262 | 10108 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import osv, fields
class product_style(osv.Model):
_name = "product.style"
_columns = {
'name' : fields.char('Style Name', required=True),
'html_class': fields.char('HTML Classes'),
}
class product_pricelist(osv.Model):
_inherit = "product.pricelist"
_columns = {
'code': fields.char('Promotional Code'),
}
class product_public_category(osv.osv):
_name = "product.public.category"
_description = "Public Category"
_order = "sequence, name"
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
]
def name_get(self, cr, uid, ids, context=None):
res = []
for cat in self.browse(cr, uid, ids, context=context):
names = [cat.name]
pcat = cat.parent_id
while pcat:
names.append(pcat.name)
pcat = pcat.parent_id
res.append((cat.id, ' / '.join(reversed(names))))
return res
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Name', required=True, translate=True),
'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
'parent_id': fields.many2one('product.public.category','Parent Category', select=True),
'child_id': fields.one2many('product.public.category', 'parent_id', string='Children Categories'),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of product categories."),
# NOTE: there is no 'default image', because by default we don't show thumbnails for categories. However if we have a thumbnail
# for at least one category, then we display a default image on the other, so that the buttons have consistent styling.
# In this case, the default image is set by the js code.
# NOTE2: image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Image",
help="This field holds the image used as image for the category, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'product.public.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of the category. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Smal-sized image", type="binary", multi="_get_image",
store={
'product.public.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized image of the category. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
}
class product_template(osv.Model):
_inherit = ["product.template", "website.seo.metadata"]
_order = 'website_published desc, website_sequence desc, name'
_name = 'product.template'
_mail_post_access = 'read'
def _website_url(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, '')
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = "/shop/product/%s" % (product.id,)
return res
_columns = {
# TODO FIXME tde: when website_mail/mail_thread.py inheritance work -> this field won't be necessary
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', ('model', '=', self._name), ('type', '=', 'comment')
],
string='Website Comments',
),
'website_published': fields.boolean('Available in the website', copy=False),
'website_description': fields.html('Description for the website', translate=True),
'alternative_product_ids': fields.many2many('product.template','product_alternative_rel','src_id','dest_id', string='Alternative Products', help='Appear on the product page'),
'accessory_product_ids': fields.many2many('product.product','product_accessory_rel','src_id','dest_id', string='Accessory Products', help='Appear on the shopping cart'),
'website_size_x': fields.integer('Size X'),
'website_size_y': fields.integer('Size Y'),
'website_style_ids': fields.many2many('product.style', string='Styles'),
'website_sequence': fields.integer('Sequence', help="Determine the display order in the Website E-commerce"),
'website_url': fields.function(_website_url, string="Website url", type="char"),
'public_categ_ids': fields.many2many('product.public.category', string='Public Category', help="Those categories are used to group similar products for e-commerce."),
}
def _defaults_website_sequence(self, cr, uid, *l, **kwargs):
cr.execute('SELECT MAX(website_sequence)+1 FROM product_template')
next_sequence = cr.fetchone()[0] or 0
return next_sequence
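# i.e. a newly created product defaults to one past the current highest
# website_sequence (or 0 on an empty table).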
_defaults = {
'website_size_x': 1,
'website_size_y': 1,
'website_sequence': _defaults_website_sequence,
'website_published': False,
}
def set_sequence_top(self, cr, uid, ids, context=None):
cr.execute('SELECT MAX(website_sequence) FROM product_template')
max_sequence = cr.fetchone()[0] or 0
return self.write(cr, uid, ids, {'website_sequence': max_sequence + 1}, context=context)
def set_sequence_bottom(self, cr, uid, ids, context=None):
cr.execute('SELECT MIN(website_sequence) FROM product_template')
min_sequence = cr.fetchone()[0] or 0
return self.write(cr, uid, ids, {'website_sequence': min_sequence -1}, context=context)
def set_sequence_up(self, cr, uid, ids, context=None):
product = self.browse(cr, uid, ids[0], context=context)
cr.execute(""" SELECT id, website_sequence FROM product_template
WHERE website_sequence > %s AND website_published = %s ORDER BY website_sequence ASC LIMIT 1""" % (product.website_sequence, product.website_published))
prev = cr.fetchone()
if prev:
self.write(cr, uid, [prev[0]], {'website_sequence': product.website_sequence}, context=context)
return self.write(cr, uid, [ids[0]], {'website_sequence': prev[1]}, context=context)
else:
return self.set_sequence_top(cr, uid, ids, context=context)
def set_sequence_down(self, cr, uid, ids, context=None):
product = self.browse(cr, uid, ids[0], context=context)
cr.execute(""" SELECT id, website_sequence FROM product_template
WHERE website_sequence < %s AND website_published = %s ORDER BY website_sequence DESC LIMIT 1""" % (product.website_sequence, product.website_published))
next = cr.fetchone()
if next:
self.write(cr, uid, [next[0]], {'website_sequence': product.website_sequence}, context=context)
return self.write(cr, uid, [ids[0]], {'website_sequence': next[1]}, context=context)
else:
return self.set_sequence_bottom(cr, uid, ids, context=context)
class product_product(osv.Model):
_inherit = "product.product"
def _website_url(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = "/shop/product/%s" % (product.product_tmpl_id.id,)
return res
_columns = {
'website_url': fields.function(_website_url, string="Website url", type="char"),
}
class product_attribute(osv.Model):
_inherit = "product.attribute"
_columns = {
'type': fields.selection([('radio', 'Radio'), ('select', 'Select'), ('color', 'Color'), ('hidden', 'Hidden')], string="Type"),
}
_defaults = {
'type': lambda *a: 'radio',
}
class product_attribute_value(osv.Model):
_inherit = "product.attribute.value"
_columns = {
'color': fields.char("HTML Color Index", help="Here you can set a specific HTML color index (e.g. #ff0000) to display the color on the website if the attribute type is 'Color'."),
}
| agpl-3.0 |
githubashto/userinfuser | serverside/fantasm/main.py | 24 | 1997 | """ Fantasm: A taskqueue-based Finite State Machine for App Engine Python
Docs and examples: http://code.google.com/p/fantasm/
Copyright 2010 VendAsta Technologies Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Main module for fantasm implementation.
This module should be specified as a handler for fantasm URLs in app.yaml:
handlers:
- url: /fantasm/.*
login: admin
script: fantasm/main.py
"""
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from fantasm import handlers, console
def createApplication():
"""Create new WSGIApplication and register all handlers.
Returns:
an instance of webapp.WSGIApplication with all fantasm handlers registered.
"""
return webapp.WSGIApplication([
(r"^/[^\/]+/fsm/.+", handlers.FSMHandler),
(r"^/[^\/]+/cleanup/", handlers.FSMFanInCleanupHandler),
(r"^/[^\/]+/graphviz/.+", handlers.FSMGraphvizHandler),
(r"^/[^\/]+/log/", handlers.FSMLogHandler),
(r"^/[^\/]+/?", console.Dashboard),
],
debug=True)
APP = createApplication()
def main():
""" Main entry point. """
import os
if os.environ.get('SERVER_SOFTWARE') == 'Development/1.0':
# this seems to be a dev_appserver.py bug. causes unicode errors when trying to process the request
os.environ['QUERY_STRING'] = str(os.environ['QUERY_STRING'])
util.run_wsgi_app(APP)
if __name__ == "__main__":
main()
| gpl-3.0 |
steveklabnik/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/html5parser.py | 423 | 117297 | from __future__ import absolute_import, division, unicode_literals
from six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
from .constants import adjustForeignAttributes as adjustForeignAttributesMap
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
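# Illustrative sketch (an added note, not in the original): the metaclass
# applies a decorator to every plain function attribute of a class, e.g.
# class Example(with_metaclass(method_decorator_metaclass(some_decorator))):
#     def go(self):
#         return 42
# leaves Example.go wrapped by some_decorator.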
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
strict=False, namespaceHTMLElements=True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
This may be replaced, e.g. by a sanitizer that converts some tags to
text
"""
# Raise an exception on the first error encountered
self.strict = strict
if tree is None:
tree = treebuilders.getTreeBuilder("etree")
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).items()])
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet,
parser=self, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] # only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
@property
def documentEncoding(self):
"""The name of the character encoding
that was used to decode the input stream,
or :obj:`None` if that is not determined yet.
"""
if not hasattr(self, 'tokenizer'):
return None
return self.tokenizer.stream.charEncoding[0]
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
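# For orientation: per the HTML spec, the MathML text integration points are
# mtext/mi/mo/mn/ms, and the HTML integration points are SVG's
# foreignObject/desc/title plus annotation-xml whose encoding attribute is
# text/html or application/xhtml+xml (the special case handled above).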
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
new_token = token
while new_token is not None:
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and token["selfClosing"]
and not token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name": token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars={}):
# XXX The idea is to make errorcode mandatory.
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
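# Example: a StartTag token whose raw attribute list is
# [("class", "a"), ("class", "b")] normalizes to {"class": "a"}; reversing
# the list before dict() keeps the first occurrence of a duplicated
# attribute, as the spec requires.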
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl": "definitionURL"}
for k, v in replacements.items():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan"
}
for originalName in list(token["data"].keys()):
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = adjustForeignAttributesMap
for originalName in token["data"].keys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def reparseTokenNormal(self, token):
# XXX Apparently dead code: HTMLParser has no ``self.parser`` attribute,
# so this would raise AttributeError if it were ever called.
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select": "inSelect",
"td": "inCell",
"th": "inCell",
"tr": "inRow",
"tbody": "inTableBody",
"thead": "inTableBody",
"tfoot": "inTableBody",
"caption": "inCaption",
"colgroup": "inColumnGroup",
"table": "inTable",
"head": "inBody",
"body": "inBody",
"frameset": "inFrameset",
"html": "beforeHead"
}
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
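# Worked example (innerHTML case): for a fragment parse with container "td",
# the walk above reaches the root node, substitutes nodeName = "td", maps it
# through newModes to "inCell", and switches to that phase; a container not
# listed in newModes falls through to "inBody" via the `last` branch.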
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
info = {"type": type_names[token['type']]}
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string and just use the ContentAttrParser on
# that, but using UTF-8 allows all chars to be encoded and, as an
# ASCII superset, it works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
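# E.g. both <meta charset="windows-1252"> and
# <meta http-equiv="content-type" content="text/html; charset=windows-1252">
# can trigger changeEncoding() here, which may raise ReparseException and
# restart the whole parse with the newly declared encoding.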
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"noframes", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
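# The three-element cap above implements the spec's "Noah's Ark" clause: at
# most three identical formatting elements may appear after the last marker,
# and the earliest matching one is evicted before the new element is pushed.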
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
# Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name": "form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
# XXX Better parseError messages appreciated.
# Step 1
outerLoopCounter = 0
# Step 2
while outerLoopCounter < 8:
# Step 3
outerLoopCounter += 1
# Step 4:
# Let the formatting element be the last element in
# the list of active formatting elements that:
# - is between the end of the list and the last scope
# marker in the list, if any, or the start of the list
# otherwise, and
# - has the same tag name as the token.
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
# If there is no such node, then abort these steps
# and instead act as described in the "any other
# end tag" entry below.
self.endTagOther(token)
return
# Otherwise, if there is such a node, but that node is
# not in the stack of open elements, then this is a
# parse error; remove the element from the list, and
# abort these steps.
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Otherwise, if there is such a node, and that node is
# also in the stack of open elements, but the element
# is not in scope, then this is a parse error; ignore
# the token, and abort these steps.
elif not self.tree.elementInScope(formattingElement.name):
self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
return
# Otherwise, there is a formatting element and that
# element is in the stack and is in scope. If the
# element is not the current node, this is a parse
# error. In any case, proceed with the algorithm as
# written in the following steps.
else:
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 5:
# Let the furthest block be the topmost node in the
# stack of open elements that is lower in the stack
# than the formatting element, and is an element in
# the special category. There might not be one.
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 6:
# If there is no furthest block, then the UA must
# first pop all the nodes from the bottom of the stack
# of open elements, from the current node up to and
# including the formatting element, then remove the
# formatting element from the list of active
# formatting elements, and finally abort these steps.
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
# Step 7
commonAncestor = self.tree.openElements[afeIndex - 1]
# Step 8:
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 15. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 9.7
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 9
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 9.6
if node == formattingElement:
break
# Step 9.7
if lastNode == furthestBlock:
bookmark = self.tree.activeFormattingElements.index(node) + 1
# Step 9.8
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 9.9
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 9.10
lastNode = node
# Step 10
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster
# parent the lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 11
clone = formattingElement.cloneNode()
# Step 12
furthestBlock.reparentChildren(clone)
# Step 13
furthestBlock.appendChild(clone)
# Step 14
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 15
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
# The rest of this method is all stuff that only happens if
# document.write works
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
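# Note: a buffered run consisting only of whitespace is inserted directly,
# while a run containing any non-space character is routed through
# inTable.insertText(), i.e. foster-parented out of the table.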
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
# </optgroup> also closes the <optgroup> element itself
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == "\u0000":
token["data"] = "\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
# XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def endTagHtml(self, token):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
# If we're not in innerHTML mode and the current node is not a
# "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
}
def impliedTagToken(name, type="EndTag", attributes=None,
selfClosing=False):
if attributes is None:
attributes = {}
return {"type": tokenTypes[type], "name": name, "data": attributes,
"selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
| mpl-2.0 |
bhargav2408/kivy | kivy/input/providers/linuxwacom.py | 51 | 14837 | '''
Native support of Wacom tablet from linuxwacom driver
=====================================================
To configure LinuxWacom, add this to your configuration::
[input]
pen = linuxwacom,/dev/input/event2,mode=pen
finger = linuxwacom,/dev/input/event3,mode=touch
.. note::
You must have read access to the input event.
You can use a custom range for the X, Y and pressure values.
On some drivers, the range reported is invalid.
To fix that, you can add these options to the argument line:
* invert_x : 1 to invert X axis
* invert_y : 1 to invert Y axis
* min_position_x : X minimum
* max_position_x : X maximum
* min_position_y : Y minimum
* max_position_y : Y maximum
* min_pressure : pressure minimum
* max_pressure : pressure maximum
'''
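# A hypothetical configuration combining the options above (device paths
# and range values are illustrative, not taken from a real tablet):
#
#     [input]
#     pen = linuxwacom,/dev/input/event2,mode=pen,invert_y=1,min_pressure=0,max_pressure=1023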
__all__ = ('LinuxWacomMotionEventProvider', 'LinuxWacomMotionEvent')
import os
from kivy.input.motionevent import MotionEvent
from kivy.input.shape import ShapeRect
class LinuxWacomMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
if 'size_w' in args and 'size_h' in args:
self.shape = ShapeRect()
self.shape.width = args['size_w']
self.shape.height = args['size_h']
self.profile.append('shape')
if 'pressure' in args:
self.pressure = args['pressure']
self.profile.append('pressure')
super(LinuxWacomMotionEvent, self).depack(args)
def __str__(self):
return '<LinuxWacomMotionEvent id=%d pos=(%f, %f) device=%s>' \
% (self.id, self.sx, self.sy, self.device)
if 'KIVY_DOC' in os.environ:
# documentation hack
LinuxWacomMotionEventProvider = None
else:
import threading
import collections
import struct
import fcntl
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.logger import Logger
#
# This part is taken from linux-source-2.6.32/include/linux/input.h
#
# Event types
EV_SYN = 0x00
EV_KEY = 0x01
EV_REL = 0x02
EV_ABS = 0x03
EV_MSC = 0x04
EV_SW = 0x05
EV_LED = 0x11
EV_SND = 0x12
EV_REP = 0x14
EV_FF = 0x15
EV_PWR = 0x16
EV_FF_STATUS = 0x17
EV_MAX = 0x1f
EV_CNT = (EV_MAX + 1)
KEY_MAX = 0x2ff
# Synchronization events
SYN_REPORT = 0
SYN_CONFIG = 1
SYN_MT_REPORT = 2
# Misc events
MSC_SERIAL = 0x00
MSC_PULSELED = 0x01
MSC_GESTURE = 0x02
MSC_RAW = 0x03
MSC_SCAN = 0x04
MSC_MAX = 0x07
MSC_CNT = (MSC_MAX + 1)
ABS_X = 0x00
ABS_Y = 0x01
ABS_PRESSURE = 0x18
ABS_MISC = 0x28 # if 0, it's touch up
ABS_MT_TOUCH_MAJOR = 0x30 # Major axis of touching ellipse
ABS_MT_TOUCH_MINOR = 0x31 # Minor axis (omit if circular)
ABS_MT_WIDTH_MAJOR = 0x32 # Major axis of approaching ellipse
ABS_MT_WIDTH_MINOR = 0x33 # Minor axis (omit if circular)
ABS_MT_ORIENTATION = 0x34 # Ellipse orientation
ABS_MT_POSITION_X = 0x35 # Center X ellipse position
ABS_MT_POSITION_Y = 0x36 # Center Y ellipse position
ABS_MT_TOOL_TYPE = 0x37 # Type of touching device
ABS_MT_BLOB_ID = 0x38 # Group a set of packets as a blob
ABS_MT_TRACKING_ID = 0x39 # Unique ID of initiated contact
ABS_MT_PRESSURE = 0x3a # Pressure on contact area
# some ioctl base (with 0 value)
EVIOCGNAME = 2147501318
EVIOCGBIT = 2147501344
EVIOCGABS = 2149074240
# sizeof(struct input_event)
struct_input_event_sz = struct.calcsize('LLHHi')
struct_input_absinfo_sz = struct.calcsize('iiiiii')
sz_l = struct.calcsize('Q')
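# The userspace buffer length is encoded in the upper bits of an ioctl
# request number, which is why the call sites below add `(length << 16)`
# to these base values.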
class LinuxWacomMotionEventProvider(MotionEventProvider):
options = ('min_position_x', 'max_position_x',
'min_position_y', 'max_position_y',
'min_pressure', 'max_pressure',
'invert_x', 'invert_y')
def __init__(self, device, args):
super(LinuxWacomMotionEventProvider, self).__init__(device, args)
self.input_fn = None
self.default_ranges = dict()
self.mode = 'touch'
# split arguments
args = args.split(',')
if not args:
Logger.error('LinuxWacom: No filename given in config')
Logger.error('LinuxWacom: Use /dev/input/event0 for example')
return None
# read filename
self.input_fn = args[0]
Logger.info('LinuxWacom: Read event from <%s>' % self.input_fn)
# read parameters
for arg in args[1:]:
if arg == '':
continue
arg = arg.split('=')
# ensure it's a key = value
if len(arg) != 2:
err = 'LinuxWacom: Bad parameter ' \
'%s: Not in key=value format.' % arg
Logger.error(err)
continue
# ensure the key exist
key, value = arg
if key == 'mode':
self.mode = value
continue
if key not in LinuxWacomMotionEventProvider.options:
Logger.error('LinuxWacom: unknown %s option' % key)
continue
# ensure the value
try:
self.default_ranges[key] = int(value)
except ValueError:
err = 'LinuxWacom: value %s invalid for %s' % (value, key)
Logger.error(err)
continue
# all good!
msg = 'LinuxWacom: Set custom %s to %d' % (key, int(value))
Logger.info(msg)
Logger.info('LinuxWacom: mode is <%s>' % self.mode)
def start(self):
if self.input_fn is None:
return
self.uid = 0
self.queue = collections.deque()
self.thread = threading.Thread(
target=self._thread_run,
kwargs=dict(
queue=self.queue,
input_fn=self.input_fn,
device=self.device,
default_ranges=self.default_ranges))
self.thread.daemon = True
self.thread.start()
def _thread_run(self, **kwargs):
input_fn = kwargs.get('input_fn')
queue = kwargs.get('queue')
device = kwargs.get('device')
drs = kwargs.get('default_ranges').get
touches = {}
touches_sent = []
l_points = {}
# prepare some vars to get limit of some component
range_min_position_x = 0
range_max_position_x = 2048
range_min_position_y = 0
range_max_position_y = 2048
range_min_pressure = 0
range_max_pressure = 255
invert_x = int(bool(drs('invert_x', 0)))
invert_y = int(bool(drs('invert_y', 0)))
reset_touch = False
def process(points):
actives = list(points.keys())
for args in points.values():
tid = args['id']
try:
touch = touches[tid]
except KeyError:
touch = LinuxWacomMotionEvent(device, tid, args)
touches[touch.id] = touch
if touch.sx == args['x'] \
and touch.sy == args['y'] \
and tid in touches_sent:
continue
touch.move(args)
if tid not in touches_sent:
queue.append(('begin', touch))
touches_sent.append(tid)
queue.append(('update', touch))
for tid in list(touches.keys())[:]:
if tid not in actives:
touch = touches[tid]
if tid in touches_sent:
touch.update_time_end()
queue.append(('end', touch))
touches_sent.remove(tid)
del touches[tid]
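# process() above queues ('begin'|'update'|'end', touch) pairs; update()
# below pops them off the deque and dispatches them on the main thread.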
def normalize(value, vmin, vmax):
return (value - vmin) / float(vmax - vmin)
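# Worked example: normalize(512, 0, 1024) == 0.5; raw ABS_* values are
# mapped into [0, 1] here before any axis inversion is applied.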
# open the input
try:
fd = open(input_fn, 'rb')
except IOError:
Logger.exception('Unable to open %s' % input_fn)
return
# get the controller name (EVIOCGNAME)
device_name = fcntl.ioctl(fd, EVIOCGNAME + (256 << 16),
" " * 256).split('\x00')[0]
Logger.info('LinuxWacom: using <%s>' % device_name)
# get abs infos
bit = fcntl.ioctl(fd, EVIOCGBIT + (EV_MAX << 16), ' ' * sz_l)
bit, = struct.unpack('Q', bit)
for x in range(EV_MAX):
# preserve this, we may want other things than EV_ABS
if x != EV_ABS:
continue
# EV_ABS available for this device ?
if (bit & (1 << x)) == 0:
continue
# ask abs info keys to the devices
sbit = fcntl.ioctl(fd, EVIOCGBIT + x + (KEY_MAX << 16),
' ' * sz_l)
sbit, = struct.unpack('Q', sbit)
for y in range(KEY_MAX):
if (sbit & (1 << y)) == 0:
continue
absinfo = fcntl.ioctl(fd, EVIOCGABS + y +
(struct_input_absinfo_sz << 16),
' ' * struct_input_absinfo_sz)
abs_value, abs_min, abs_max, abs_fuzz, \
abs_flat, abs_res = struct.unpack('iiiiii', absinfo)
if y == ABS_X:
range_min_position_x = drs('min_position_x', abs_min)
range_max_position_x = drs('max_position_x', abs_max)
Logger.info('LinuxWacom: ' +
'<%s> range position X is %d - %d' % (
device_name, abs_min, abs_max))
elif y == ABS_Y:
range_min_position_y = drs('min_position_y', abs_min)
range_max_position_y = drs('max_position_y', abs_max)
Logger.info('LinuxWacom: ' +
'<%s> range position Y is %d - %d' % (
device_name, abs_min, abs_max))
elif y == ABS_PRESSURE:
range_min_pressure = drs('min_pressure', abs_min)
range_max_pressure = drs('max_pressure', abs_max)
Logger.info('LinuxWacom: ' +
'<%s> range pressure is %d - %d' % (
device_name, abs_min, abs_max))
# read until the end
changed = False
touch_id = 0
touch_x = 0
touch_y = 0
touch_pressure = 0
while fd:
data = fd.read(struct_input_event_sz)
if len(data) < struct_input_event_sz:
break
# extract each event
for i in range(len(data) / struct_input_event_sz):
ev = data[i * struct_input_event_sz:]
# extract timeval + event infos
tv_sec, tv_usec, ev_type, ev_code, ev_value = \
struct.unpack('LLHHi', ev[:struct_input_event_sz])
if ev_type == EV_SYN and ev_code == SYN_REPORT:
if touch_id in l_points:
p = l_points[touch_id]
else:
p = dict()
l_points[touch_id] = p
p['id'] = touch_id
if reset_touch is False:
p['x'] = touch_x
p['y'] = touch_y
p['pressure'] = touch_pressure
if self.mode == 'pen' \
and touch_pressure == 0 \
and not reset_touch:
del l_points[touch_id]
if changed:
if 'x' not in p:
reset_touch = False
continue
process(l_points)
changed = False
if reset_touch:
l_points.clear()
reset_touch = False
process(l_points)
elif ev_type == EV_MSC and ev_code == MSC_SERIAL:
touch_id = ev_value
elif ev_type == EV_ABS and ev_code == ABS_X:
val = normalize(ev_value,
range_min_position_x,
range_max_position_x)
if invert_x:
val = 1. - val
touch_x = val
changed = True
elif ev_type == EV_ABS and ev_code == ABS_Y:
val = 1. - normalize(ev_value,
range_min_position_y,
range_max_position_y)
if invert_y:
val = 1. - val
touch_y = val
changed = True
elif ev_type == EV_ABS and ev_code == ABS_PRESSURE:
touch_pressure = normalize(ev_value,
range_min_pressure,
range_max_pressure)
changed = True
elif ev_type == EV_ABS and ev_code == ABS_MISC:
if ev_value == 0:
reset_touch = True
def update(self, dispatch_fn):
# dispatch all events queued by the reader thread
try:
while True:
event_type, touch = self.queue.popleft()
dispatch_fn(event_type, touch)
except IndexError:
pass
MotionEventFactory.register('linuxwacom', LinuxWacomMotionEventProvider)
| mit |
chenjiee815/inner_rpc | inner_rpc/test/test_utils.py | 1 | 2853 | #!/usr/bin/env python
# encoding=utf-8
import os
import os.path as osp
import random
import unittest
import socket
from inner_rpc import utils, ir_exceptions
class ClientTestCase(unittest.TestCase):
def setUp(self):
self.__server_socket = None
self.__client_socket = None
self.__unix_path = '/tmp/test_inner_rpc_utils.sock'
return True
def tearDown(self):
if self.__server_socket:
self.__server_socket.close()
if self.__client_socket:
self.__client_socket.close()
if osp.isfile(self.__unix_path):
os.unlink(self.__unix_path)
return True
def __get_random_string(self, length):
get_rand_chr = lambda: chr(random.randint(0, 0xFF))
rand_str = (get_rand_chr() for _ in xrange(length))
return ''.join(rand_str)
def test_picker(self):
picker = utils.Picker()
test_data = self.__get_random_string(1024)
dump_data = picker.dump(test_data)
load_data = picker.load(dump_data)
self.assertEqual(test_data, load_data)
def test_serversockets_ipport(self):
ss = utils.ServerSockets(ip='127.0.0.1', port=9000)
self.__server_socket = ss.get()
self.assertEqual(self.__server_socket.family, socket.AF_INET)
self.assertEqual(self.__server_socket.type, socket.SOCK_STREAM)
def test_serversockets_unixpath(self):
ss = utils.ServerSockets(unix_path=self.__unix_path)
self.__server_socket = ss.get()
self.assertEqual(self.__server_socket.family, socket.AF_UNIX)
self.assertEqual(self.__server_socket.type, socket.SOCK_STREAM)
def test_clientsockets_ipport(self):
cs = utils.ClientSockets(ip='127.0.0.1', port=9000)
with self.assertRaises(ir_exceptions.SocketError):
cs.get()
ss = utils.ServerSockets(ip='127.0.0.1', port=9000)
self.__server_socket = ss.get()
self.__client_socket = cs.get()
self.assertEqual(self.__client_socket.family, socket.AF_INET)
self.assertEqual(self.__client_socket.type, socket.SOCK_STREAM)
def test_clientsockets_unixpath(self):
cs = utils.ClientSockets(unix_path=self.__unix_path)
with self.assertRaises(ir_exceptions.SocketError):
cs.get()
ss = utils.ServerSockets(unix_path=self.__unix_path)
self.__server_socket = ss.get()
self.__client_socket = cs.get()
self.assertEqual(self.__client_socket.family, socket.AF_UNIX)
self.assertEqual(self.__client_socket.type, socket.SOCK_STREAM)
def test_rpcfuncs(self):
rf = utils.RPCFuncs()
test_func = lambda: True
rf.register(test_func, 'test_func_name')
self.assertEqual(test_func, rf.get('test_func_name'))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
chandler14362/Bamboo | bindings/python/setup.py | 1 | 1129 | #!/usr/bin/python
from distutils.core import setup, Extension
import os
scriptDir = os.path.dirname(os.path.realpath(__file__))
buildDir = os.path.join(scriptDir, '../../build')
includeDirs = [buildDir, os.path.join(scriptDir, '../../src')]
libraryDirs = [buildDir]
module = Extension('bamboo',
include_dirs = includeDirs,
sources = ['pythonBindings.cpp'],
libraries = ['bamboo'],
library_dirs = libraryDirs)
module.extra_compile_args = ['--std=c++11']
setup(name = 'bamboo', version = '0.0', license = 'BSD',
description = 'Bamboo is a library for defining object-oriented message protocols.',
url = 'https://github.com/Astron/Bamboo',
ext_modules=[module],
author = 'kestred',
author_email = 'kestred@riotcave.com',
classifiers = [
'Programming Language :: C++',
'Development Status :: 3 - Alpha',
'Natural Language :: English',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
])
| bsd-3-clause |
dionysio/django_upwork_portfolio | base/models.py | 1 | 1578 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PIL import Image as Img
from io import BytesIO
import uuid
from django.db import models
from django.core.files.uploadedfile import InMemoryUploadedFile
class School(models.Model):
school_id = models.AutoField(primary_key=True)
title = models.TextField()
started = models.DateField()
finished = models.DateField()
description = models.TextField()
major = models.TextField()
class Meta:
ordering = ['-started']
class Project(models.Model):
project_id = models.AutoField(primary_key=True)
title = models.TextField()
image = models.ImageField(upload_to='projects/')
description = models.TextField()
url = models.URLField(null=True, blank=True)
from_date = models.DateField(null=True, blank=True)
to_date = models.DateField(null=True, blank=True)
class Meta:
ordering = ['-from_date']
@property
def paragraphs(self):
return self.description.split("\r\n\r\n\r\n")
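# e.g. a description of 'intro\r\n\r\n\r\ndetails' yields
# ['intro', 'details'] (split on a CRLF blank-line separator).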
def save(self, *args, **kwargs):
if self.image:
img = Img.open(BytesIO(self.image.read()))
if img.mode != 'RGB':
img = img.convert('RGB')
img.thumbnail((self.image.width / 1.5, self.image.height / 1.5), Img.ANTIALIAS)
output = BytesIO()
img.save(output, format='JPEG', quality=80)
size = output.tell()  # capture the JPEG byte size before rewinding
output.seek(0)
self.image = InMemoryUploadedFile(output, 'ImageField', "{}.jpg".format(uuid.uuid4().hex), 'image/jpeg', size, None)
super().save(*args, **kwargs)
| mit |
balthamos/plover | plover/test_translation.py | 3 | 21989 | # Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Unit tests for translation.py."""
from collections import namedtuple
import copy
from mock import patch
from steno_dictionary import StenoDictionary, StenoDictionaryCollection
from translation import Translation, Translator, _State, _translate_stroke, _lookup
import unittest
from plover.steno import Stroke, normalize_steno
def stroke(s):
keys = []
on_left = True
for k in s:
if k in 'EU*-':
on_left = False
if k == '-':
continue
elif k == '*':
keys.append(k)
elif on_left:
keys.append(k + '-')
else:
keys.append('-' + k)
return Stroke(keys)
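# For example, stroke('ST') yields Stroke(['S-', 'T-']), while
# stroke('S-T') yields Stroke(['S-', '-T']): a '-', '*' or vowel switches
# subsequent keys to the right-hand bank.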
class TranslationTestCase(unittest.TestCase):
def test_no_translation(self):
t = Translation([stroke('S'), stroke('T')], None)
self.assertEqual(t.strokes, [stroke('S'), stroke('T')])
self.assertEqual(t.rtfcre, ('S', 'T'))
self.assertIsNone(t.english)
def test_translation(self):
t = Translation([stroke('S'), stroke('T')], 'translation')
self.assertEqual(t.strokes, [stroke('S'), stroke('T')])
self.assertEqual(t.rtfcre, ('S', 'T'))
self.assertEqual(t.english, 'translation')
class TranslatorStateSizeTestCase(unittest.TestCase):
class FakeState(_State):
def __init__(self):
_State.__init__(self)
self.restrict_calls = []
def restrict_size(self, n):
self.restrict_calls.append(n)
def assert_size_call(self, size):
self.assertEqual(self.s.restrict_calls[-1], size)
def assert_no_size_call(self):
self.assertEqual(self.s.restrict_calls, [])
def clear(self):
self.s.restrict_calls = []
def setUp(self):
self.t = Translator()
self.s = type(self).FakeState()
self.t._state = self.s
self.d = StenoDictionary()
self.dc = StenoDictionaryCollection()
self.dc.set_dicts([self.d])
self.t.set_dictionary(self.dc)
def test_dictionary_update_grows_size1(self):
self.d[('S',)] = '1'
self.assert_size_call(1)
def test_dictionary_update_grows_size4(self):
self.d[('S', 'PT', '-Z', 'TOP')] = 'hi'
self.assert_size_call(4)
def test_dictionary_update_no_grow(self):
self.t.set_min_undo_length(4)
self.assert_size_call(4)
self.clear()
self.d[('S', 'T')] = 'nothing'
self.assert_size_call(4)
def test_dictionary_update_shrink(self):
self.d[('S', 'T', 'P', '-Z', '-D')] = '1'
self.assert_size_call(5)
self.clear()
self.d[('A', 'P')] = '2'
self.assert_no_size_call()
del self.d[('S', 'T', 'P', '-Z', '-D')]
self.assert_size_call(2)
def test_dictionary_update_no_shrink(self):
self.t.set_min_undo_length(7)
self.d[('S', 'T', 'P', '-Z', '-D')] = '1'
del self.d[('S', 'T', 'P', '-Z', '-D')]
self.assert_size_call(7)
def test_translation_calls_restrict(self):
self.t.translate(stroke('S'))
self.assert_size_call(0)
class TranslatorTestCase(unittest.TestCase):
def test_translate_calls_translate_stroke(self):
t = Translator()
s = stroke('S')
def check(stroke, state, dictionary, output):
self.assertEqual(stroke, s)
self.assertEqual(state, t._state)
self.assertEqual(dictionary, t._dictionary)
self.assertEqual(output, t._output)
with patch('plover.translation._translate_stroke', check) as _translate_stroke:
t.translate(s)
def test_listeners(self):
output1 = []
def listener1(undo, do, prev):
output1.append((undo, do, prev))
output2 = []
def listener2(undo, do, prev):
output2.append((undo, do, prev))
t = Translator()
s = stroke('S')
tr = Translation([s], None)
expected_output = [([], [tr], tr)]
t.translate(s)
t.add_listener(listener1)
t.translate(s)
self.assertEqual(output1, expected_output)
del output1[:]
t.add_listener(listener2)
t.translate(s)
self.assertEqual(output1, expected_output)
self.assertEqual(output2, expected_output)
del output1[:]
del output2[:]
t.add_listener(listener2)
t.translate(s)
self.assertEqual(output1, expected_output)
self.assertEqual(output2, expected_output)
del output1[:]
del output2[:]
t.remove_listener(listener1)
t.translate(s)
self.assertEqual(output1, [])
self.assertEqual(output2, expected_output)
del output1[:]
del output2[:]
t.remove_listener(listener2)
t.translate(s)
self.assertEqual(output1, [])
self.assertEqual(output2, [])
def test_changing_state(self):
output = []
def listener(undo, do, prev):
output.append((undo, do, prev))
d = StenoDictionary()
d[('S', 'P')] = 'hi'
dc = StenoDictionaryCollection()
dc.set_dicts([d])
t = Translator()
t.set_dictionary(dc)
t.translate(stroke('T'))
t.translate(stroke('S'))
s = copy.deepcopy(t.get_state())
t.add_listener(listener)
expected = [([Translation([stroke('S')], None)],
[Translation([stroke('S'), stroke('P')], 'hi')],
Translation([stroke('T')], None))]
t.translate(stroke('P'))
self.assertEqual(output, expected)
del output[:]
t.set_state(s)
t.translate(stroke('P'))
self.assertEqual(output, expected)
del output[:]
t.clear_state()
t.translate(stroke('P'))
self.assertEqual(output, [([], [Translation([stroke('P')], None)], None)])
del output[:]
t.set_state(s)
t.translate(stroke('P'))
self.assertEqual(output,
[([],
[Translation([stroke('P')], None)],
Translation([stroke('S'), stroke('P')], 'hi'))])
def test_translator(self):
# It's not clear that this test is needed anymore. There are separate
# tests for _translate_stroke and test_translate_calls_translate_stroke
# makes sure that translate calls it properly. But since I already wrote
# this test I'm going to keep it.
class Output(object):
def __init__(self):
self._output = []
def write(self, undo, do, prev):
for t in undo:
self._output.pop()
for t in do:
if t.english:
self._output.append(t.english)
else:
self._output.append('/'.join(t.rtfcre))
def get(self):
return ' '.join(self._output)
def clear(self):
del self._output[:]
d = StenoDictionary()
out = Output()
t = Translator()
dc = StenoDictionaryCollection()
dc.set_dicts([d])
t.set_dictionary(dc)
t.add_listener(out.write)
t.translate(stroke('S'))
self.assertEqual(out.get(), 'S')
t.translate(stroke('T'))
self.assertEqual(out.get(), 'S T')
t.translate(stroke('*'))
self.assertEqual(out.get(), 'S')
t.translate(stroke('*'))
self.assertEqual(out.get(), 'S') # Undo buffer ran out.
t.set_min_undo_length(3)
out.clear()
t.translate(stroke('S'))
self.assertEqual(out.get(), 'S')
t.translate(stroke('T'))
self.assertEqual(out.get(), 'S T')
t.translate(stroke('*'))
self.assertEqual(out.get(), 'S')
t.translate(stroke('*'))
self.assertEqual(out.get(), '')
t.translate(stroke('*'))
self.assertEqual(out.get(), '') # Undo buffer ran out.
out.clear()
d[('S',)] = 't1'
d[('T',)] = 't2'
d[('S', 'T')] = 't3'
t.translate(stroke('S'))
self.assertEqual(out.get(), 't1')
t.translate(stroke('T'))
self.assertEqual(out.get(), 't3')
t.translate(stroke('T'))
self.assertEqual(out.get(), 't3 t2')
t.translate(stroke('S'))
self.assertEqual(out.get(), 't3 t2 t1')
t.translate(stroke('*'))
self.assertEqual(out.get(), 't3 t2')
t.translate(stroke('*'))
self.assertEqual(out.get(), 't3')
t.translate(stroke('*'))
self.assertEqual(out.get(), 't1')
t.translate(stroke('*'))
self.assertEqual(out.get(), '')
t.translate(stroke('S'))
self.assertEqual(out.get(), 't1')
t.translate(stroke('T'))
self.assertEqual(out.get(), 't3')
t.translate(stroke('T'))
self.assertEqual(out.get(), 't3 t2')
d[('S', 'T', 'T')] = 't4'
d[('S', 'T', 'T', 'S')] = 't5'
t.translate(stroke('S'))
self.assertEqual(out.get(), 't5')
t.translate(stroke('*'))
self.assertEqual(out.get(), 't3 t2')
t.translate(stroke('*'))
self.assertEqual(out.get(), 't3')
t.translate(stroke('T'))
self.assertEqual(out.get(), 't4')
t.translate(stroke('S'))
self.assertEqual(out.get(), 't5')
t.translate(stroke('S'))
self.assertEqual(out.get(), 't5 t1')
t.translate(stroke('*'))
self.assertEqual(out.get(), 't5')
t.translate(stroke('*'))
self.assertEqual(out.get(), 't4')
t.translate(stroke('*'))
self.assertEqual(out.get(), 't3')
t.translate(stroke('*'))
self.assertEqual(out.get(), 't1')
t.translate(stroke('*'))
self.assertEqual(out.get(), '')
d.clear()
s = stroke('S')
t.translate(s)
t.translate(s)
t.translate(s)
t.translate(s)
s = stroke('*')
t.translate(s)
t.translate(s)
t.translate(s)
t.translate(s)
self.assertEqual(out.get(), 'S') # Not enough undo to clear output.
out.clear()
t.remove_listener(out.write)
t.translate(stroke('S'))
self.assertEqual(out.get(), '')
class StateTestCase(unittest.TestCase):
def setUp(self):
self.a = Translation([stroke('S')], None)
self.b = Translation([stroke('T'), stroke('-D')], None)
self.c = Translation([stroke('-Z'), stroke('P'), stroke('T*')], None)
def test_last_list0(self):
s = _State()
self.assertIsNone(s.last())
def test_last_list1(self):
s = _State()
s.translations = [self.a]
self.assertEqual(s.last(), self.a)
def test_last_list2(self):
s = _State()
s.translations = [self.a, self.b]
self.assertEqual(s.last(), self.b)
def test_last_tail1(self):
s = _State()
s.translations = [self.a]
s.tail = self.b
self.assertEqual(s.last(), self.a)
def test_last_tail0(self):
s = _State()
s.tail = self.b
self.assertEqual(s.last(), self.b)
def test_restrict_size_zero_on_empty(self):
s = _State()
s.restrict_size(0)
self.assertEquals(s.translations, [])
self.assertIsNone(s.tail)
def test_restrict_size_zero_on_one_stroke(self):
s = _State()
s.translations = [self.a]
s.restrict_size(0)
self.assertEquals(s.translations, [self.a])
self.assertIsNone(s.tail)
def test_restrict_size_to_exactly_one_stroke(self):
s = _State()
s.translations = [self.a]
s.restrict_size(1)
self.assertEquals(s.translations, [self.a])
self.assertIsNone(s.tail)
def test_restrict_size_to_one_on_two_strokes(self):
s = _State()
s.translations = [self.b]
s.restrict_size(1)
self.assertEquals(s.translations, [self.b])
self.assertIsNone(s.tail)
def test_restrict_size_to_one_on_two_translations(self):
s = _State()
s.translations = [self.b, self.a]
s.restrict_size(1)
self.assertEquals(s.translations, [self.a])
self.assertEqual(s.tail, self.b)
def test_restrict_size_to_one_on_two_translations_too_big(self):
s = _State()
s.translations = [self.a, self.b]
s.restrict_size(1)
self.assertEquals(s.translations, [self.b])
self.assertEqual(s.tail, self.a)
def test_restrict_size_lose_translations(self):
s = _State()
s.translations = [self.a, self.b, self.c]
s.restrict_size(2)
self.assertEquals(s.translations, [self.c])
self.assertEqual(s.tail, self.b)
def test_restrict_size_multiple_translations(self):
s = _State()
s.translations = [self.a, self.b, self.c]
s.restrict_size(5)
self.assertEquals(s.translations, [self.b, self.c])
self.assertEqual(s.tail, self.a)
class TranslateStrokeTestCase(unittest.TestCase):
class CaptureOutput(object):
output = namedtuple('output', 'undo do prev')
def __init__(self):
self.output = []
def __call__(self, undo, new, prev):
self.output = type(self).output(undo, new, prev)
def t(self, strokes):
"""A quick way to make a translation."""
strokes = [stroke(x) for x in strokes.split('/')]
return Translation(strokes, _lookup(strokes, self.dc, []))
def lt(self, translations):
"""A quick way to make a list of translations."""
return [self.t(x) for x in translations.split()]
def define(self, key, value):
key = normalize_steno(key)
self.d[key] = value
def translate(self, stroke):
_translate_stroke(stroke, self.s, self.dc, self.o)
def assertTranslations(self, expected):
self.assertEqual(self.s.translations, expected)
def assertOutput(self, undo, do, prev):
self.assertEqual(self.o.output, (undo, do, prev))
def setUp(self):
self.d = StenoDictionary()
self.dc = StenoDictionaryCollection()
self.dc.set_dicts([self.d])
self.s = _State()
self.o = type(self).CaptureOutput()
def test_first_stroke(self):
self.translate(stroke('-B'))
self.assertTranslations(self.lt('-B'))
self.assertOutput([], self.lt('-B'), None)
def test_second_stroke(self):
self.define('S/P', 'spiders')
self.s.translations = self.lt('S')
self.translate(stroke('-T'))
self.assertTranslations(self.lt('S -T'))
self.assertOutput([], self.lt('-T'), self.t('S'))
def test_second_stroke_tail(self):
self.s.tail = self.t('T/A/I/L')
self.translate(stroke('-E'))
self.assertTranslations(self.lt('E'))
self.assertOutput([], self.lt('E'), self.t('T/A/I/L'))
def test_with_translation(self):
self.define('S', 'is')
self.define('-T', 'that')
self.s.translations = self.lt('S')
self.translate(stroke('-T'))
self.assertTranslations(self.lt('S -T'))
self.assertOutput([], self.lt('-T'), self.t('S'))
self.assertEqual(self.o.output.do[0].english, 'that')
def test_finish_two_translation(self):
self.define('S/T', 'hello')
self.s.translations = self.lt('S')
self.translate(stroke('T'))
self.assertTranslations(self.lt('S/T'))
self.assertOutput(self.lt('S'), self.lt('S/T'), None)
self.assertEqual(self.o.output.do[0].english, 'hello')
self.assertEqual(self.o.output.do[0].replaced, self.lt('S'))
def test_finish_three_translation(self):
self.define('S/T/-B', 'bye')
self.s.translations = self.lt('S T')
self.translate(stroke('-B'))
self.assertTranslations(self.lt('S/T/-B'))
self.assertOutput(self.lt('S T'), self.lt('S/T/-B'), None)
self.assertEqual(self.o.output.do[0].english, 'bye')
self.assertEqual(self.o.output.do[0].replaced, self.lt('S T'))
def test_replace_translation(self):
self.define('S/T/-B', 'longer')
self.s.translations = self.lt('S/T')
self.translate(stroke('-B'))
self.assertTranslations(self.lt('S/T/-B'))
self.assertOutput(self.lt('S/T'), self.lt('S/T/-B'), None)
self.assertEqual(self.o.output.do[0].english, 'longer')
self.assertEqual(self.o.output.do[0].replaced, self.lt('S/T'))
def test_undo(self):
self.s.translations = self.lt('POP')
self.translate(stroke('*'))
self.assertTranslations([])
self.assertOutput(self.lt('POP'), [], None)
def test_empty_undo(self):
self.translate(stroke('*'))
self.assertTranslations([])
self.assertOutput([], [], None)
def test_undo_translation(self):
self.define('P/P', 'pop')
self.translate(stroke('P'))
self.translate(stroke('P'))
self.translate(stroke('*'))
self.assertTranslations(self.lt('P'))
self.assertOutput(self.lt('P/P'), self.lt('P'), None)
def test_undo_longer_translation(self):
self.define('P/P/-D', 'popped')
self.translate(stroke('P'))
self.translate(stroke('P'))
self.translate(stroke('-D'))
self.translate(stroke('*'))
self.assertTranslations(self.lt('P P'))
self.assertOutput(self.lt('P/P/-D'), self.lt('P P'), None)
def test_undo_tail(self):
self.s.tail = self.t('T/A/I/L')
self.translate(stroke('*'))
self.assertTranslations([])
self.assertOutput([], [], self.t('T/A/I/L'))
def test_suffix_folding(self):
self.define('K-L', 'look')
self.define('-G', '{^ing}')
lt = self.lt('K-LG')
lt[0].english = 'look {^ing}'
self.translate(stroke('K-LG'))
self.assertTranslations(lt)
def test_suffix_folding_multi_stroke(self):
self.define('E/HR', 'he will')
self.define('-S', '{^s}')
self.translate(stroke('E'))
self.translate(stroke('HR-S'))
output = ' '.join(t.english for t in self.s.translations)
self.assertEqual(output, 'he will {^s}')
def test_suffix_folding_doesnt_interfere(self):
self.define('E/HR', 'he will')
self.define('-S', '{^s}')
self.define('E', 'he')
self.define('HR-S', 'also')
self.translate(stroke('E'))
self.translate(stroke('HR-S'))
output = ' '.join(t.english for t in self.s.translations)
self.assertEqual(output, 'he also')
def test_suffix_folding_no_suffix(self):
self.define('K-L', 'look')
lt = self.lt('K-LG')
self.assertEqual(lt[0].english, None)
self.translate(stroke('K-LG'))
self.assertTranslations(lt)
def test_suffix_folding_no_main(self):
self.define('-G', '{^ing}')
lt = self.lt('K-LG')
self.assertEqual(lt[0].english, None)
self.translate(stroke('K-LG'))
self.assertTranslations(lt)
def test_retrospective_insert_space(self):
self.define('T/E/S/T', 'a longer key')
self.define('PER', 'perfect')
self.define('SWAEUGS', 'situation')
self.define('PER/SWAEUGS', 'persuasion')
self.define('SP*', '{*?}')
self.translate(stroke('PER'))
self.translate(stroke('SWAEUGS'))
self.translate(stroke('SP*'))
lt = self.lt('PER')
undo = self.lt('PER/SWAEUGS')
undo[0].replaced = lt
do = self.lt('SP*')
do[0].english = 'perfect situation'
do[0].is_retrospective_command = True
do[0].replaced = undo
self.assertTranslations(do)
self.assertOutput(undo, do, None)
def test_retrospective_delete_space(self):
self.define('T/E/S/T', 'a longer key')
self.define('K', 'kick')
self.define('B', 'back')
self.define('SP*', '{*!}')
self.translate(stroke('K'))
self.translate(stroke('B'))
self.translate(stroke('SP*'))
undo = self.lt('K B')
do = self.lt('SP*')
do[0].english = 'kickback'
do[0].is_retrospective_command = True
do[0].replaced = undo
self.assertTranslations(do)
self.assertOutput(undo, do, None)
def test_retrospective_toggle_asterisk(self):
self.define('T/E/S/T', 'a longer key')
self.define('S', 'see')
self.define('S*', 'sea')
self.define('A*', '{*}')
self.translate(stroke('S'))
self.translate(stroke('A*'))
undo = self.lt('S')
do = self.lt('S*')
self.assertTranslations(do)
self.assertOutput(undo, do, None)
def test_repeat_last_stroke1(self):
self.define('T/E/S/T', 'a longer key')
self.define('TH', 'this')
self.define('R*', '{*+}')
self.translate(stroke('TH'))
self.translate(stroke('R*'))
undo = []
do = self.lt('TH')
state = self.lt('TH TH')
self.assertTranslations(state)
self.assertOutput(undo, do, do[0])
def test_repeat_last_stroke2(self):
self.define('T/E/S/T', 'a longer key')
self.define('THA', 'that')
self.define('R*', '{*+}')
self.translate(stroke('THA'))
self.translate(stroke('R*'))
undo = []
do = self.lt('THA')
state = self.lt('THA THA')
self.assertTranslations(state)
self.assertOutput(undo, do, do[0])
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
glmcdona/meddle | examples/base/Lib/csv.py | 176 | 16344 |
"""
csv.py - read/write/investigate CSV files
"""
import re
from functools import reduce
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "__doc__", "excel", "excel_tab",
"field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
"""Describe an Excel dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError, e:
# We do this for compatibility with py2.3
raise Error(str(e))
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)
class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)
class DictReader:
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
self._fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
self.line_num = 0
def __iter__(self):
return self
@property
def fieldnames(self):
if self._fieldnames is None:
try:
self._fieldnames = self.reader.next()
except StopIteration:
pass
self.line_num = self.reader.line_num
return self._fieldnames
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
def next(self):
if self.line_num == 0:
# Used only for its side effect.
self.fieldnames
row = self.reader.next()
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = self.reader.next()
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
class DictWriter:
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError, \
("extrasaction (%s) must be 'raise' or 'ignore'" %
extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args, **kwds)
def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
wrong_fields = [k for k in rowdict if k not in self.fieldnames]
if wrong_fields:
raise ValueError("dict contains fields not in fieldnames: " +
", ".join(wrong_fields))
return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
def writerows(self, rowdicts):
rows = []
for rowdict in rowdicts:
rows.append(self._dict_to_list(rowdict))
return self.writer.writerows(rows)
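# --- Illustrative example (added for clarity; not part of the original
# module). Round-tripping a row through DictWriter and DictReader via
# StringIO; the field names are invented. Note that values come back as
# strings on the read side.
def _dict_roundtrip_example():
    buf = StringIO()
    w = DictWriter(buf, fieldnames=['name', 'count'])
    w.writeheader()
    w.writerow({'name': 'spam', 'count': 3})
    buf.seek(0)
    return list(DictReader(buf))  # -> [{'name': 'spam', 'count': '3'}]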
# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error, "Could not determine delimiter"
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
for m in matches:
n = regexp.groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = regexp.groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = regexp.groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = reduce(lambda a, b, quotes = quotes:
(quotes[a] > quotes[b]) and a or b, quotes.keys())
if delims:
delim = reduce(lambda a, b, delims = delims:
(delims[a] > delims[b]) and a or b, delims.keys())
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
# if we see an extra quote between delimiters, we've got a
# double quoted format
dq_regexp = re.compile(r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
{'delim':delim, 'quote':quotechar}, re.MULTILINE)
if dq_regexp.search(data):
doublequote = True
else:
doublequote = False
return (quotechar, doublequote, delim, skipinitialspace)
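    # --- Illustrative note (added for clarity; not part of the original).
    # On a sample like:
    #     ,'some text',
    # the first pattern above binds delim=','  space=''  quote="'", so this
    # method reports quotechar "'" and delimiter ','.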
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = filter(None, data.split('\n'))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = charFrequency[char].items()
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
items)
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- reduce(lambda a, b: (0, a[1] + b[1]),
items)[1])
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = delims.keys()[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = rdr.next() # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in columnTypes.keys():
for thisType in [int, long, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
# treat longs as ints
if thisType == long:
thisType = int
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
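# --- Illustrative example (added for clarity; not part of the original
# module). A minimal sketch of Sniffer on an in-memory sample; the data is
# invented.
if __name__ == '__main__':
    _sample = 'name,count\r\nspam,3\r\neggs,7\r\nham,12\r\n'
    _sniffer = Sniffer()
    print _sniffer.sniff(_sample).delimiter   # -> ','
    print _sniffer.has_header(_sample)        # -> True (first row looks like labels)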
| mit |
poulpito/Flexget | flexget/plugins/internal/urlrewriting.py | 7 | 4808 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('urlrewriter')
class UrlRewritingError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class PluginUrlRewriting(object):
"""
Provides URL rewriting framework
"""
def __init__(self):
self.disabled_rewriters = []
def on_task_urlrewrite(self, task, config):
log.debug('Checking %s entries', len(task.accepted))
# try to urlrewrite all accepted
for entry in task.accepted:
try:
self.url_rewrite(task, entry)
except UrlRewritingError as e:
log.warning(e.value)
entry.fail()
# API method
def url_rewritable(self, task, entry):
"""Return True if entry is urlrewritable by registered rewriter."""
for urlrewriter in plugin.get_plugins(interface='urlrewriter'):
if urlrewriter.name in self.disabled_rewriters:
log.trace('Skipping rewriter %s since it\'s disabled', urlrewriter.name)
continue
log.trace('checking urlrewriter %s', urlrewriter.name)
if urlrewriter.instance.url_rewritable(task, entry):
return True
return False
# API method - why priority though?
@plugin.priority(255)
def url_rewrite(self, task, entry):
"""Rewrites given entry url. Raises UrlRewritingError if failed."""
tries = 0
while self.url_rewritable(task, entry) and entry.accepted:
tries += 1
if tries > 20:
raise UrlRewritingError('URL rewriting was left in infinite loop while rewriting url for %s, '
'some rewriter is returning always True' % entry)
for urlrewriter in plugin.get_plugins(interface='urlrewriter'):
name = urlrewriter.name
if name in self.disabled_rewriters:
log.trace('Skipping rewriter %s since it\'s disabled', name)
continue
try:
if urlrewriter.instance.url_rewritable(task, entry):
old_url = entry['url']
log.debug('Url rewriting %s' % entry['url'])
urlrewriter.instance.url_rewrite(task, entry)
if entry['url'] != old_url:
if entry.get('urls') and old_url in entry.get('urls'):
entry['urls'][entry['urls'].index(old_url)] = entry['url']
log.info('Entry \'%s\' URL rewritten to %s (with %s)', entry['title'], entry['url'], name)
except UrlRewritingError as r:
# increase failcount
# count = self.shared_cache.storedefault(entry['url'], 1)
# count += 1
raise UrlRewritingError('URL rewriting %s failed: %s' % (name, r.value))
except plugin.PluginError as e:
raise UrlRewritingError('URL rewriting %s failed: %s' % (name, e.value))
except Exception as e:
log.exception(e)
raise UrlRewritingError('%s: Internal error with url %s' % (name, entry['url']))
class DisableUrlRewriter(object):
"""Disable certain urlrewriters."""
schema = {'type': 'array', 'items': {'type': 'string'}}
def on_task_start(self, task, config):
urlrewrite = plugin.get_plugin_by_name('urlrewriting')['instance']
for disable in config:
try:
plugin.get_plugin_by_name(disable)
except plugin.DependencyError:
log.critical('Unknown url-rewriter %s', disable)
continue
log.debug('Disabling url rewriter %s', disable)
urlrewrite.disabled_rewriters.append(disable)
def on_task_exit(self, task, config):
urlrewrite = plugin.get_plugin_by_name('urlrewriting')['instance']
for disable in config:
log.debug('Enabling url rewriter %s', disable)
try:
urlrewrite.disabled_rewriters.remove(disable)
except ValueError:
log.debug('%s does not exist', disable)
on_task_abort = on_task_exit
@event('plugin.register')
def register_plugin():
plugin.register(PluginUrlRewriting, 'urlrewriting', builtin=True, api_ver=2)
plugin.register(DisableUrlRewriter, 'disable_urlrewriters', api_ver=2)
plugin.register_task_phase('urlrewrite', before='download')
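# --- Illustrative sketch (added for clarity; not part of the original
# plugin). A hypothetical rewriter implementing the 'urlrewriter' interface
# driven by PluginUrlRewriting above; the class name and URLs are invented.
class ExampleRewriter(object):
    def url_rewritable(self, task, entry):
        return entry['url'].startswith('http://example.com/details/')

    def url_rewrite(self, task, entry):
        # must actually change entry['url'], or url_rewritable keeps
        # returning True and trips the infinite-loop guard above
        entry['url'] = entry['url'].replace('/details/', '/download/')

# plugin.register(ExampleRewriter, 'example_rewriter',
#                 interfaces=['urlrewriter'], api_ver=2)  # registration sketch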
| mit |
wvdd007/robomongo | tests/gtest-1.7.0/test/gtest_xml_test_utils.py | 1815 | 8876 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
"""
Base class for tests of Google Test's XML output functionality.
"""
def AssertEquivalentNodes(self, expected_node, actual_node):
"""
Asserts that actual_node (a DOM node object) is equivalent to
expected_node (another DOM node object), in that either both of
them are CDATA nodes and have the same value, or both are DOM
elements and actual_node meets all of the following conditions:
* It has the same tag name as expected_node.
* It has the same set of attributes as expected_node, each with
the same value as the corresponding attribute of expected_node.
Exceptions are any attribute named "time", which needs only be
convertible to a floating-point number and any attribute named
"type_param" which only has to be non-empty.
* It has an equivalent set of child nodes (including elements and
CDATA sections) as expected_node. Note that we ignore the
order of the children as they are not guaranteed to be in any
particular order.
"""
if expected_node.nodeType == Node.CDATA_SECTION_NODE:
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
return
self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
self.assertEquals(expected_node.tagName, actual_node.tagName)
expected_attributes = expected_node.attributes
actual_attributes = actual_node .attributes
self.assertEquals(
expected_attributes.length, actual_attributes.length,
'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
actual_node.tagName, expected_attributes.keys(),
actual_attributes.keys()))
for i in range(expected_attributes.length):
expected_attr = expected_attributes.item(i)
actual_attr = actual_attributes.get(expected_attr.name)
self.assert_(
actual_attr is not None,
'expected attribute %s not found in element %s' %
(expected_attr.name, actual_node.tagName))
self.assertEquals(
expected_attr.value, actual_attr.value,
' values of attribute %s in element %s differ: %s vs %s' %
(expected_attr.name, actual_node.tagName,
expected_attr.value, actual_attr.value))
expected_children = self._GetChildren(expected_node)
actual_children = self._GetChildren(actual_node)
self.assertEquals(
len(expected_children), len(actual_children),
'number of child elements differ in element ' + actual_node.tagName)
for child_id, child in expected_children.iteritems():
self.assert_(child_id in actual_children,
'<%s> is not in <%s> (in element %s)' %
(child_id, actual_children, actual_node.tagName))
self.AssertEquivalentNodes(child, actual_children[child_id])
identifying_attribute = {
'testsuites': 'name',
'testsuite': 'name',
'testcase': 'name',
'failure': 'message',
}
def _GetChildren(self, element):
"""
Fetches all of the child nodes of element, a DOM Element object.
Returns them as the values of a dictionary keyed by the IDs of the
children. For <testsuites>, <testsuite> and <testcase> elements, the ID
is the value of their "name" attribute; for <failure> elements, it is
the value of the "message" attribute; CDATA sections and non-whitespace
text nodes are concatenated into a single CDATA section with ID
"detail". An exception is raised if any element other than the above
four is encountered, if two child elements with the same identifying
attributes are encountered, or if any other type of node is encountered.
"""
children = {}
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.assert_(child.tagName in self.identifying_attribute,
'Encountered unknown element <%s>' % child.tagName)
childID = child.getAttribute(self.identifying_attribute[child.tagName])
self.assert_(childID not in children)
children[childID] = child
elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
if 'detail' not in children:
if (child.nodeType == Node.CDATA_SECTION_NODE or
not child.nodeValue.isspace()):
children['detail'] = child.ownerDocument.createCDATASection(
child.nodeValue)
else:
children['detail'].nodeValue += child.nodeValue
else:
self.fail('Encountered unexpected node type %d' % child.nodeType)
return children
def NormalizeXml(self, element):
"""
Normalizes Google Test's XML output to eliminate references to transient
information that may change from run to run.
* The "time" attribute of <testsuites>, <testsuite> and <testcase>
elements is replaced with a single asterisk, if it contains
only digit characters.
* The "timestamp" attribute of <testsuites> elements is replaced with a
single asterisk, if it contains a valid ISO8601 datetime value.
* The "type_param" attribute of <testcase> elements is replaced with a
single asterisk (if it is non-empty) as it is the type name returned
by the compiler and is platform dependent.
* The line info reported in the first line of the "message"
attribute and CDATA section of <failure> elements is replaced with the
file's basename and a single asterisk for the line number.
* The directory names in file paths are removed.
* The stack traces are removed.
"""
if element.tagName == 'testsuites':
timestamp = element.getAttributeNode('timestamp')
timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
'*', timestamp.value)
if element.tagName in ('testsuites', 'testsuite', 'testcase'):
time = element.getAttributeNode('time')
time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
type_param = element.getAttributeNode('type_param')
if type_param and type_param.value:
type_param.value = '*'
elif element.tagName == 'failure':
source_line_pat = r'^.*[/\\](.*:)\d+\n'
# Replaces the source line information with a normalized form.
message = element.getAttributeNode('message')
message.value = re.sub(source_line_pat, '\\1*\n', message.value)
for child in element.childNodes:
if child.nodeType == Node.CDATA_SECTION_NODE:
# Replaces the source line information with a normalized form.
cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
# Removes the actual stack trace.
child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
'', cdata)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.NormalizeXml(child)
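# --- Illustrative example (added for clarity; not part of the original
# utilities). The substitutions NormalizeXml applies above reduce to regex
# rewrites such as these; the inputs are invented.
if __name__ == '__main__':
  print re.sub(r'^\d+(\.\d+)?$', '*', '12.345')               # time -> '*'
  print re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$', '*',
               '2014-01-01T00:00:00')                         # timestamp -> '*'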
| gpl-3.0 |
kswiat/django | django/contrib/gis/gdal/driver.py | 104 | 2477 | # prerequisites imports
from ctypes import c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils import six
from django.utils.encoding import force_bytes
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Dr_* routines are relevant here.
class Driver(GDALBase):
"Wraps an OGR Data Source Driver."
# Case-insensitive aliases for OGR Drivers.
_alias = {'esri': 'ESRI Shapefile',
'shp': 'ESRI Shapefile',
'shape': 'ESRI Shapefile',
'tiger': 'TIGER',
'tiger/line': 'TIGER',
}
def __init__(self, dr_input):
"Initializes an OGR driver on either a string or integer input."
if isinstance(dr_input, six.string_types):
# If a string name of the driver was passed in
self._register()
# Checking the alias dictionary (case-insensitive) to see if an alias
# exists for the given driver.
if dr_input.lower() in self._alias:
name = self._alias[dr_input.lower()]
else:
name = dr_input
# Attempting to get the OGR driver by the string name.
dr = capi.get_driver_by_name(force_bytes(name))
elif isinstance(dr_input, int):
self._register()
dr = capi.get_driver(dr_input)
elif isinstance(dr_input, c_void_p):
dr = dr_input
else:
raise OGRException('Unrecognized input type for OGR Driver: %s' % str(type(dr_input)))
# Making sure we get a valid pointer to the OGR Driver
if not dr:
raise OGRException('Could not initialize OGR Driver on input: %s' % str(dr_input))
self.ptr = dr
def __str__(self):
"Returns the string name of the OGR Driver."
return capi.get_driver_name(self.ptr)
def _register(self):
"Attempts to register all the data source drivers."
# Only register all if the driver count is 0 (or else all drivers
# will be registered over and over again)
if not self.driver_count:
capi.register_all()
# Driver properties
@property
def driver_count(self):
"Returns the number of OGR data source drivers registered."
return capi.get_driver_count()
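# --- Illustrative sketch (added for clarity; not part of the original
# module). Resolving a driver through the case-insensitive alias table above;
# it needs a working GDAL/OGR install, so it is shown doctest-style only:
# >>> drv = Driver('shp')   # alias for 'ESRI Shapefile'
# >>> str(drv)
# 'ESRI Shapefile'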
| bsd-3-clause |
rschnapka/odoo | addons/crm/crm_segmentation.py | 56 | 9030 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv,orm
class crm_segmentation(osv.osv):
'''
A segmentation is a tool to automatically assign categories on partners.
These assignations are based on criterions.
'''
_name = "crm.segmentation"
_description = "Partner Segmentation"
_columns = {
'name': fields.char('Name', size=64, required=True, help='The name of the segmentation.'),
'description': fields.text('Description'),
'categ_id': fields.many2one('res.partner.category', 'Partner Category',\
required=True, help='The partner category that will be \
added to partners that match the segmentation criterions after computation.'),
'exclusif': fields.boolean('Exclusive', help='Check if the category is limited to partners that match the segmentation criterions.\
\nIf checked, remove the category from partners that doesn\'t match segmentation criterions'),
'state': fields.selection([('not running','Not Running'),\
('running','Running')], 'Execution Status', readonly=True),
'partner_id': fields.integer('Max Partner ID processed'),
'segmentation_line': fields.one2many('crm.segmentation.line', \
'segmentation_id', 'Criteria', required=True),
'sales_purchase_active': fields.boolean('Use The Sales Purchase Rules', help='Check if you want to use this tab as part of the segmentation rule. If not checked, the criteria beneath will be ignored')
}
_defaults = {
'partner_id': lambda *a: 0,
'state': lambda *a: 'not running',
}
def process_continue(self, cr, uid, ids, start=False):
""" @param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Process continue’s IDs"""
partner_obj = self.pool.get('res.partner')
categs = self.read(cr, uid, ids, ['categ_id', 'exclusif', 'partner_id',\
'sales_purchase_active', 'profiling_active'])
for categ in categs:
if start:
if categ['exclusif']:
cr.execute('delete from res_partner_res_partner_category_rel \
where category_id=%s', (categ['categ_id'][0],))
id = categ['id']
cr.execute('select id from res_partner order by id ')
partners = [x[0] for x in cr.fetchall()]
if categ['sales_purchase_active']:
to_remove_list=[]
cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
line_ids = [x[0] for x in cr.fetchall()]
for pid in partners:
if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
to_remove_list.append(pid)
for pid in to_remove_list:
partners.remove(pid)
for partner in partner_obj.browse(cr, uid, partners):
category_ids = [categ_id.id for categ_id in partner.category_id]
if categ['categ_id'][0] not in category_ids:
cr.execute('insert into res_partner_res_partner_category_rel (category_id,partner_id) \
values (%s,%s)', (categ['categ_id'][0], partner.id))
self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
return True
def process_stop(self, cr, uid, ids, *args):
""" @param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Process stop’s IDs"""
return self.write(cr, uid, ids, {'state':'not running', 'partner_id':0})
def process_start(self, cr, uid, ids, *args):
""" @param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Process start’s IDs """
self.write(cr, uid, ids, {'state':'running', 'partner_id':0})
return self.process_continue(cr, uid, ids, start=True)
crm_segmentation()
class crm_segmentation_line(osv.osv):
""" Segmentation line """
_name = "crm.segmentation.line"
_description = "Segmentation line"
_columns = {
'name': fields.char('Rule Name', size=64, required=True),
'segmentation_id': fields.many2one('crm.segmentation', 'Segmentation'),
'expr_name': fields.selection([('sale','Sale Amount'),
('purchase','Purchase Amount')], 'Control Variable', size=64, required=True),
'expr_operator': fields.selection([('<','<'),('=','='),('>','>')], 'Operator', required=True),
'expr_value': fields.float('Value', required=True),
'operator': fields.selection([('and','Mandatory Expression'),\
('or','Optional Expression')],'Mandatory / Optional', required=True),
}
_defaults = {
'expr_name': lambda *a: 'sale',
'expr_operator': lambda *a: '>',
'operator': lambda *a: 'and'
}
def test(self, cr, uid, ids, partner_id):
""" @param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Test’s IDs """
expression = {'<': lambda x,y: x<y, '=':lambda x,y:x==y, '>':lambda x,y:x>y}
ok = False
lst = self.read(cr, uid, ids)
for l in lst:
cr.execute('select * from ir_module_module where name=%s and state=%s', ('account','installed'))
if cr.fetchone():
if l['expr_name']=='sale':
cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
'FROM account_invoice_line l, account_invoice i ' \
'WHERE (l.invoice_id = i.id) ' \
'AND i.partner_id = %s '\
'AND i.type = \'out_invoice\'',
(partner_id,))
value = cr.fetchone()[0] or 0.0
cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
'FROM account_invoice_line l, account_invoice i ' \
'WHERE (l.invoice_id = i.id) ' \
'AND i.partner_id = %s '\
'AND i.type = \'out_refund\'',
(partner_id,))
value -= cr.fetchone()[0] or 0.0
elif l['expr_name']=='purchase':
cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
'FROM account_invoice_line l, account_invoice i ' \
'WHERE (l.invoice_id = i.id) ' \
'AND i.partner_id = %s '\
'AND i.type = \'in_invoice\'',
(partner_id,))
value = cr.fetchone()[0] or 0.0
cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
'FROM account_invoice_line l, account_invoice i ' \
'WHERE (l.invoice_id = i.id) ' \
'AND i.partner_id = %s '\
'AND i.type = \'in_refund\'',
(partner_id,))
value -= cr.fetchone()[0] or 0.0
res = expression[l['expr_operator']](value, l['expr_value'])
if (not res) and (l['operator']=='and'):
return False
if res:
return True
return True
crm_segmentation_line()
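# --- Illustrative note (added for clarity; not part of the original module).
# A criterion line such as (expr_name='sale', expr_operator='>',
# expr_value=1000.0) is evaluated by test() essentially as
#     expression['>'](total_sale_amount, 1000.0)
# so a partner whose summed out_invoice amount is 1500.0 satisfies it.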
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dongjoon-hyun/tensorflow | tensorflow/python/ops/stateless_random_ops.py | 3 | 9709 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stateless random ops which take seed as a tensor input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
ops.NotDifferentiable("StatelessMultinomial")
ops.NotDifferentiable("StatelessRandomNormal")
ops.NotDifferentiable("StatelessRandomUniform")
ops.NotDifferentiable("StatelessRandomUniformInt")
ops.NotDifferentiable("StatelessTruncatedNormal")
@tf_export("random.stateless_uniform")
def stateless_random_uniform(shape,
seed,
minval=0,
maxval=None,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values from a uniform distribution.
This is a stateless version of `tf.random_uniform`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
range of random values to generate. Defaults to 0.
maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on the
range of random values to generate. Defaults to 1 if `dtype` is floating
point.
dtype: The type of the output: `float16`, `float32`, `float64`, `int32`, or
`int64`.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
Raises:
ValueError: If `dtype` is integral and `maxval` is not specified.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32,
dtypes.float64, dtypes.int32, dtypes.int64):
raise ValueError("Invalid dtype %r" % dtype)
if maxval is None:
if dtype.is_integer:
raise ValueError("Must specify maxval for integer dtype %r" % dtype)
maxval = 1
with ops.name_scope(name, "stateless_random_uniform",
[shape, seed, minval, maxval]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
if dtype.is_integer:
return gen_stateless_random_ops.stateless_random_uniform_int(
shape, seed=seed, minval=minval, maxval=maxval, name=name)
else:
rnd = gen_stateless_random_ops.stateless_random_uniform(
shape, seed=seed, dtype=dtype)
return math_ops.add(rnd * (maxval - minval), minval, name=name)
@tf_export("random.stateless_normal")
def stateless_random_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values from a normal distribution.
This is a stateless version of `tf.random_normal`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "stateless_random_normal",
[shape, seed, mean, stddev]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = gen_stateless_random_ops.stateless_random_normal(shape, seed, dtype)
return math_ops.add(rnd * stddev, mean, name=name)
@tf_export("random.stateless_truncated_normal")
def stateless_truncated_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values, truncated normally distributed.
This is a stateless version of `tf.truncated_normal`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] integer Tensor of seeds to the random number generator.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution, before truncation.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "stateless_truncated_normal",
[shape, seed, mean, stddev]) as name:
shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = gen_stateless_random_ops.stateless_truncated_normal(
shape, seed, dtype)
return math_ops.add(rnd * stddev, mean, name=name)
@tf_export("random.stateless_multinomial")
def stateless_multinomial(logits,
num_samples,
seed,
output_dtype=dtypes.int64,
name=None):
"""Draws deterministic pseudorandom samples from a multinomial distribution.
This is a stateless version of `tf.multinomial`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.random.stateless_multinomial(
tf.log([[10., 10.]]), 5, seed=[7, 17])
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A shape [2] integer Tensor of seeds to the random number generator.
name: Optional name for the operation.
output_dtype: integer type to use for the output. Defaults to int64.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "stateless_multinomial", [logits, seed]):
logits = ops.convert_to_tensor(logits, name="logits")
return gen_stateless_random_ops.stateless_multinomial(
logits, num_samples, seed, output_dtype=output_dtype)
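# --- Illustrative sketch (added for clarity; not part of the original
# module). The determinism promised by the docstrings above, with invented
# values:
#   a = stateless_random_uniform([2], seed=[1, 2])
#   b = stateless_random_uniform([2], seed=[1, 2])
#   # a and b evaluate to identical tensors, run after run, CPU or GPU.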
| apache-2.0 |
keyvalued/keyvalued | keyvalued.py | 1 | 7536 | # Copyright (c) 2014, William Pitcock <nenolod@dereferenced.org>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from collections import OrderedDict
import simplejson as json
import asyncio
import sys
import time
import os
class ExpiringDict(OrderedDict):
def __init__(self, max_len, max_age_seconds):
OrderedDict.__init__(self)
self.max_len = max_len
self.max_age = max_age_seconds
def __contains__(self, key):
try:
item = OrderedDict.__getitem__(self, key)
if time.time() - item[1] < self.max_age:
return True
else:
del self[key]
except KeyError:
pass
return False
def __getitem__(self, key, with_age=False, max_age=None):
item = OrderedDict.__getitem__(self, key)
item_age = time.time() - item[1]
if not max_age:
max_age = self.max_age
if item_age < max_age:
if with_age:
return item[0], item_age
else:
return item[0]
else:
del self[key]
raise KeyError(key)
def __setitem__(self, key, value):
if len(self) == self.max_len:
self.popitem(last=False)
OrderedDict.__setitem__(self, key, (value, time.time()))
def pop(self, key, default=None):
try:
item = OrderedDict.__getitem__(self, key)
del self[key]
return item[0]
except KeyError:
return default
def get(self, key, default=None, with_age=False, max_age=None):
try:
return self.__getitem__(key, with_age, max_age)
except KeyError:
if with_age:
return default, None
else:
return default
def put(self, key, value, ts=None):
if len(self) == self.max_len:
self.popitem(last=False)
if not ts:
ts = time.time()
OrderedDict.__setitem__(self, key, (value, ts))
def items(self):
r = []
for key in self:
try:
r.append((key, self[key]))
except KeyError:
pass
return r
def values(self):
r = []
for key in self:
try:
r.append(self[key])
except KeyError:
pass
return r
def fromkeys(self):
raise NotImplementedError()
def iteritems(self):
raise NotImplementedError()
def itervalues(self):
raise NotImplementedError()
def viewitems(self):
raise NotImplementedError()
def viewkeys(self):
raise NotImplementedError()
def viewvalues(self):
raise NotImplementedError()
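# --- Illustrative note (added for clarity; not part of the original).
# ExpiringDict behaves as a bounded mapping with a per-entry TTL:
#   d = ExpiringDict(max_len=2, max_age_seconds=60)
#   d['a'] = 1; d['b'] = 2; d['c'] = 3   # max_len reached, oldest ('a') evicted
#   'a' in d  -> False;  d['c']  -> 3 (until 60 seconds have elapsed)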
indexes = dict()
class Client(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
def lock(self, index, key, token, max_len=256000, max_age=600):
idx = indexes.get(index, ExpiringDict(max_len=max_len, max_age_seconds=max_age))
indexes[index] = idx
ldict = idx.get('_locks', ExpiringDict(max_len=256000, max_age_seconds=10))
lock = None
if key in ldict:
lock = ldict[key]
if not lock and token:
lock = token
if not lock:
return None
ldict[key] = lock
indexes[index]['_locks'] = ldict
return lock
def unlock(self, index, key, token, max_len=256000, max_age=600):
idx = indexes.get(index, ExpiringDict(max_len=max_len, max_age_seconds=max_age))
indexes[index] = idx
ldict = idx.get('_locks', ExpiringDict(max_len=256000, max_age_seconds=10))
lock = None
if key in ldict:
lock = ldict[key]
if not lock:
return False
if lock == token:
ldict.pop(key)
else:
return False
indexes[index]['_locks'] = ldict
return True
def lookup(self, index, key):
idx = indexes.get(index, None)
if not idx:
return self.error('Index not found')
lock = self.lock(index, key, None)
if lock:
return self.reply({'index': index, 'key': key, '_locked': lock})
obj = idx.get(key)
return self.reply({'index': index, 'key': key, '_source': obj, '_version': 1})
def index(self, index, key, obj, expiry=600, max_len=256000, max_age=600):
idx = indexes.get(index, ExpiringDict(max_len=max_len, max_age_seconds=max_age))
idx.put(key, obj, time.time() + expiry)
indexes[index] = idx
return self.reply({'index': index, 'key': key, '_source': obj, '_version': 1})
def r_lock_op(self, index, key, token):
l = self.lock(index, key, token)
if l != token:
return self.reply({'index': index, 'key': key, '_locked': l})
return self.reply({'index': index, 'key': key, 'locked': True, 'token': l})
def r_unlock_op(self, index, key, token):
s = self.unlock(index, key, token)
return self.reply({'index': index, 'key': key, 'unlocked': s})
# rough idea for get/index ops:
# > {'index': '20141217.hits', 'key': 12}
# < {'index': '20141217.hits', 'key': 12, '_source': {'count': 12}}
def handle_get_or_index(self, o):
if not o['key'] or not o['index']:
return self.error('no index or key')
if o.get('_source', None):
return self.index(o['index'], o['key'], o['_source'], expiry=o.get('_expiry', 600), max_len=o.get('index.max_len', 256000), max_age=o.get('index.max_age', 600))
return self.lookup(o['index'], o['key'])
def data_received(self, data):
o = json.loads(data.decode('UTF-8', 'replace').strip('\r\n'))
act = o.get('_action', None)
if not act:
return self.handle_get_or_index(o)
if act == 'r_lock':
return self.r_lock_op(o['index'], o['key'], o['token'])
if act == 'r_unlock':
return self.r_unlock_op(o['index'], o['key'], o['token'])
return self.error('Unknown action requested')
def reply(self, message):
self.transport.write(bytes(json.dumps(message) + "\r\n", 'UTF-8'))
self.transport.close()
return True
def error(self, error_message):
self.reply({'error': error_message})
return False
def main():
try:
os.unlink('/tmp/keyvalued.sock')
except:
pass
loop = asyncio.get_event_loop()
coro = loop.create_unix_server(Client, '/tmp/keyvalued.sock', backlog=65535)
server = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
if __name__ == '__main__':
main()
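# --- Illustrative sketch (added for clarity; not part of the original
# daemon). A client speaking the one-request-per-connection JSON protocol
# sketched above; the index name and payload are invented.
#   import socket, json
#   s = socket.socket(socket.AF_UNIX)
#   s.connect('/tmp/keyvalued.sock')
#   s.sendall(bytes(json.dumps({'index': '20141217.hits', 'key': 12,
#                               '_source': {'count': 12}}) + '\r\n', 'UTF-8'))
#   print(s.recv(65535))  # indexing reply; lock ops use '_action': 'r_lock'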
| isc |
Shabo0oli/jajoo | jajoo/web/views.py | 1 | 5813 | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import logout, authenticate, login
from django.shortcuts import redirect
from django.contrib.auth.models import User
from .models import UserInfo, Place, Comment, Booking
from datetime import datetime, date, time
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
allplace = Place.objects.all()
context = {}
context['places'] = allplace
return render(request, 'index.html', context)
@csrf_exempt
def login_view(request):
if 'username' in request.POST and 'password' in request.POST:
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
usrInfo = UserInfo.objects.filter(Username=user)
context = {}
#context['avatar'] = usrInfo[0].Avatar.url[4:]
allplace = Place.objects.all()
context['places'] = allplace
request.session['userInfo'] = usrInfo[0].Avatar.url[4:]
request.session['user'] = user.username
return render(request, 'index.html', context)
else:
context = {}
context['message'] = 'The entered username or password is incorrect'
return render(request, 'login.html', context)
else:
context = {}
return render(request, 'login.html', context)
def register(request):
if 'username' in request.POST and 'password' in request.POST and 'email' in request.POST:
if User.objects.filter(email=request.POST['email']).exists() or User.objects.filter(username=request.POST['username']).exists():
context = {}
context['message'] = 'An account with these details already exists'
return render(request, 'register.html', context)
else:
username = request.POST['username']
password = request.POST['password']
phone = request.POST['phone']
email = request.POST['email']
user = User.objects.create_user(username, email, password)
user.save()
info = UserInfo(Username=user, Phone=phone)
info.save()
context = {}
return redirect('/')
else:
context = {}
return render(request, 'register.html', context)
def logout_view(request):
logout(request)
return redirect('/')
def place(request, placeid):
thisplace = Place.objects.get(id=placeid)
comments = Comment.objects.filter(Place_id=placeid)
context = {}
context['place'] = thisplace
context['comments'] = comments
return render(request, 'place.html', context)
def search(request):
context = {}
req = request.POST.get('srchterm', False)
result = Place.objects.filter(Address__contains=req)
if not result:
context['notFind'] = True
context['places'] = result
return render(request, 'index.html', context)
@login_required
def comment(request):
text = request.POST['textcomment']
date = datetime.now()
writer = request.user
complace = Place.objects.get(id=request.POST['place'])
com = Comment(Text=text, CreationDate=date, Writer=writer, Place=complace)
com.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required
def booking(request):
checkindate = request.POST['CheckInDate_0']
checkintime = request.POST['CheckInDate_1']
y,m,d = checkindate.split('-')
h,t,s = checkintime.split(':')
checkindatetime =datetime.combine(date(int(y),int(m),int(d)), time(int(h),int(t),int(s)))
checkoutdate = request.POST['CheckOutDate_0']
checkouttime = request.POST['CheckOutDate_1']
y, m, d = checkoutdate.split('-')
h, t, s = checkouttime.split(':')
checkoutdatetime = datetime.combine(date(int(y), int(m), int(d)), time(int(h), int(t), int(s)))
bookplace = Place.objects.get(id=request.POST['place'])
book =Booking(CheckInDate=checkindatetime, CheckOutDate=checkoutdatetime, Guest=request.user, Place=bookplace)
book.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def myrequest(request):
if 'accept' in request.POST:
b = Booking.objects.get(id=request.POST['accept'])
b.Status = 'Accept'
b.save()
elif 'reject' in request.POST:
b = Booking.objects.get(id=request.POST['reject'])
b.Status = 'Reject'
b.save()
context = {}
req = Booking.objects.filter(Place__Owner=request.user)
mybooking = Booking.objects.filter(Guest=request.user)
    # assign the (possibly empty) querysets directly; empty querysets are
    # already falsy in templates, and the previous `if not ...` flags were
    # dead code, immediately overwritten by these assignments
    context['requests'] = req
    context['mybooking'] = mybooking
return render(request, 'myrequest.html', context)
def addplace(request):
context = {}
if request.method == "POST":
costperday = request.POST['CostPerDay']
address = request.POST['Address']
bedroom = request.POST['Bedroom']
haswifi = bool(request.POST.get('HasWifi') == '1')
hasparking = bool(request.POST.get('HasParking') == '1')
hasbath = bool(request.POST.get('HasBath') == '1')
hastv = bool(request.POST.get('HasTv') == '1')
newplace = Place(Owner=request.user, CostPerDay=int(costperday), Address=address, Bedroom=bedroom, HasBath=hasbath, HasParking=hasparking, HasWifi=haswifi, HasTv=hastv)
newplace.save()
return index(request)
else:
return render(request, 'addplace.html', context) | gpl-2.0 |
saurabh3949/mxnet | example/reinforcement-learning/parallel_actor_critic/config.py | 52 | 1710 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
class Config(object):
def __init__(self, args):
# Default training settings
self.ctx = mx.gpu(0) if args.gpu else mx.cpu()
self.init_func = mx.init.Xavier(rnd_type='uniform', factor_type="in",
magnitude=1)
self.learning_rate = 1e-3
self.update_rule = "adam"
self.grad_clip = True
self.clip_magnitude = 40
# Default model settings
self.hidden_size = 200
self.gamma = 0.99
self.lambda_ = 1.0
self.vf_wt = 0.5 # Weight of value function term in the loss
self.entropy_wt = 0.01 # Weight of entropy term in the loss
self.num_envs = 16
self.t_max = 50
# Override defaults with values from `args`.
for arg in self.__dict__:
if arg in args.__dict__:
self.__setattr__(arg, args.__dict__[arg])
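# --- Illustrative sketch (added for clarity; not part of the original file).
# The override loop above copies any attribute present in `args` over the
# defaults; the argparse namespace is invented.
#   from argparse import Namespace
#   cfg = Config(Namespace(gpu=False, learning_rate=3e-4, t_max=20))
#   cfg.learning_rate -> 0.0003;  cfg.t_max -> 20;  cfg.gamma stays 0.99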
| apache-2.0 |
chienlieu2017/it_management | odoo/addons/base_setup/models/res_config.py | 20 | 4619 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class BaseConfigSettings(models.TransientModel):
_name = 'base.config.settings'
_inherit = 'res.config.settings'
group_multi_company = fields.Boolean(string='Manage multiple companies',
help='Work in multi-company environments, with appropriate security access between companies.',
implied_group='base.group_multi_company')
company_id = fields.Many2one('res.company', string='Company', required=True,
default=lambda self: self.env.user.company_id)
module_share = fields.Boolean(string='Allow documents sharing',
help="""Share or embed any screen of Odoo.""")
module_portal = fields.Boolean(string='Activate the customer portal',
help="""Give your customers access to their documents.""")
module_auth_oauth = fields.Boolean(string='Use external authentication providers (OAuth)')
module_base_import = fields.Boolean(string="Allow users to import data from CSV/XLS/XLSX/ODS files")
module_google_drive = fields.Boolean(string='Attach Google documents to any record',
help="""This installs the module google_docs.""")
module_google_calendar = fields.Boolean(
string='Allow the users to synchronize their calendar with Google Calendar',
help="""This installs the module google_calendar.""")
module_inter_company_rules = fields.Boolean(string='Manage Inter Company',
help="""This installs the module inter_company_rules.\n Configure company rules to automatically create SO/PO when one of your company sells/buys to another of your company.""")
company_share_partner = fields.Boolean(string='Share partners to all companies',
help="Share your partners to all companies defined in your instance.\n"
" * Checked : Partners are visible for every companies, even if a company is defined on the partner.\n"
" * Unchecked : Each company can see only its partner (partners where company is defined). Partners not related to a company are visible for all companies.")
group_multi_currency = fields.Boolean(string='Allow multiple currencies',
implied_group='base.group_multi_currency',
help="Allows working in a multi-currency environment")
# Report config from base/res/res_company.py
custom_footer = fields.Boolean(related="company_id.custom_footer", string="Custom footer *", help="Check this to define the report footer manually. Otherwise it will be filled in automatically.")
rml_footer = fields.Text(related="company_id.rml_footer", string='Custom Report Footer *', help="Footer text displayed at the bottom of all reports.")
rml_footer_readonly = fields.Text(related='rml_footer', string='Report Footer *', readonly=True)
rml_paper_format = fields.Selection(related="company_id.rml_paper_format", string="Paper Format *", required=True)
font = fields.Many2one(related='company_id.font', string="Font *", help="Set the font into the report header, it will be used as default font in the RML reports of the user company")
rml_header = fields.Text(related="company_id.rml_header", string="RML Header *")
rml_header2 = fields.Text(related="company_id.rml_header2", string='RML Internal Header *')
rml_header3 = fields.Text(related="company_id.rml_header3", string='RML Internal Header for Landscape Reports *')
@api.multi
def open_company(self):
return {
'type': 'ir.actions.act_window',
'name': 'My Company',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'res.company',
'res_id': self.env.user.company_id.id,
'target': 'current',
}
@api.multi
def open_default_user(self):
action = self.env.ref('base.action_res_users').read()[0]
action['context'] = self.env.context
action['res_id'] = self.env.ref('base.default_user').id
action['views'] = [[self.env.ref('base.view_users_form').id, 'form']]
return action
@api.model
def get_default_company_share_partner(self, fields):
return {
'company_share_partner': not self.env.ref('base.res_partner_rule').active
}
@api.multi
def set_default_company_share_partner(self):
partner_rule = self.env.ref('base.res_partner_rule')
for config in self:
partner_rule.write({'active': not config.company_share_partner})
def act_discover_fonts(self):
self.company_id.act_discover_fonts()
| gpl-3.0 |
cyandterry/Python-Study | Interviews/Flattening_a_Linked_List.py | 2 | 1343 | """
Given a linked list where every node represents a linked list and contains two pointers of its type:
(i) Pointer to next node in the main list (we call it ‘right’ pointer in below code)
(ii) Pointer to a linked list where this node is head (we call it ‘down’ pointer in below code).
All linked lists are sorted. See the following example
```
5 -> 10 -> 19 -> 28
| | | |
V V V V
7 20 22 35
| | |
V V V
8 50 40
| |
V V
30 45
```
"""
class Node():
def __init__(self, data):
self.right = None
self.down = None
self.data = data
def flatten(node):
if not node or not node.right:
return node
return merge(node, flatten(node.right))
def merge(node1, node2):
if not node1:
return node2
if not node2:
return node1
# Nodes carry only 'right' and 'down' pointers (there is no 'next'), so
# the merged result must be linked through 'down'.
if node1.data < node2.data:
head = node1
head.down = merge(node1.down, node2)
else:
head = node2
head.down = merge(node1, node2.down)
return head
# Notice:
# This is a very elegant ("qiao miao", i.e. ingenious) solution. The recursion
# works like a DFS, so the list is flattened from the end first.
# By the time we flatten the front part, the back parts are already flattened.
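# A small self-contained demo (added for illustration; values follow the
# diagram in the docstring above):
if __name__ == '__main__':
    def build(values):
        # Build one vertical (down-linked) sorted list and return its head.
        head = Node(values[0])
        cur = head
        for v in values[1:]:
            cur.down = Node(v)
            cur = cur.down
        return head

    columns = [build(col) for col in
               ([5, 7, 8, 30], [10, 20], [19, 22, 50], [28, 35, 40, 45])]
    for left, right in zip(columns, columns[1:]):
        left.right = right

    node = flatten(columns[0])
    result = []
    while node:
        result.append(node.data)
        node = node.down
    print(result)  # [5, 7, 8, 10, 19, 20, 22, 28, 30, 35, 40, 45, 50]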
| mit |
pymedusa/SickRage | ext/wrapt/wrappers.py | 10 | 32389 | import os
import sys
import functools
import operator
import weakref
import inspect
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
else:
string_types = basestring,
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
return meta("NewBase", bases, {})
class _ObjectProxyMethods(object):
# We use properties to override the values of __module__ and
# __doc__. If we add these in ObjectProxy, the derived class
# __dict__ will still be setup to have string variants of these
# attributes and the rules of descriptors mean that they appear to
# take precedence over the properties in the base class. To avoid
# that, we copy the properties into the derived class type itself
# via a meta class. In that way the properties will always take
# precedence.
@property
def __module__(self):
return self.__wrapped__.__module__
@__module__.setter
def __module__(self, value):
self.__wrapped__.__module__ = value
@property
def __doc__(self):
return self.__wrapped__.__doc__
@__doc__.setter
def __doc__(self, value):
self.__wrapped__.__doc__ = value
# We similarly use a property for __dict__. We need __dict__ to be
# explicit to ensure that vars() works as expected.
@property
def __dict__(self):
return self.__wrapped__.__dict__
# We also need to propagate the special __weakref__ attribute, for the
# case where we are decorating classes, which will define this. If we do
# not define it and use a function like inspect.getmembers() on a
# decorator class it will fail. This can't be in the derived classes.
@property
def __weakref__(self):
return self.__wrapped__.__weakref__
class _ObjectProxyMetaType(type):
def __new__(cls, name, bases, dictionary):
# Copy our special properties into the class so that they
# always take precedence over attributes of the same name added
# during construction of a derived class. This is to save
# duplicating the implementation for them in all derived classes.
dictionary.update(vars(_ObjectProxyMethods))
return type.__new__(cls, name, bases, dictionary)
class ObjectProxy(with_metaclass(_ObjectProxyMetaType)):
__slots__ = '__wrapped__'
def __init__(self, wrapped):
object.__setattr__(self, '__wrapped__', wrapped)
# Python 3.2+ has the __qualname__ attribute, but it does not
# allow it to be overridden using a property and it must instead
# be an actual string object instead.
try:
object.__setattr__(self, '__qualname__', wrapped.__qualname__)
except AttributeError:
pass
@property
def __name__(self):
return self.__wrapped__.__name__
@__name__.setter
def __name__(self, value):
self.__wrapped__.__name__ = value
@property
def __class__(self):
return self.__wrapped__.__class__
@__class__.setter
def __class__(self, value):
self.__wrapped__.__class__ = value
@property
def __annotations__(self):
return self.__wrapped__.__annotations__
@__annotations__.setter
def __annotations__(self, value):
self.__wrapped__.__annotations__ = value
def __dir__(self):
return dir(self.__wrapped__)
def __str__(self):
return str(self.__wrapped__)
if PY3:
def __bytes__(self):
return bytes(self.__wrapped__)
def __repr__(self):
return '<%s at 0x%x for %s at 0x%x>' % (
type(self).__name__, id(self),
type(self.__wrapped__).__name__,
id(self.__wrapped__))
def __reversed__(self):
return reversed(self.__wrapped__)
if PY3:
def __round__(self):
return round(self.__wrapped__)
def __lt__(self, other):
return self.__wrapped__ < other
def __le__(self, other):
return self.__wrapped__ <= other
def __eq__(self, other):
return self.__wrapped__ == other
def __ne__(self, other):
return self.__wrapped__ != other
def __gt__(self, other):
return self.__wrapped__ > other
def __ge__(self, other):
return self.__wrapped__ >= other
def __hash__(self):
return hash(self.__wrapped__)
def __nonzero__(self):
return bool(self.__wrapped__)
def __bool__(self):
return bool(self.__wrapped__)
def __setattr__(self, name, value):
if name.startswith('_self_'):
object.__setattr__(self, name, value)
elif name == '__wrapped__':
object.__setattr__(self, name, value)
try:
object.__delattr__(self, '__qualname__')
except AttributeError:
pass
try:
object.__setattr__(self, '__qualname__', value.__qualname__)
except AttributeError:
pass
elif name == '__qualname__':
setattr(self.__wrapped__, name, value)
object.__setattr__(self, name, value)
elif hasattr(type(self), name):
object.__setattr__(self, name, value)
else:
setattr(self.__wrapped__, name, value)
def __getattr__(self, name):
# If we are being asked to look up '__wrapped__' then the
# '__init__()' method cannot have been called.
if name == '__wrapped__':
raise ValueError('wrapper has not been initialised')
return getattr(self.__wrapped__, name)
def __delattr__(self, name):
if name.startswith('_self_'):
object.__delattr__(self, name)
elif name == '__wrapped__':
raise TypeError('__wrapped__ must be an object')
elif name == '__qualname__':
object.__delattr__(self, name)
delattr(self.__wrapped__, name)
elif hasattr(type(self), name):
object.__delattr__(self, name)
else:
delattr(self.__wrapped__, name)
def __add__(self, other):
return self.__wrapped__ + other
def __sub__(self, other):
return self.__wrapped__ - other
def __mul__(self, other):
return self.__wrapped__ * other
def __div__(self, other):
return operator.div(self.__wrapped__, other)
def __truediv__(self, other):
return operator.truediv(self.__wrapped__, other)
def __floordiv__(self, other):
return self.__wrapped__ // other
def __mod__(self, other):
return self.__wrapped__ % other
def __divmod__(self, other):
return divmod(self.__wrapped__, other)
def __pow__(self, other, *args):
return pow(self.__wrapped__, other, *args)
def __lshift__(self, other):
return self.__wrapped__ << other
def __rshift__(self, other):
return self.__wrapped__ >> other
def __and__(self, other):
return self.__wrapped__ & other
def __xor__(self, other):
return self.__wrapped__ ^ other
def __or__(self, other):
return self.__wrapped__ | other
def __radd__(self, other):
return other + self.__wrapped__
def __rsub__(self, other):
return other - self.__wrapped__
def __rmul__(self, other):
return other * self.__wrapped__
def __rdiv__(self, other):
return operator.div(other, self.__wrapped__)
def __rtruediv__(self, other):
return operator.truediv(other, self.__wrapped__)
def __rfloordiv__(self, other):
return other // self.__wrapped__
def __rmod__(self, other):
return other % self.__wrapped__
def __rdivmod__(self, other):
return divmod(other, self.__wrapped__)
def __rpow__(self, other, *args):
return pow(other, self.__wrapped__, *args)
def __rlshift__(self, other):
return other << self.__wrapped__
def __rrshift__(self, other):
return other >> self.__wrapped__
def __rand__(self, other):
return other & self.__wrapped__
def __rxor__(self, other):
return other ^ self.__wrapped__
def __ror__(self, other):
return other | self.__wrapped__
def __iadd__(self, other):
self.__wrapped__ += other
return self
def __isub__(self, other):
self.__wrapped__ -= other
return self
def __imul__(self, other):
self.__wrapped__ *= other
return self
def __idiv__(self, other):
self.__wrapped__ = operator.idiv(self.__wrapped__, other)
return self
def __itruediv__(self, other):
self.__wrapped__ = operator.itruediv(self.__wrapped__, other)
return self
def __ifloordiv__(self, other):
self.__wrapped__ //= other
return self
def __imod__(self, other):
self.__wrapped__ %= other
return self
def __ipow__(self, other):
self.__wrapped__ **= other
return self
def __ilshift__(self, other):
self.__wrapped__ <<= other
return self
def __irshift__(self, other):
self.__wrapped__ >>= other
return self
def __iand__(self, other):
self.__wrapped__ &= other
return self
def __ixor__(self, other):
self.__wrapped__ ^= other
return self
def __ior__(self, other):
self.__wrapped__ |= other
return self
def __neg__(self):
return -self.__wrapped__
def __pos__(self):
return +self.__wrapped__
def __abs__(self):
return abs(self.__wrapped__)
def __invert__(self):
return ~self.__wrapped__
def __int__(self):
return int(self.__wrapped__)
def __long__(self):
return long(self.__wrapped__)
def __float__(self):
return float(self.__wrapped__)
def __oct__(self):
return oct(self.__wrapped__)
def __hex__(self):
return hex(self.__wrapped__)
def __index__(self):
return operator.index(self.__wrapped__)
def __len__(self):
return len(self.__wrapped__)
def __contains__(self, value):
return value in self.__wrapped__
def __getitem__(self, key):
return self.__wrapped__[key]
def __setitem__(self, key, value):
self.__wrapped__[key] = value
def __delitem__(self, key):
del self.__wrapped__[key]
def __getslice__(self, i, j):
return self.__wrapped__[i:j]
def __setslice__(self, i, j, value):
self.__wrapped__[i:j] = value
def __delslice__(self, i, j):
del self.__wrapped__[i:j]
def __enter__(self):
return self.__wrapped__.__enter__()
def __exit__(self, *args, **kwargs):
return self.__wrapped__.__exit__(*args, **kwargs)
def __iter__(self):
return iter(self.__wrapped__)
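# A minimal usage sketch (hypothetical subclass and file name): attributes
# prefixed with '_self_' are stored on the proxy itself per __setattr__
# above; everything else is delegated to the wrapped object.
#
#     import time
#
#     class TimestampedFile(ObjectProxy):
#         def __init__(self, wrapped):
#             super(TimestampedFile, self).__init__(wrapped)
#             self._self_created = time.time()   # lives on the proxy
#
#     proxy = TimestampedFile(open('data.txt'))  # hypothetical file
#     data = proxy.read()                        # forwarded to the file
#     created = proxy._self_created              # read back from the proxy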
class CallableObjectProxy(ObjectProxy):
def __call__(self, *args, **kwargs):
return self.__wrapped__(*args, **kwargs)
class _FunctionWrapperBase(ObjectProxy):
__slots__ = ('_self_instance', '_self_wrapper', '_self_enabled',
'_self_binding', '_self_parent')
def __init__(self, wrapped, instance, wrapper, enabled=None,
binding='function', parent=None):
super(_FunctionWrapperBase, self).__init__(wrapped)
object.__setattr__(self, '_self_instance', instance)
object.__setattr__(self, '_self_wrapper', wrapper)
object.__setattr__(self, '_self_enabled', enabled)
object.__setattr__(self, '_self_binding', binding)
object.__setattr__(self, '_self_parent', parent)
def __get__(self, instance, owner):
# This method is actually doing double duty for both unbound and
# bound derived wrapper classes. It should possibly be broken up
# and the distinct functionality moved into the derived classes.
# Can't do that straight away due to some legacy code which is
# relying on it being here in this base class.
#
# The distinguishing attribute which determines whether we are
# being called in an unbound or bound wrapper is the parent
# attribute. If binding has never occurred, then the parent will
# be None.
#
# First therefore, is if we are called in an unbound wrapper. In
# this case we perform the binding.
#
# We have one special case to worry about here. This is where we
# are decorating a nested class. In this case the wrapped class
# would not have a __get__() method to call. In that case we
# simply return self.
#
# Note that we otherwise still do binding even if instance is
# None and accessing an unbound instance method from a class.
# This is because we need to be able to later detect that
# specific case as we will need to extract the instance from the
# first argument of those passed in.
if self._self_parent is None:
if not inspect.isclass(self.__wrapped__):
descriptor = self.__wrapped__.__get__(instance, owner)
return self.__bound_function_wrapper__(descriptor, instance,
self._self_wrapper, self._self_enabled,
self._self_binding, self)
return self
# Now we have the case of binding occurring a second time on what
# was already a bound function. In this case we would usually
# return ourselves again. This mirrors what Python does.
#
# The special case this time is where we were originally bound
# with an instance of None and we were likely an instance
# method. In that case we rebind against the original wrapped
# function from the parent again.
if self._self_instance is None and self._self_binding == 'function':
descriptor = self._self_parent.__wrapped__.__get__(
instance, owner)
return self._self_parent.__bound_function_wrapper__(
descriptor, instance, self._self_wrapper,
self._self_enabled, self._self_binding,
self._self_parent)
return self
def __call__(self, *args, **kwargs):
# If enabled has been specified, then evaluate it at this point
# and if the wrapper is not to be executed, then simply return
# the bound function rather than a bound wrapper for the bound
# function. When evaluating enabled, if it is callable we call
# it, otherwise we evaluate it as a boolean.
if self._self_enabled is not None:
if callable(self._self_enabled):
if not self._self_enabled():
return self.__wrapped__(*args, **kwargs)
elif not self._self_enabled:
return self.__wrapped__(*args, **kwargs)
# This can occur where initial function wrapper was applied to
# a function that was already bound to an instance. In that case
# we want to extract the instance from the function and use it.
if self._self_binding == 'function':
if self._self_instance is None:
instance = getattr(self.__wrapped__, '__self__', None)
if instance is not None:
return self._self_wrapper(self.__wrapped__, instance,
args, kwargs)
# This is generally invoked when the wrapped function is being
# called as a normal function and is not bound to a class as an
# instance method. This is also invoked in the case where the
# wrapped function was a method, but this wrapper was in turn
# wrapped using the staticmethod decorator.
return self._self_wrapper(self.__wrapped__, self._self_instance,
args, kwargs)
class BoundFunctionWrapper(_FunctionWrapperBase):
def __call__(self, *args, **kwargs):
# If enabled has been specified, then evaluate it at this point
# and if the wrapper is not to be executed, then simply return
# the bound function rather than a bound wrapper for the bound
# function. When evaluating enabled, if it is callable we call
# it, otherwise we evaluate it as a boolean.
if self._self_enabled is not None:
if callable(self._self_enabled):
if not self._self_enabled():
return self.__wrapped__(*args, **kwargs)
elif not self._self_enabled:
return self.__wrapped__(*args, **kwargs)
# We need to do things differently depending on whether we are
# likely wrapping an instance method vs a static method or class
# method.
if self._self_binding == 'function':
if self._self_instance is None:
# This situation can occur where someone is calling the
# instancemethod via the class type and passing the instance
# as the first argument. We need to shift the args before
# making the call to the wrapper and effectively bind the
# instance to the wrapped function using a partial so the
# wrapper doesn't see anything as being different.
if not args:
raise TypeError('missing 1 required positional argument')
instance, args = args[0], args[1:]
wrapped = functools.partial(self.__wrapped__, instance)
return self._self_wrapper(wrapped, instance, args, kwargs)
return self._self_wrapper(self.__wrapped__, self._self_instance,
args, kwargs)
else:
# As in this case we would be dealing with a classmethod or
# staticmethod, _self_instance only tells us whether the
# classmethod or staticmethod was called via an instance of the
# class it is bound to, and not the case where it was called via
# the class type itself. We thus ignore _self_instance and use
# the __self__ attribute of the bound function instead. For a
# classmethod, this means instance will be the class type and
# for a staticmethod it will be None. This is probably the more
# useful thing we can pass through, even though we lose
# knowledge of whether they were called on the instance vs the
# class type, as it reflects what they have available in the
# decorated function.
instance = getattr(self.__wrapped__, '__self__', None)
return self._self_wrapper(self.__wrapped__, instance, args,
kwargs)
class FunctionWrapper(_FunctionWrapperBase):
__bound_function_wrapper__ = BoundFunctionWrapper
def __init__(self, wrapped, wrapper, enabled=None):
# What it is we are wrapping here could be anything. We need to
# try and detect specific cases though. In particular, we need
# to detect when we are given something that is a method of a
# class. Further, we need to know when it is likely an instance
# method, as opposed to a class or static method. This can
# become problematic though as there isn't strictly a foolproof
# method of knowing.
#
# The situations we could encounter when wrapping a method are:
#
# 1. The wrapper is being applied as part of a decorator which
# is a part of the class definition. In this case what we are
# given is the raw unbound function, classmethod or staticmethod
# wrapper objects.
#
# The problem here is that we will not know we are being applied
# in the context of the class being set up. This becomes
# important later for the case of an instance method, because in
# that case we just see it as a raw function and can't
# distinguish it from wrapping a normal function outside of
# a class context.
#
# 2. The wrapper is being applied when performing monkey
# patching of the class type afterwards and the method to be
# wrapped was retrieved direct from the __dict__ of the class
# type. This is effectively the same as (1) above.
#
# 3. The wrapper is being applied when performing monkey
# patching of the class type afterwards and the method to be
# wrapped was retrieved from the class type. In this case
# binding will have been performed where the instance against
# which the method is bound will be None at that point.
#
# This case is a problem because we can no longer tell if the
# method was a static method, plus if using Python3, we cannot
# tell if it was an instance method as the concept of an
# unbound method no longer exists.
#
# 4. The wrapper is being applied when performing monkey
# patching of an instance of a class. In this case binding will
# have been performed where the instance was not None.
#
# This case is a problem because we can no longer tell if the
# method was a static method.
#
# Overall, the best we can do is look at the original type of the
# object which was wrapped prior to any binding being done and
# see if it is an instance of classmethod or staticmethod. In
# the case where other decorators are between us and them, if
# they do not propagate the __class__ attribute so that the
# isinstance() checks works, then likely this will do the wrong
# thing where classmethod and staticmethod are used.
#
# Since it is likely to be very rare that anyone even puts
# decorators around classmethod and staticmethod, likelihood of
# that being an issue is very small, so we accept it and suggest
# that those other decorators be fixed. It is also only an issue
# if a decorator wants to actually do things with the arguments.
#
# As to not being able to identify static methods properly, we
# just hope that that isn't something people are going to want
# to wrap, or if they do suggest they do it the correct way by
# ensuring that it is decorated in the class definition itself,
# or patch it in the __dict__ of the class type.
#
# So to get the best outcome we can, whenever we aren't sure what
# it is, we label it as a 'function'. If it was already bound and
# that is rebound later, we assume that it will be an instance
# method and try to cope with the possibility that the 'self'
# argument is being passed as an explicit argument and shuffle
# the arguments around to extract 'self' for use as the instance.
if isinstance(wrapped, classmethod):
binding = 'classmethod'
elif isinstance(wrapped, staticmethod):
binding = 'staticmethod'
elif hasattr(wrapped, '__self__'):
if inspect.isclass(wrapped.__self__):
binding = 'classmethod'
else:
binding = 'function'
else:
binding = 'function'
super(FunctionWrapper, self).__init__(wrapped, None, wrapper,
enabled, binding)
try:
if not os.environ.get('WRAPT_DISABLE_EXTENSIONS'):
from ._wrappers import (ObjectProxy, CallableObjectProxy,
FunctionWrapper, BoundFunctionWrapper, _FunctionWrapperBase)
except ImportError:
pass
# Helper functions for applying wrappers to existing functions.
def resolve_path(module, name):
if isinstance(module, string_types):
__import__(module)
module = sys.modules[module]
parent = module
path = name.split('.')
attribute = path[0]
original = getattr(parent, attribute)
for attribute in path[1:]:
parent = original
# We can't just always use getattr() because in doing
# that on a class it will cause binding to occur which
# will complicate things later and cause some things not
# to work. For the case of a class we therefore access
# the __dict__ directly. To cope though with the wrong
# class being given to us, or a method being moved into
# a base class, we need to walk the class hierarchy to
# work out exactly which __dict__ the method was defined
# in, as accessing it from __dict__ will fail if it was
# not actually on the class given. Fallback to using
# getattr() if we can't find it. If it truly doesn't
# exist, then that will fail.
if inspect.isclass(original):
for cls in inspect.getmro(original):
if attribute in vars(cls):
original = vars(cls)[attribute]
break
else:
original = getattr(original, attribute)
else:
original = getattr(original, attribute)
return (parent, attribute, original)
def apply_patch(parent, attribute, replacement):
setattr(parent, attribute, replacement)
def wrap_object(module, name, factory, args=(), kwargs={}):
(parent, attribute, original) = resolve_path(module, name)
wrapper = factory(original, *args, **kwargs)
apply_patch(parent, attribute, wrapper)
return wrapper
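# A minimal monkey-patching sketch (hypothetical module and method names):
#
#     def audit(wrapped, instance, args, kwargs):
#         print('calling %r' % wrapped)
#         return wrapped(*args, **kwargs)
#
#     wrap_object('mypackage.mymodule', 'MyClass.method',
#                 FunctionWrapper, (audit,))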
# Function for applying a proxy object to an attribute of a class
# instance. The wrapper works by defining an attribute of the same name
# on the class which is a descriptor and which intercepts access to the
# instance attribute. Note that this cannot be used on attributes which
# are themselves defined by a property object.
class AttributeWrapper(object):
def __init__(self, attribute, factory, args, kwargs):
self.attribute = attribute
self.factory = factory
self.args = args
self.kwargs = kwargs
def __get__(self, instance, owner):
value = instance.__dict__[self.attribute]
return self.factory(value, *self.args, **self.kwargs)
def __set__(self, instance, value):
instance.__dict__[self.attribute] = value
def __delete__(self, instance):
del instance.__dict__[self.attribute]
def wrap_object_attribute(module, name, factory, args=(), kwargs={}):
path, attribute = name.rsplit('.', 1)
parent = resolve_path(module, path)[2]
wrapper = AttributeWrapper(attribute, factory, args, kwargs)
apply_patch(parent, attribute, wrapper)
return wrapper
# Functions for creating a simple decorator using a FunctionWrapper,
# plus short cut functions for applying wrappers to functions. These are
# for use when doing monkey patching. For a more featured way of
# creating decorators see the decorator decorator instead.
def function_wrapper(wrapper):
def _wrapper(wrapped, instance, args, kwargs):
target_wrapped = args[0]
if instance is None:
target_wrapper = wrapper
elif inspect.isclass(instance):
target_wrapper = wrapper.__get__(None, instance)
else:
target_wrapper = wrapper.__get__(instance, type(instance))
return FunctionWrapper(target_wrapped, target_wrapper)
return FunctionWrapper(wrapper, _wrapper)
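# A minimal sketch of function_wrapper as a decorator factory (hypothetical
# names): the decorated wrapper becomes a reusable decorator.
#
#     @function_wrapper
#     def log_call(wrapped, instance, args, kwargs):
#         print('entering %s' % wrapped.__name__)
#         return wrapped(*args, **kwargs)
#
#     @log_call
#     def compute(x, y):
#         return x + y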
def wrap_function_wrapper(module, name, wrapper):
return wrap_object(module, name, FunctionWrapper, (wrapper,))
def patch_function_wrapper(module, name):
def _wrapper(wrapper):
return wrap_object(module, name, FunctionWrapper, (wrapper,))
return _wrapper
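# A minimal sketch (hypothetical target): the decorated wrapper is applied
# to 'module.name' as a side effect at definition time.
#
#     @patch_function_wrapper('mypackage.mymodule', 'function')
#     def intercept(wrapped, instance, args, kwargs):
#         return wrapped(*args, **kwargs)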
def transient_function_wrapper(module, name):
def _decorator(wrapper):
def _wrapper(wrapped, instance, args, kwargs):
target_wrapped = args[0]
if instance is None:
target_wrapper = wrapper
elif inspect.isclass(instance):
target_wrapper = wrapper.__get__(None, instance)
else:
target_wrapper = wrapper.__get__(instance, type(instance))
def _execute(wrapped, instance, args, kwargs):
(parent, attribute, original) = resolve_path(module, name)
replacement = FunctionWrapper(original, target_wrapper)
setattr(parent, attribute, replacement)
try:
return wrapped(*args, **kwargs)
finally:
setattr(parent, attribute, original)
return FunctionWrapper(target_wrapped, _execute)
return FunctionWrapper(wrapper, _wrapper)
return _decorator
# A weak function proxy. This will work on instance methods, class
# methods, static methods and regular functions. Special treatment is
# needed for the method types because the bound method is effectively a
# transient object and applying a weak reference to one will immediately
# result in it being destroyed and the weakref callback called. The weak
# reference is therefore applied to the instance the method is bound to
# and the original function. The function is then rebound at the point
# of a call via the weak function proxy.
def _weak_function_proxy_callback(ref, proxy, callback):
if proxy._self_expired:
return
proxy._self_expired = True
# This could raise an exception. We let it propagate back and let
# the weakref.proxy() deal with it, at which point it generally
# prints out a short error message direct to stderr and keeps going.
if callback is not None:
callback(proxy)
class WeakFunctionProxy(ObjectProxy):
__slots__ = ('_self_expired', '_self_instance')
def __init__(self, wrapped, callback=None):
# We need to determine if the wrapped function is actually a
# bound method. In the case of a bound method, we need to keep a
# reference to the original unbound function and the instance.
# This is necessary because if we hold a reference to the bound
# function, it will be the only reference and given it is a
# temporary object, it will almost immediately expire and
# the weakref callback triggered. So what is done is that we
# hold a reference to the instance and unbound function and
# when called bind the function to the instance once again and
# then call it. Note that we avoid using a nested function for
# the callback here so as not to cause any odd reference cycles.
_callback = callback and functools.partial(
_weak_function_proxy_callback, proxy=self,
callback=callback)
self._self_expired = False
if isinstance(wrapped, _FunctionWrapperBase):
self._self_instance = weakref.ref(wrapped._self_instance,
_callback)
if wrapped._self_parent is not None:
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped._self_parent, _callback))
else:
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped, _callback))
return
try:
self._self_instance = weakref.ref(wrapped.__self__, _callback)
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped.__func__, _callback))
except AttributeError:
self._self_instance = None
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped, _callback))
def __call__(self, *args, **kwargs):
# We perform a boolean check here on the instance and wrapped
# function as that will trigger the reference error prior to
# calling if the reference had expired.
instance = self._self_instance and self._self_instance()
function = self.__wrapped__ and self.__wrapped__
# If the wrapped function was originally a bound function, for
# which we retained a reference to the instance and the unbound
# function, we need to rebind the function and then call it. If
# not, just call the wrapped function.
if instance is None:
return self.__wrapped__(*args, **kwargs)
return function.__get__(instance, type(instance))(*args, **kwargs)
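# A minimal usage sketch (hypothetical function): the proxy does not keep
# its target alive, so calling it after the target has been garbage
# collected raises ReferenceError.
#
#     def handler():
#         return 'ok'
#
#     proxy = WeakFunctionProxy(handler)
#     proxy()        # returns 'ok'
#     del handler
#     proxy()        # raises ReferenceError once the function is collected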
| gpl-3.0 |
a0c/odoo | addons/report_webkit/__init__.py | 382 | 1593 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import header
import company
import report_helper
import webkit_report
import ir_report
import wizard
import convert
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
radxa/linux-rockchip | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display the per-packet processing flow and the time spent at each stage.
# It helps us to investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
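# Example invocation (a sketch; assumes a perf.data recorded with the
# matching tracepoints, e.g. via 'perf script record netdev-times'):
#
#   perf script -s netdev-times.py tx rx dev=eth0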
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # all tracepoint events related to this script are inserted here
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes the time of NET_RX softirq-entry
# and a list which stacks receive events
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the processing of received packets and the interrupts associated
# with a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be shown
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
olivierkes/manuskript | manuskript/ui/search_ui.py | 2 | 2067 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'manuskript/ui/search_ui.ui'
#
# Created by: PyQt5 UI code generator 5.4.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_search(object):
def setupUi(self, search):
search.setObjectName("search")
search.resize(400, 300)
self.verticalLayout = QtWidgets.QVBoxLayout(search)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.searchTextInput = QtWidgets.QLineEdit(search)
self.searchTextInput.setInputMask("")
self.searchTextInput.setFrame(False)
self.searchTextInput.setClearButtonEnabled(True)
self.searchTextInput.setObjectName("searchTextInput")
self.horizontalLayout.addWidget(self.searchTextInput)
self.btnOptions = QtWidgets.QPushButton(search)
self.btnOptions.setText("")
icon = QtGui.QIcon.fromTheme("edit-find")
self.btnOptions.setIcon(icon)
self.btnOptions.setCheckable(True)
self.btnOptions.setFlat(True)
self.btnOptions.setObjectName("btnOptions")
self.horizontalLayout.addWidget(self.btnOptions)
self.verticalLayout.addLayout(self.horizontalLayout)
self.result = QtWidgets.QListWidget(search)
self.result.setFrameShape(QtWidgets.QFrame.NoFrame)
self.result.setObjectName("result")
self.verticalLayout.addWidget(self.result)
self.retranslateUi(search)
QtCore.QMetaObject.connectSlotsByName(search)
def retranslateUi(self, search):
_translate = QtCore.QCoreApplication.translate
search.setWindowTitle(_translate("search", "Form"))
self.searchTextInput.setPlaceholderText(_translate("search", "Search for..."))
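# Typical usage of a generated Ui_* class (a sketch, not produced by
# pyuic5; widget and class names taken from this file):
#
#     from PyQt5 import QtWidgets
#     app = QtWidgets.QApplication([])
#     widget = QtWidgets.QWidget()
#     ui = Ui_search()
#     ui.setupUi(widget)
#     widget.show()
#     app.exec_()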
| gpl-3.0 |
mmpagani/oq-hazardlib | openquake/hazardlib/geo/surface/complex_fault.py | 2 | 12074 | # The Hazard Library
# Copyright (C) 2012-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.geo.surface.complex_fault` defines
:class:`ComplexFaultSurface`.
"""
import numpy
import shapely
from openquake.hazardlib.geo.line import Line
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.geo.surface.base import BaseQuadrilateralSurface
from openquake.hazardlib.geo.surface.planar import PlanarSurface
from openquake.hazardlib.geo.mesh import Mesh, RectangularMesh
from openquake.hazardlib.geo.utils import spherical_to_cartesian
class ComplexFaultSurface(BaseQuadrilateralSurface):
"""
Represent a complex fault surface as 3D mesh of points (not necessarily
uniformly spaced across the surface area).
:param mesh:
Instance of :class:`~openquake.hazardlib.geo.mesh.RectangularMesh`
representing surface geometry.
Another way to construct the surface object is to call
:meth:`from_fault_data`.
"""
def __init__(self, mesh):
super(ComplexFaultSurface, self).__init__()
self.mesh = mesh
assert 1 not in self.mesh.shape
self.strike = self.dip = None
# A common user error is to create a ComplexFaultSourceSurface
# from invalid fault data (e.g. mixing the order of
# vertices for top and bottom edges). Therefore, we want to
# restrict every complex source to have a projected enclosing
# polygon that is not a multipolygon.
if isinstance(
self.get_mesh()._get_proj_enclosing_polygon()[1],
shapely.geometry.multipolygon.MultiPolygon):
raise ValueError("Invalid surface. "
"The projected enclosing polygon "
"must be a simple polygon. "
"Check the geometry definition of the "
"fault source")
def _create_mesh(self):
"""
Return a mesh provided to object's constructor.
"""
return self.mesh
def get_dip(self):
"""
Return the fault dip as the average dip over the mesh.
The average dip is defined as the weighted mean inclination
of all the mesh cells. See
:meth:`openquake.hazardlib.geo.mesh.RectangularMesh.get_mean_inclination_and_azimuth`
:returns:
The average dip, in decimal degrees.
"""
# uses the same approach as in simple fault surface
if self.dip is None:
mesh = self.get_mesh()
self.dip, self.strike = mesh.get_mean_inclination_and_azimuth()
return self.dip
def get_strike(self):
"""
Return the fault strike as the average strike over the mesh.
The average strike is defined as the weighted mean azimuth
of all the mesh cells. See
:meth:`openquake.hazardlib.geo.mesh.RectangularMesh.get_mean_inclination_and_azimuth`
:returns:
The average strike, in decimal degrees.
"""
if self.strike is None:
self.get_dip() # this should cache strike value
return self.strike
@classmethod
def check_aki_richards_convention(cls, edges):
"""
Verify that the surface (as defined by corner points) conforms with the
Aki and Richards convention (i.e. the surface dips to the right of the
surface strike).
This method doesn't have to be called by hand before creating the
surface object, because it is called from :meth:`from_fault_data`.
"""
# 1) extract 4 corner points of surface mesh
# 2) compute cross products between left and right edges and top edge
# (these define vectors normal to the surface)
# 3) compute dot products between cross product results and
# position vectors associated with upper left and right corners (if
# both angles are less then 90 degrees then the surface is correctly
# defined)
ul = edges[0].points[0]
ur = edges[0].points[-1]
bl = edges[-1].points[0]
br = edges[-1].points[-1]
ul, ur, bl, br = spherical_to_cartesian(
[ul.longitude, ur.longitude, bl.longitude, br.longitude],
[ul.latitude, ur.latitude, bl.latitude, br.latitude],
[ul.depth, ur.depth, bl.depth, br.depth],
)
top_edge = ur - ul
left_edge = bl - ul
right_edge = br - ur
left_cross_top = numpy.cross(left_edge, top_edge)
right_cross_top = numpy.cross(right_edge, top_edge)
left_cross_top /= numpy.sqrt(numpy.dot(left_cross_top, left_cross_top))
right_cross_top /= numpy.sqrt(
numpy.dot(right_cross_top, right_cross_top)
)
ul /= numpy.sqrt(numpy.dot(ul, ul))
ur /= numpy.sqrt(numpy.dot(ur, ur))
# rounding to 1st digit, to avoid ValueError raised for floating point
# imprecision
angle_ul = round(
numpy.degrees(numpy.arccos(numpy.dot(ul, left_cross_top))), 1
)
angle_ur = round(
numpy.degrees(numpy.arccos(numpy.dot(ur, right_cross_top))), 1
)
if (angle_ul > 90) or (angle_ur > 90):
raise ValueError(
"Surface does not conform with Aki & Richards convention"
)
@classmethod
def check_surface_validity(cls, edges):
"""
Check validity of the surface.
Project edge points to vertical plane anchored to surface upper left
edge and with strike equal to top edge strike. Check that resulting
polygon is valid.
This method doesn't have to be called by hand before creating the
surface object, because it is called from :meth:`from_fault_data`.
"""
# extract coordinates of surface boundary (as defined from edges)
full_boundary = []
left_boundary = []
right_boundary = []
for i in range(1, len(edges) - 1):
left_boundary.append(edges[i].points[0])
right_boundary.append(edges[i].points[-1])
full_boundary.extend(edges[0].points)
full_boundary.extend(right_boundary)
full_boundary.extend(edges[-1].points[::-1])
full_boundary.extend(left_boundary[::-1])
lons = [p.longitude for p in full_boundary]
lats = [p.latitude for p in full_boundary]
depths = [p.depth for p in full_boundary]
# define the reference plane. Corner points are separated by an arbitrary
# distance of 10 km and the mesh spacing is set to 2 km. Neither the
# corner distance nor the mesh spacing affects the algorithm's results.
ul = edges[0].points[0]
strike = ul.azimuth(edges[0].points[-1])
dist = 10.
mesh_spacing = 2.
ur = ul.point_at(dist, 0, strike)
bl = Point(ul.longitude, ul.latitude, ul.depth + dist)
br = bl.point_at(dist, 0, strike)
# project surface boundary to reference plane and check for
# validity.
ref_plane = PlanarSurface.from_corner_points(
mesh_spacing, ul, ur, br, bl
)
_, xx, yy = ref_plane._project(lons, lats, depths)
coords = [(x, y) for x, y in zip(xx, yy)]
p = shapely.geometry.Polygon(coords)
if not p.is_valid:
raise ValueError('Edges points are not in the right order')
@classmethod
def check_fault_data(cls, edges, mesh_spacing):
"""
Verify the fault data and raise ``ValueError`` if anything is wrong.
This method doesn't have to be called by hand before creating the
surface object, because it is called from :meth:`from_fault_data`.
"""
if not len(edges) >= 2:
raise ValueError("at least two edges are required")
if not all(len(edge) >= 2 for edge in edges):
raise ValueError("at least two points must be defined "
"in each edge")
if not mesh_spacing > 0.0:
raise ValueError("mesh spacing must be positive")
cls.check_surface_validity(edges)
cls.check_aki_richards_convention(edges)
@classmethod
def from_fault_data(cls, edges, mesh_spacing):
"""
Create and return a fault surface using fault source data.
:param edges:
A list of at least two horizontal edges of the surface
as instances of :class:`openquake.hazardlib.geo.line.Line`. The
list should be in top-to-bottom order (the shallowest edge first).
:param mesh_spacing:
Distance between two subsequent points in a mesh, in km.
:returns:
An instance of :class:`ComplexFaultSurface` created using
that data.
:raises ValueError:
If requested mesh spacing is too big for the surface geometry
(doesn't allow to put a single mesh cell along length and/or
width).
Uses :meth:`check_fault_data` for checking parameters.
"""
cls.check_fault_data(edges, mesh_spacing)
mean_length = numpy.mean([edge.get_length() for edge in edges])
num_hor_points = int(round(mean_length / mesh_spacing)) + 1
if num_hor_points <= 1:
raise ValueError(
'mesh spacing %.1f km is too big for mean length %.1f km' %
(mesh_spacing, mean_length)
)
edges = [edge.resample_to_num_points(num_hor_points).points
for i, edge in enumerate(edges)]
vert_edges = [Line(v_edge) for v_edge in zip(*edges)]
mean_width = numpy.mean([v_edge.get_length() for v_edge in vert_edges])
num_vert_points = int(round(mean_width / mesh_spacing)) + 1
if num_vert_points <= 1:
raise ValueError(
'mesh spacing %.1f km is too big for mean width %.1f km' %
(mesh_spacing, mean_width)
)
points = zip(*[v_edge.resample_to_num_points(num_vert_points).points
for v_edge in vert_edges])
mesh = RectangularMesh.from_points_list(points)
assert 1 not in mesh.shape
return cls(mesh)
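# A minimal construction sketch (hypothetical coordinates): two horizontal
# edges, shallowest first, with the bottom edge placed to the right of the
# top edge's strike per the Aki & Richards check above.
#
#     top = Line([Point(0.0, 0.0, 0.0), Point(0.1, 0.0, 0.0)])
#     bottom = Line([Point(0.0, -0.05, 10.0), Point(0.1, -0.05, 10.0)])
#     surface = ComplexFaultSurface.from_fault_data(
#         [top, bottom], mesh_spacing=2.0)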
@classmethod
def surface_projection_from_fault_data(cls, edges):
"""
Get a surface projection of the complex fault surface.
:param edges:
A list of horizontal edges of the surface as instances
of :class:`openquake.hazardlib.geo.line.Line`.
:returns:
Instance of :class:`~openquake.hazardlib.geo.polygon.Polygon`
describing the surface projection of the complex fault.
"""
# collect lons and lats of all the vertices of all the edges
lons = []
lats = []
for edge in edges:
for point in edge:
lons.append(point.longitude)
lats.append(point.latitude)
lons = numpy.array(lons, dtype=float)
lats = numpy.array(lats, dtype=float)
return Mesh(lons, lats, depths=None).get_convex_hull()
def get_width(self):
"""
Return surface's width (that is surface extension along the
dip direction) in km.
The width is computed as the average width along the surface.
See
:meth:`openquake.hazardlib.geo.mesh.RectangularMesh.get_mean_width`
"""
return self.mesh.get_mean_width()
| agpl-3.0 |
telerik/cloudbase-init | cloudbaseinit/plugins/windows/cryptoapi.py | 1 | 8468 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ctypes
from ctypes import windll
from ctypes import wintypes
class CryptoAPIException(Exception):
def __init__(self):
message = self._get_windows_error()
super(CryptoAPIException, self).__init__(message)
def _get_windows_error(self):
err_code = GetLastError()
return "CryptoAPI error: 0x%0x" % err_code
class SYSTEMTIME(ctypes.Structure):
_fields_ = [
('wYear', wintypes.WORD),
('wMonth', wintypes.WORD),
('wDayOfWeek', wintypes.WORD),
('wDay', wintypes.WORD),
('wHour', wintypes.WORD),
('wMinute', wintypes.WORD),
('wSecond', wintypes.WORD),
('wMilliseconds', wintypes.WORD),
]
class CERT_CONTEXT(ctypes.Structure):
_fields_ = [
('dwCertEncodingType', wintypes.DWORD),
('pbCertEncoded', ctypes.POINTER(wintypes.BYTE)),
('cbCertEncoded', wintypes.DWORD),
('pCertInfo', ctypes.c_void_p),
('hCertStore', wintypes.HANDLE),
]
class CRYPTOAPI_BLOB(ctypes.Structure):
_fields_ = [
('cbData', wintypes.DWORD),
('pbData', ctypes.POINTER(wintypes.BYTE)),
]
class CRYPT_ALGORITHM_IDENTIFIER(ctypes.Structure):
_fields_ = [
('pszObjId', wintypes.LPSTR),
('Parameters', CRYPTOAPI_BLOB),
]
class CRYPT_KEY_PROV_PARAM(ctypes.Structure):
_fields_ = [
('dwParam', wintypes.DWORD),
('pbData', ctypes.POINTER(wintypes.BYTE)),
('cbData', wintypes.DWORD),
('dwFlags', wintypes.DWORD),
]
class CRYPT_KEY_PROV_INFO(ctypes.Structure):
_fields_ = [
('pwszContainerName', wintypes.LPWSTR),
('pwszProvName', wintypes.LPWSTR),
('dwProvType', wintypes.DWORD),
('dwFlags', wintypes.DWORD),
('cProvParam', wintypes.DWORD),
('rgProvParam', ctypes.POINTER(CRYPT_KEY_PROV_PARAM)),
('dwKeySpec', wintypes.DWORD),
]
AT_SIGNATURE = 2
CERT_NAME_UPN_TYPE = 8
CERT_SHA1_HASH_PROP_ID = 3
CERT_STORE_ADD_REPLACE_EXISTING = 3
CERT_STORE_PROV_SYSTEM = wintypes.LPSTR(10)
CERT_SYSTEM_STORE_CURRENT_USER = 65536
CERT_SYSTEM_STORE_LOCAL_MACHINE = 131072
CERT_X500_NAME_STR = 3
CRYPT_MACHINE_KEYSET = 32
CRYPT_NEWKEYSET = 8
CRYPT_STRING_BASE64 = 1
PKCS_7_ASN_ENCODING = 65536
PROV_RSA_FULL = 1
X509_ASN_ENCODING = 1
szOID_PKIX_KP_SERVER_AUTH = "1.3.6.1.5.5.7.3.1"
szOID_RSA_SHA1RSA = "1.2.840.113549.1.1.5"
advapi32 = windll.advapi32
crypt32 = windll.crypt32
kernel32 = windll.kernel32
advapi32.CryptAcquireContextW.restype = wintypes.BOOL
advapi32.CryptAcquireContextW.argtypes = [wintypes.HANDLE, wintypes.LPCWSTR,
wintypes.LPCWSTR, wintypes.DWORD,
wintypes.DWORD]
CryptAcquireContext = advapi32.CryptAcquireContextW
advapi32.CryptReleaseContext.restype = wintypes.BOOL
advapi32.CryptReleaseContext.argtypes = [wintypes.HANDLE, wintypes.DWORD]
CryptReleaseContext = advapi32.CryptReleaseContext
advapi32.CryptGenKey.restype = wintypes.BOOL
advapi32.CryptGenKey.argtypes = [wintypes.HANDLE,
wintypes.DWORD,
wintypes.DWORD,
ctypes.POINTER(wintypes.HANDLE)]
CryptGenKey = advapi32.CryptGenKey
advapi32.CryptDestroyKey.restype = wintypes.BOOL
advapi32.CryptDestroyKey.argtypes = [wintypes.HANDLE]
CryptDestroyKey = advapi32.CryptDestroyKey
crypt32.CertStrToNameW.restype = wintypes.BOOL
crypt32.CertStrToNameW.argtypes = [wintypes.DWORD, wintypes.LPCWSTR,
wintypes.DWORD, ctypes.c_void_p,
ctypes.POINTER(wintypes.BYTE),
ctypes.POINTER(wintypes.DWORD),
ctypes.POINTER(wintypes.LPCWSTR)]
CertStrToName = crypt32.CertStrToNameW
# TODO(alexpilotti): this is not a CryptoAPI function; putting it in a
# separate module would be more correct
kernel32.GetSystemTime.restype = None
kernel32.GetSystemTime.argtypes = [ctypes.POINTER(SYSTEMTIME)]
GetSystemTime = kernel32.GetSystemTime
# TODO(alexpilotti): this is not a CryptoAPI function; putting it in a
# separate module would be more correct
kernel32.GetLastError.restype = wintypes.DWORD
kernel32.GetLastError.argtypes = []
GetLastError = kernel32.GetLastError
crypt32.CertCreateSelfSignCertificate.restype = ctypes.POINTER(CERT_CONTEXT)
crypt32.CertCreateSelfSignCertificate.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(CRYPTOAPI_BLOB),
wintypes.DWORD,
ctypes.POINTER(CRYPT_KEY_PROV_INFO),
ctypes.POINTER(CRYPT_ALGORITHM_IDENTIFIER),
ctypes.POINTER(SYSTEMTIME),
ctypes.POINTER(SYSTEMTIME),
# PCERT_EXTENSIONS
ctypes.c_void_p]
CertCreateSelfSignCertificate = crypt32.CertCreateSelfSignCertificate
crypt32.CertAddEnhancedKeyUsageIdentifier.restype = wintypes.BOOL
crypt32.CertAddEnhancedKeyUsageIdentifier.argtypes = [
ctypes.POINTER(CERT_CONTEXT),
wintypes.LPCSTR]
CertAddEnhancedKeyUsageIdentifier = crypt32.CertAddEnhancedKeyUsageIdentifier
crypt32.CertOpenStore.restype = wintypes.HANDLE
crypt32.CertOpenStore.argtypes = [wintypes.LPCSTR, wintypes.DWORD,
wintypes.HANDLE, wintypes.DWORD,
ctypes.c_void_p]
CertOpenStore = crypt32.CertOpenStore
crypt32.CertAddCertificateContextToStore.restype = wintypes.BOOL
crypt32.CertAddCertificateContextToStore.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(CERT_CONTEXT),
wintypes.DWORD,
ctypes.POINTER(CERT_CONTEXT)]
CertAddCertificateContextToStore = crypt32.CertAddCertificateContextToStore
crypt32.CryptStringToBinaryA.restype = wintypes.BOOL
crypt32.CryptStringToBinaryA.argtypes = [wintypes.LPCSTR,
wintypes.DWORD,
wintypes.DWORD,
ctypes.POINTER(wintypes.BYTE),
ctypes.POINTER(wintypes.DWORD),
ctypes.POINTER(wintypes.DWORD),
ctypes.POINTER(wintypes.DWORD)]
CryptStringToBinaryA = crypt32.CryptStringToBinaryA
crypt32.CertAddEncodedCertificateToStore.restype = wintypes.BOOL
crypt32.CertAddEncodedCertificateToStore.argtypes = [
wintypes.HANDLE,
wintypes.DWORD,
ctypes.POINTER(wintypes.BYTE),
wintypes.DWORD,
wintypes.DWORD,
ctypes.POINTER(ctypes.POINTER(CERT_CONTEXT))]
CertAddEncodedCertificateToStore = crypt32.CertAddEncodedCertificateToStore
crypt32.CertGetNameStringW.restype = wintypes.DWORD
crypt32.CertGetNameStringW.argtypes = [ctypes.POINTER(CERT_CONTEXT),
wintypes.DWORD,
wintypes.DWORD,
ctypes.c_void_p,
wintypes.LPWSTR,
wintypes.DWORD]
CertGetNameString = crypt32.CertGetNameStringW
crypt32.CertFreeCertificateContext.restype = wintypes.BOOL
crypt32.CertFreeCertificateContext.argtypes = [ctypes.POINTER(CERT_CONTEXT)]
CertFreeCertificateContext = crypt32.CertFreeCertificateContext
crypt32.CertCloseStore.restype = wintypes.BOOL
crypt32.CertCloseStore.argtypes = [wintypes.HANDLE, wintypes.DWORD]
CertCloseStore = crypt32.CertCloseStore
crypt32.CertGetCertificateContextProperty.restype = wintypes.BOOL
crypt32.CertGetCertificateContextProperty.argtypes = [
ctypes.POINTER(CERT_CONTEXT),
wintypes.DWORD,
ctypes.c_void_p,
ctypes.POINTER(wintypes.DWORD)]
CertGetCertificateContextProperty = crypt32.CertGetCertificateContextProperty
| apache-2.0 |
DaggerES/ReloadCam | DELETED_ReloadCam_Server_Cccam4you.py | 1 | 1427 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Automatic cline refresher
# Created by Dagger - https://github.com/gavazquez
import ReloadCam_Main, ReloadCam_Helper
def GetVersion():
return 7
# Filename must start with 'Server'; the class name and the argument must be the same!
class Cccam4you(ReloadCam_Main.Server):
def GetUrl(self, serverNo):
# Set a breakpoint here if you want to see the real URL ;)
#http://cccam-free2.com/cccamfree/get.php
if serverNo <= 1:
realUrl = ReloadCam_Helper.Decrypt('maanpH1wfNXIz9DOkZekmJl1b7Dh0pvSxMeSn5mmqKZ82crgndHMoQ==')
else:
realUrl = ReloadCam_Helper.Decrypt('maanpH1wfOnc453Ex5SToGi8sMKgyNvckMqjl5hocqiy5pPc19E=')
return realUrl
def GetClines(self):
print "Now getting Cccam4you clines!"
cccam4youClines = []
cccam4youClines.append(self.__GetCccam4youCline(1))
cccam4youClines.append(self.__GetCccam4youCline(2))
cccam4youClines = filter(None, cccam4youClines)
if len(cccam4youClines) == 0: print "No Cccam4you lines retrieved"
return cccam4youClines
def __GetCccam4youCline(self, serverNo):
htmlCode = ReloadCam_Helper.GetHtmlCode(None, self.GetUrl(serverNo))
cline = ReloadCam_Helper.FindStandardClineInText(htmlCode)
if cline != None and ReloadCam_Helper.TestCline(cline):
return cline
return None
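# Illustrative usage (hypothetical caller, not part of this module):
#   server = Cccam4you()
#   for cline in server.GetClines():
#       print cline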
| gpl-3.0 |
h0tw1r3/mame | 3rdparty/googletest/googletest/test/gtest_filter_unittest.py | 364 | 21325 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
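For example, both of these (illustrative) invocations restrict the run to
the FooTest test case:
GTEST_FILTER='FooTest.*' ./gtest_filter_unittest_
./gtest_filter_unittest_ --gtest_filter='FooTest.*'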
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
try:
from sets import Set as set # For Python 2.3 compatibility
except ImportError:
pass
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' or 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print(\'EMPTY_VAR\' in os.environ)'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print(\'UNSET_VAR\' not in os.environ)'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABLED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(set(set_var), set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(set(tests_to_run) - set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
args : Arguments to pass to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
args = ['--%s' % ALSO_RUN_DISABLED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-2.0 |
CMLL/taiga-back | taiga/projects/milestones/admin.py | 8 | 1434 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from taiga.projects.notifications.admin import WatchedInline
from taiga.projects.votes.admin import VoteInline
from . import models
class MilestoneInline(admin.TabularInline):
model = models.Milestone
extra = 0
class MilestoneAdmin(admin.ModelAdmin):
list_display = ["name", "project", "owner", "closed", "estimated_start",
"estimated_finish"]
list_display_links = list_display
list_filter = ["project"]
readonly_fields = ["owner"]
inlines = [WatchedInline, VoteInline]
admin.site.register(models.Milestone, MilestoneAdmin)
| agpl-3.0 |
pyspace/pyspace | pySPACE/missions/nodes/base_node.py | 1 | 72795 | """ Skeleton for an elemental transformation of the signal
This includes some exception and metaclass handling, but the most important part
is the :class:`~pySPACE.missions.nodes.base_node.BaseNode`.
.. note::
This module includes a reimplementation of the MDP node class that
is better suited for the purposes of pySPACE. For instance
it provides methods to allow the benchmarking of supervised training,
storing, loading, cross validation, logging ...
Furthermore, it takes care for the totally different data types,
because in our case, the input data is 2-dimensional.
These conceptual differences are quite essential and resulted in
a separate implementation that combines the code in one module,
instead of inheriting from the MDP node class.
Nevertheless, a lot of code was copied from this great library.
.. image:: ../../graphics/node.png
:width: 500
:Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
:Created: 2008/11/25
MDP (version 3.3) is distributed under the following BSD license::
This file is part of Modular toolkit for Data Processing (MDP).
All the code in this package is distributed under the following conditions:
Copyright (c) 2003-2012, MDP Developers <mdp-toolkit-devel@lists.sourceforge.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Modular toolkit for Data Processing (MDP)
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import itertools
import copy
# logging imports
import logging
import logging.handlers
import warnings
import socket
import os
import time
import cPickle
import numpy
import pySPACE
from pySPACE.tools.memoize_generator import MemoizeGenerator
from pySPACE.missions.nodes.decorators import NoOptimizationParameter
# Exceptions from MDP
class NodeException(Exception):
"""Base class for exceptions in `Node` subclasses."""
pass
class InconsistentDimException(NodeException):
"""Raised when there is a conflict setting the dimensions
Note that incoming data with conflicting dimensionality raises a normal
`NodeException`.
"""
pass
class TrainingException(NodeException):
"""Base class for exceptions in the training phase."""
pass
class TrainingFinishedException(TrainingException):
"""Raised when the `Node.train` method is called although the
training phase is closed.
"""
pass
class IsNotTrainableException(TrainingException):
"""Raised when the `Node.train` method is called although the
node is not trainable.
"""
pass
class NodeMetaclass(type):
""" General meta class for future features """
def __new__(cls, classname, bases, members):
""" Forward to standard method from type """
return super(NodeMetaclass, cls).__new__(cls, classname, bases, members)
@NoOptimizationParameter("store")
@NoOptimizationParameter("retrain")
@NoOptimizationParameter("input_dim")
@NoOptimizationParameter("output_dim")
@NoOptimizationParameter("dtype")
@NoOptimizationParameter("kwargs_warning")
class BaseNode(object):
""" Main base class for nodes which forwards data without processing
It provides methods to allow the benchmarking of supervised training,
storing, loading, cross validation, logging, ...
Furthermore, it takes care for different data types.
The input data is currently two-dimensional.
It can be:
* :class:`~pySPACE.resources.data_types.time_series.TimeSeries` or
* :class:`~pySPACE.resources.data_types.feature_vector.FeatureVector` or
* :class:`~pySPACE.resources.data_types.prediction_vector.PredictionVector`
* which all inherit from a common :class:`~pySPACE.resources.data_types.base.BaseData`.
In the following parameters are introduced which do not give any
functionality but which could generally be used by inheriting nodes.
**Parameters**
:input_dim:
Dimension(s) of the input data.
By default determined automatically.
(*optional, default: None*)
:output_dim:
Dimension(s) of the output data.
By default determined automatically.
(*optional, default: None*)
:dtype:
Data type of the data array.
By default determined automatically.
(*optional, default: None*)
:keep_in_history:
This parameter is a specialty, which comes with the
:class:`~pySPACE.resources.data_types.base.BaseData`.
The execution result of the node
is copied into the *history* parameter of the object.
Additionally, the *specs* of the object receive an entry labeled '
node_specs' containing a dictionary of additional information
from the saving node.
Especially :mod:`~pySPACE.missions.nodes.visualization` nodes
may use this functionality to visualize the change of the
processing of the data.
(*optional, default: False*)
:load_path:
This is the standard variable to load processing information for
the node especially from previous seen data.
Examples for the usage, are the loading of spatial filters,
classifiers or feature normalizations.
If a parameter load_path is provided for any node, the
node is able to replace some keywords.
So far implemented replacements:
:__RUN__: current run number
:__SPLIT__: current split number
Be aware that corresponding split and run numbers don't
necessarily mean that you're operating on the same data.
Especially if cross validations generated the splits, there
is no reason to believe that the current splitting has
anything to do with a previous one!
.. note::
The keywords **__INPUT_DATASET__** and **__RESULT_DIRECTORY__** can
also be used. The replacement of these keyword is done
in the :class:`~pySPACE.missions.operations.node_chain.NodeChainOperation`.
(*optional, default: None*)
:store:
If the node parameter *store* is set to 'True', before each reset
the internal state of the node is stored (pickled) with the
store_state method.
(*optional, default: False*)
:retrain:
If your node has the method *_inc_train* and you want to use
*incremental* training during testing or application phase,
this parameter has to be set to True.
After processing the data, the node will immediately get the label
to learn changes in the data.
For more subtle retraining in the online application,
you will additionally have to use the
parameter *buffering* ('True') to save all occurring samples
in the testing phase. The retraining is then activated by
calling the method *present_label(label)*:
If the label is *None*, only the first buffered element
is deleted. This is used if we don't get a label,
if we are unsure of the true label, or
if we simply do not want to retrain on this sample.
In the other case, the presented label belongs to the first
buffered element, which is then given to the *_inc_train* method
together with its label.
Afterwards the buffered element is deleted.
The method could be called in different ways in a sink node,
to simulate different ways of getting labels and different ways
of incremental learning.
Furthermore, it can be used by node chain scripts
such as those found in the
:mod:`~pySPACE.environments.live` environment,
where we face the real situation
that the true label of the data has to be checked
after the classification.
.. note:: Before using this parameter, always check whether
the node is capable of incremental learning! A minimal usage
sketch is given after this parameter list.
(*optional, default: False*)
:buffering:
This switch is responsible for real time *incremental*
learning of the node in applications (live environment),
by mainly buffering all samples in the execute method in the testing
phase.
If *buffering* is set to 'True', the *retrain* parameter should also be
set to 'True', and the node must have an *_inc_train* method.
Furthermore the *present_label* method must be called externally.
Otherwise you will run into memory issues.
For more details see the documentation of the *retrain* parameter.
(*optional, default: False*)
:zero_training:
This forces the node to skip training, even though it is trainable.
.. warning:: For usage in nodes, the algorithms need to define
proper defaults in the initialization, e.g. by using the
*load_path* parameter.
(*optional, default: False*)
:kwargs_warning:
Raise a warning if unexpected keyword arguments are given.
(*optional, default: True*)
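As mentioned in the *retrain* parameter description above, a minimal
sketch of the intended online retraining pattern (the call sequence is
illustrative, not a fixed API contract):
.. code-block:: python
# node constructed with retrain=True and buffering=True
prediction = node.execute(sample)  # sample is buffered internally
node.present_label(true_label)     # retrains on the oldest buffered sample
node.present_label(None)           # discards the oldest buffered sample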
**Implementing your own Node**
For finding out, how to implement your own node, have a look at the
:mod:`~pySPACE.missions.nodes.templates`.
**Exemplary Call**
.. code-block:: yaml
-
node : Noop
parameters :
keep_in_history : True
:input: Any (e.g. FeatureVector)
:output: Any (e.g. FeatureVector)
:Author: Mario Michael Krell and many more (krell@uni-bremen.de)
:Created: before 2008/09/28
"""
# setting the meta class
__metaclass__ = NodeMetaclass
def __init__(self, store=False, retrain=False, input_dim=None,
output_dim=None, dtype=None, kwargs_warning=True, **kwargs):
""" This initialization is necessary for every node
So make sure, that you use it via the *super* method in each new node.
The method cares for the setting of the basic parameters, including
parameters for storing,
and handling of training and test data.
"""
# Sanity checks
assert store in [True, False], \
"Passing inappropriate value %s for parameter 'store'." % store
assert retrain in [True, False], \
"Passing inappropriate value %s for parameter 'retrain'." \
% retrain
self.store = store
self.retrainable = retrain
#: parameter for retraining in application
#: see *present_label*
self.buffering = False
if kwargs.has_key("buffering"):
self.buffering = kwargs["buffering"]
self.zero_training = False
if kwargs.has_key("zero_training"):
self.zero_training = kwargs["zero_training"]
if self.buffering and not self.retrainable:
warnings.warn("Buffering nodes should retrains!")
self.retraining_phase=False
# whether to save data or not
self.save_intermediate_results = False
if kwargs.has_key("save_intermediate_results"):
self.save_intermediate_results = kwargs["save_intermediate_results"]
# initialize basic attributes
self._input_dim = None
self._output_dim = None
self._dtype = None
# call set functions for properties
self.set_input_dim(input_dim)
self.set_output_dim(output_dim)
self.set_dtype(dtype)
# skip the training phase if the node is not trainable
if not self.is_trainable() or self.zero_training:
self._training = False
self._train_phase = -1
self._train_phase_started = False
else:
# this var stores at which point in the training sequence we are
self._train_phase = 0
# this var is False if the training of the current phase hasn't
# started yet, True otherwise
self._train_phase_started = False
# this var is False if the complete training is finished
self._training = True
self.input_node = None
self.data_for_training = None
self.data_for_testing = None
self.root_logger = None
# distinguish execution on training and test data
# since some nodes only want to handle test data and ignore
# training data
self._training_execution_phase = False
self.current_split = 0
self.trace = False
#: Do we have to remember the outputs of this node for later reuse?
self.caching = False
self.load_path = kwargs.get('load_path', None)
self.keep_in_history = kwargs.get('keep_in_history', False)
self.node_specs = {}
self.node_name = str(type(self)).split(".")[-1].split("'")[0]
self.retrain_data = None
self.retrain_label = None
# Switch for in-/homogeneous data (e.g. data with different sampling
# frequencies). Has to be handled by the node that can deal with
# inhomogeneous data.
self.homogenous = kwargs.get("homogenous", True)
# Set the default run number which gives the number of the current
# iteration
self.run_number = 0
# Every Parameter is stored since we reset them with every new spit.
self.permanent_state = copy.deepcopy(self.__dict__)
if kwargs_warning:
remove_kwargs = ["homogeneous", "keep_in_history", "load_path",
"save_intermediate_results", "zero_training",
"buffering"]
for key in kwargs:
if key not in remove_kwargs:
warnings.warn("The parameter '%s' seems to be unused in %s."
% (key, self.__class__.__name__) +
" Either you specified it on purpose, you spelled it " +
"wrong, or there is an implementation inconsistency.")
###### Methods, which can be overwritten by inheriting nodes ######
def _train(self, x):
""" Give the training data to the node
If a node is trainable, this method is called and *has to be* implemented.
Optionally the :func:`_stop_training` method can be additionally implemented.
"""
if self.is_trainable():
raise NotImplementedError(
"The node %s claims to be trainable but does not "
"implement the _train method" % self.__class__.__name__)
def _stop_training(self, *args, **kwargs):
""" Called method after the training data went through the node
It can be overwritten by the inheriting node.
Normally, the :func:`_train` method only collects the data
and this method does the real (batch) training.
By default this method does nothing.
"""
pass
def _execute(self, x):
""" Elemental processing step (**key component**)
This method should be overwritten by the inheriting node.
It implements the final processing of the data of the node.
By default the data is just forwarded.
Some nodes only visualize or analyze training data
or only handle the data sets without changing the data
and so they do not need this method.
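A typical override in a subclass could look like this (illustrative
sketch, not part of the base implementation):
.. code-block:: python
def _execute(self, x):
# amplify the incoming signal by a constant factor
return 2.0 * x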
"""
return x
def _check_train_args(self, x, *args, **kwargs):
""" Checks if the arguments are correct for training
Implemented by subclasses if needed.
.. todo:: Check if this method copy is needed and
if there is a good use-case.
"""
pass
def _inc_train(self, data, class_label=None):
""" Method to be overwritten by subclasses for incremental training after the initial training """
raise NotImplementedError(
"The node %s does not implement incremental training."
% self.__class__.__name__)
#@staticmethod
def is_trainable(self):
""" Return True if the node can be trained, False otherwise
*default: False*
"""
return False
#@staticmethod
def is_supervised(self):
""" Returns whether this node requires supervised training
*default: False*
"""
return False
def get_own_transformation(self, sample=None):
""" If the node has a transformation, it should overwrite this method
The format should be::
(main transformation, offset and further parameters, relevant names, transformation type)
"""
return None
@classmethod
def get_input_types(cls, as_string = True):
""" Return all available input types from the node
**Parameters**
:as_string:
Tells the method whether it should return
a string encoding of the type or a class instance
(*default: True*)
.. note ::
Strings have less overhead than class instances
"""
if cls.__name__ == "BaseNode":
# if the current node is the base node, it need not have
# defined a variable called input_types but can
# accept all inputs
# if it had such a variable, the if clauses below would
# not work and the default would be to accept all inputs
return ["PredictionVector", "FeatureVector",
"TimeSeries"]
if not hasattr(cls, "input_types"):
pack = cls.__module__.split(".")[3]
module = cls.__module__.split(".")[-1]
if pack in ['preprocessing', 'spatial_filtering',
'feature_generation']:
cls.input_types = ["TimeSeries"]
elif pack in ["feature_selection", "classification"] or \
module in ["feature_normalization", "compression",
"scikit_nodes"]:
if module == "ensemble":
cls.input_types=["PredictionVector"]
else:
cls.input_types=["FeatureVector"]
elif module in ["score_transformation", "threshold_optimization"]:
cls.input_types=["PredictionVector"]
elif pack in ["sink", "source"]:
raise NotImplementedError
else:
cls.input_types=["PredictionVector", "FeatureVector",
"TimeSeries"]
if as_string:
return cls.input_types
else:
types = []
for one_type in cls.input_types:
types.append(cls.string_to_class(one_type))
return types
@classmethod
def get_output_type(cls, input_type, as_string=True):
""" Return output type depending on the *input_type*
**Parameters**
:as_string:
Tells the method whether it should return
a string encoding of the type or a class instance
(*default: True*)
:input_type:
The input type of the node.
In most cases, the input depends on the input
and can not be inferred from the algorithm category.
.. note ::
Strings have less overhead than class instances and
that is why they are normally used in routine operations
By default the input type is assumed to be the same as the output type,
except for *classification*, *feature_generation* and *type_conversion*.
For any other algorithm type, especially for meta nodes,
this method needs to be overwritten.
Otherwise, a warning will occur.
"""
pack = cls.__module__.split(".")[3:4]
if "classification" in pack:
result = "PredictionVector"
elif "feature_generation" in pack:
result = "FeatureVector"
elif "type_conversion" in pack:
# it is expected that nodes in the type_conversion script
# overwrite this method with their specific output types
raise NotImplementedError
else:
result = str(input_type)
if as_string:
return result
else:
return cls.string_to_class(result)
@staticmethod
def string_to_class(string_encoding):
""" given a string variable, outputs a class instance
e.g., obtaining a TimeSeries
.. code-block:: python
>>> result = BaseNode.string_to_class("TimeSeries")
>>> print type(result)
<class 'pySPACE.resources.data_types.time_series.TimeSeries'>
"""
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.data_types.feature_vector import FeatureVector
from pySPACE.resources.data_types.prediction_vector \
import PredictionVector
if "TimeSeries" in string_encoding:
return TimeSeries
elif "PredictionVector" in string_encoding:
return PredictionVector
elif "FeatureVector" in string_encoding:
return FeatureVector
else:
raise NotImplementedError
###### Reimplementation of some MDP methods that have some flaws, ######
###### when used with the different concepts, used here. ######
### check functions
def _check_input(self, x):
""" Check the input_dim and array consistency
Here input_dim are the dimensions of the input array
"""
data_array = x.view(numpy.ndarray)
# check input type
if not type(x).__name__ in self.get_input_types():
# self._log("Data type (%s) not supported by %s?" %
# (type(x).__name__, self.__class__.__name__),
# level=logging.CRITICAL)
warnings.warn("Data type (%s) not supported by %s?" %
(type(x).__name__, self.__class__.__name__))
# raise NotImplementedError
# check input rank
if not x.ndim == 2:
error_str = "Class %s: x has rank %d, should be 2" \
% (self.__class__.__name__, x.ndim)
raise NodeException(error_str)
# check for NaN
if not numpy.isfinite(data_array).all():
error_str = "Class %s: Not finite number in data: %s!" \
% (self.__class__.__name__, str(data_array))
raise NodeException(error_str)
# set the dtype if necessary
if self.dtype is None:
self.dtype = x.dtype
# check if the user has changed the dtype of the input
if self.dtype is not None and self.dtype.name != x.dtype.name:
warnings.warn("The dtype of individual data points is " +
"inconsistent.\n The former data type was " +
"(%s) and now the input is (%s)"
% (str(self.dtype), str(x.dtype)))
# set the input dimension if necessary
if self.input_dim is None:
shape = x.shape
if len(shape) == 1:
self.input_dim = shape[0]
else:
self.input_dim = shape
# in case of homogenous data control the dimension x
if self.homogenous:
if x.shape != self.input_dim and x.shape[1] != self.input_dim:
error_str = "Class %s: x has dimension %s, should be %s" \
% (self.__class__.__name__, str(x.shape), str(self.input_dim))
raise NodeException(error_str)
### Handle the data types of the data
def _get_supported_dtypes(self):
""" Return the list of dtypes supported by this node.
The types can be specified in any format allowed by *numpy* `dtype`.
.. todo:: In the future we should use float and double as defaults
and specify explicitly for each node
whether it can use other input formats!
"""
def get_dtypes(typecodes_key):
"""Return the list of dtypes corresponding to the set of
typecodes defined in numpy.typecodes[typecodes_key].
E.g., get_dtypes('Float') = [dtype('f'), dtype('d'), dtype('g')].
"""
types = []
for c in numpy.typecodes[typecodes_key]:
try:
dtype = numpy.dtype(c)
types.append(dtype)
except TypeError:
pass
return types
return get_dtypes('All')
def get_dtype(self):
""" Return dtype."""
return self._dtype
def set_dtype(self, t):
"""Set internal structures' dtype.
Performs sanity checks and then calls `self._set_dtype(t)`, which
is responsible for setting the internal attribute `self._dtype`.
.. note:: Subclasses should overwrite self._set_dtype when needed.
"""
# Difference to MDP's standard set_dtype: Setting of dtypes allows
# now also dtypes that are inherited from supported dtypes
# (in order to allow for var-length strings)
if t is None:
return
t = numpy.dtype(t)
if (self._dtype is not None) and (self._dtype != t):
errstr = ("Class %s: dtype is already set to '%s' "
"('%s' given)!" % (self.__class__.__name__, t,
self.dtype.name))
raise Exception(errstr)
else:
for dt in self.get_supported_dtypes():
if numpy.issubdtype(t, dt):
self._set_dtype(t)
return
errstr = ("\ndtype '%s' is not supported.\n"
"Supported dtypes: %s" %
(t.name, [numpy.dtype(t).name for t in
self.get_supported_dtypes()]))
raise Exception(errstr)
def _set_dtype(self, t):
t = numpy.dtype(t)
if t not in self.get_supported_dtypes():
raise NodeException(
'dtype %s not among supported dtypes (%s) in node %s'
% (str(t), self.get_supported_dtypes(),self.__class__.__name__))
self._dtype = t
def get_supported_dtypes(self):
"""Return dtypes supported by the node as a list of numpy `dtype` objects.
Note that subclasses should overwrite `self._get_supported_dtypes`
when needed."""
return [numpy.dtype(t) for t in self._get_supported_dtypes()]
supported_dtypes = property(get_supported_dtypes,
doc="Supported dtypes")
dtype = property(get_dtype,
set_dtype,
doc = "dtype")
################ Reimplementation END ################
@staticmethod
def node_from_yaml(node_spec):
""" Creates a node based on the dictionary *node_spec* """
# The node_spec from the calling method should not be changed,
# hence there are maybe several recalls with the same node_spec
assert(type(node_spec) == dict and "node" in node_spec), \
"Error in node spec. (no dict or no 'node' key): '%s'" % node_spec
node_spec = copy.deepcopy(node_spec)
if node_spec is None:
warnings.warn("Maybe you have a wrong minus with no following "
"entry in your spec file? Please correct and "
"restart!")
return
# evaluation of components of the form "eval(command)"
if isinstance(node_spec["node"], basestring) \
and node_spec["node"].startswith("eval("):
node_name = eval(node_spec["node"][5:-1])
else:
node_name = node_spec["node"]
try:
node_class = pySPACE.missions.nodes.NODE_MAPPING[node_name]
except KeyError:
raise UserWarning("No node with name %s exists" % node_name)
# If the node overwrites this method we delegate node creation
if node_name not in ['Noop', "Base", "BaseNode"] and \
'node_from_yaml' in node_class.__dict__:
return node_class.node_from_yaml(node_spec)
elif node_class.__module__ == "pySPACE.missions.nodes.external":
# do not interface the wrapper but the real class
if 'node_from_yaml' in node_class.__bases__[0].__dict__:
return node_class.node_from_yaml(node_spec)
# If parameters need to be passed to the class
if "parameters" in node_spec:
if node_spec["parameters"] is None:
warnings.warn("No parameters specified for %s!" % node_name)
node_obj = node_class()
else:
# All parameters which are eval() statements
# are considered to be Python expressions and are evaluated
BaseNode.eval_dict(node_spec["parameters"])
# Create the node object
#try:
node_obj = node_class(**node_spec["parameters"])
#except TypeError, e:
#raise TypeError("%s: %s" % (node_class.__name__, e))
else:
node_obj = node_class()
return node_obj
@staticmethod
def eval_dict(dictionary):
""" Check dictionary entries starts and evaluate if needed
Evaluation is switched on, by using ``eval(statement)`` to
evaluate the *statement*.
Dictionary entries are replaced with evaluation result.
.. note:: No additional string mark up needed, contrary to normal
Python evaluate syntax
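A small illustrative example (the dictionary content is hypothetical):
.. code-block:: python
>>> d = {"a": "eval(2**3)", "b": "plain"}
>>> BaseNode.eval_dict(d)
>>> d["a"], d["b"]
(8, 'plain')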
"""
for key, value in dictionary.iteritems():
if isinstance(value, basestring) and value.startswith("eval("):
try:
dictionary[key] = eval(value[5:-1])
except BaseException, e:
warnings.warn("Could not evaluate: " + value +
". Error: " + str(e))
def set_permanent_attributes(self, **kwargs):
""" Add all the items of the given kwargs dictionary as permanent attributes of this object
Permanent attribute are reset, when using the reset method.
The other attributes are deleted.
.. note:: Parameters of the basic init function are always set permanent.
.. note:: The memory of permanent attributes is doubled.
When having large objects, like the data in source nodes,
you should handle this by overwriting the reset method.
The main reason for this method is the reset of nodes during cross
validation. Here the parameters of the algorithms have to be reset,
to have independent evaluations.
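A minimal usage sketch (the attribute name is illustrative):
.. code-block:: python
>>> node.set_permanent_attributes(threshold=0.5)
>>> node.threshold = 0.9
>>> node.reset()
>>> node.threshold
0.5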
"""
self.__dict__.update(kwargs)
# Deepcopy the permanent state (except the input node and generator)
for key, value in kwargs.iteritems():
if key == "input_node" or (key == "generator"):
self.permanent_state[key] = value
else:
self.permanent_state[key] = copy.deepcopy(value)
# Track changes in node_specs. This is a dictionary with all local
# variables of the __init__ function.
# The variable names can be found with the co_varnames attribute
# of the function object code object.
self.node_specs = dict([ (key, val)
for key, val in self.__dict__.items()
if key in self.__init__.im_func.func_code.co_varnames])
self.node_specs.update({'node_name': self.node_name})
def reset(self):
""" Reset the state of the object to the clean state it had after its initialization
.. note:: Attributes in the permanent state are not overwritten/reset.
Parameters were set into permanent state with the method:
*set_permanent_attributes*.
"""
# We have to create a temporary reference since we remove
# the self.permanent_state reference in the next step by overwriting
# self.__dict__
tmp = self.permanent_state
# The input node should not be deepcopied since otherwise the input
# node and the node in the node list that precedes this node are
# different objects
input_node = self.permanent_state.pop("input_node")
self.__dict__ = copy.deepcopy(tmp)
self.input_node = input_node
self.permanent_state = tmp
self.permanent_state["input_node"] = input_node
def reset_attribute(self, attribute_string):
""" Reset a single attribute with its previously saved permanent state """
if not isinstance(attribute_string, basestring):
warnings.warn("You did not use a string for reset."+
"Instead you used:%s."%str(attribute_string))
else:
try:
self.__dict__[attribute_string]=copy.deepcopy(self.permanent_state[attribute_string])
except KeyError:
warnings.warn("You did not use a valid attribute for reset."+
"Instead you used:%s."%str(attribute_string))
def is_retrainable(self):
""" Returns if node supports retraining """
return self.retrainable
def is_source_node(self):
""" Returns whether this node is a source node that can yield data """
# A source node is identified by its name ending
return self.__class__.__name__.endswith("SourceNode")
def is_sink_node(self):
""" Returns if this node is a sink node that gathers results"""
# A sink node is identified by its property of having a method
# with the name "store_results"
return hasattr(self, "get_result_dataset")
def is_split_node(self):
""" Returns whether this is a split node. """
return False
def register_input_node(self, node):
""" Register the given node as input """
self.set_permanent_attributes(input_node=node)
def set_run_number(self, run_number):
""" Informs the node about the number of the current run
Per default, a node is not interested in the run number and simply
forwards the information to its input node.
For nodes like splitters that are interested in the run_number, this
method can be overwritten.
"""
self.set_permanent_attributes(run_number=run_number)
if not self.is_source_node():
self.input_node.set_run_number(run_number)
def set_temp_dir(self, temp_dir):
""" Give directory name for temporary data saves """
self.set_permanent_attributes(temp_dir = temp_dir)
try:
    self.input_node.set_temp_dir(temp_dir)
except AttributeError:
    # source nodes have no input node to forward the directory to
    pass
def get_source_file_name(self):
""" Returns the name of the source file.
This works for the Stream2TimeSeriesSourceNode.
For other nodes None is returned.
"""
try:
    return self.input_node.get_source_file_name()
except AttributeError:
    # no input node, or the source does not provide a file name
    return None
def perform_final_split_action(self):
""" Perform automatic action when the processing of the current split is finished.
This method does nothing in the default case, but can be overwritten by child nodes if desired.
"""
pass
def use_next_split(self):
""" Use the next split of the data into training and test data.
Returns True if more splits are available, otherwise False.
This method is useful for benchmarking
"""
assert(self.input_node is not None)
has_more_splits = self.input_node.use_next_split()
self.perform_final_split_action()
if has_more_splits:
# Counting the number of the current split
self.increase_split_number()
# Resetting the node for the next run
self.reset()
return has_more_splits
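# Typical benchmarking loop (sketch; ``sink`` is assumed to be the last
# node of a chain containing a splitter): process each split completely,
# then advance to the next one until use_next_split() returns False.
# >>> while True:
# ...     for _ in sink.process():
# ...         pass
# ...     if not sink.use_next_split():
# ...         break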
def increase_split_number(self):
""" Method for increasing split number (needed for access by meta nodes)
.. todo:: Better exception handling. Move code to meta/Layer nodes?
"""
self.set_permanent_attributes(current_split=self.current_split + 1)
try:
    for node in self.nodes:
        node.increase_split_number()
except AttributeError:
    pass
try:
    self.node.increase_split_number()
except AttributeError:
    pass
def _get_train_set(self, use_test_data = False):
""" Returns the data that can be used for training """
# We take data that is provided by the input node for training
# NOTE: This might involve training of the preceding nodes
train_set = self.input_node.request_data_for_training(use_test_data)
# If we should also use the test data for training (i.e. we are not
# doing benchmarking...)
if use_test_data:
# Add the data provided by the input node for testing to the
# training set
train_set = \
itertools.chain(train_set,
self.input_node.request_data_for_testing())
return train_set
def train_sweep(self, use_test_data):
""" Performs the actual training of the node.
If use_test_data is True, we use all available data for training,
otherwise only the data that is explicitly marked as data for training.
This is a requirement e.g. for benchmarking.
"""
# If this node does not require training
if not self.is_trainable() or self.zero_training:
self._log("Does not require training.")
# Check whether the node requires supervised training
elif self.is_supervised(): # Supervised learning
self._log("Supervised training started.")
# For all train phases
while self.get_remaining_train_phase() > 0:
self._log("Supervised train stage %s started."
% self._train_phase)
train_set = self._get_train_set(use_test_data)
# Present all available data (along with the corresponding
# label) to this node
for data, label in train_set:
self.train(data, label)
# Stop this train phase
self.stop_training()
self._log("Supervised train stage %s finished."
% self._train_phase)
self._log("Supervised training finished.")
elif self.is_trainable(): # Unsupervised learning
self._log("Unsupervised training started.")
train_set = self._get_train_set(use_test_data)
# For all train phases
while self.get_remaining_train_phase() > 0:
self._log("Unsupervised train stage %s started."
% self._train_phase)
# Present all available data to this node, but
# skip the label (since we are doing unsupervised training)
for data, label in train_set:
self.train(data)
self._log("Unsupervised train stage %s finished."
% self._train_phase)
# Stop this train phase
self.stop_training()
self._log("Unsupervised training finished.")
def process(self):
""" Processes all data that is provided by the input node
Returns a generator that yields the data after being processed by this
node.
"""
assert(self.input_node is not None), "No input node specified!"
# Assert that this node has already been trained
assert(not self.is_trainable() or
self.get_remaining_train_phase() == 0), "Node not trained!"
self._log("Processing data.", level=logging.DEBUG)
data_generator = \
itertools.imap(lambda (data, label):
(self._trace(self.execute(self._trace(data, "entry")),
"exit"), label),
self.input_node.process())
return data_generator
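# Consumption sketch: process() yields (data, label) tuples after this
# node's execute has been applied to each sample coming from the input
# node (``handle`` is a hypothetical callback).
# >>> for data, label in node.process():
# ...     handle(data, label)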
def request_data_for_training(self, use_test_data):
""" Returns data for training of subsequent nodes of the node chain
A call to this method might involve training of the node chain up to
this node. If use_test_data is true, all available data is used for
training, otherwise only the data that is explicitly marked for training.
"""
assert(self.input_node is not None)
self._log("Data for training is requested.", level = logging.DEBUG)
# If we haven't computed the data for training yet
if self.data_for_training is None:
self._log("Producing data for training.", level = logging.DEBUG)
# Train this node
self.train_sweep(use_test_data)
# Compute a generator that yields the train data and
# encapsulate it in an object that memoizes its outputs and
# provides a "fresh" method that returns a new generator that'll
# yield the same sequence
# This line crashes without the NodeMetaclass bug fix
train_data_generator = \
itertools.imap(lambda (data, label) :
(self.execute(data,in_training=True), label),
self.input_node.request_data_for_training(
use_test_data))
self.data_for_training = MemoizeGenerator(train_data_generator,
caching=self.caching)
self._log("Data for training finished", level = logging.DEBUG)
# Return a fresh copy of the generator
return self.data_for_training.fresh()
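# Memoization sketch: every call returns a fresh generator over the same
# cached sequence, so several successor nodes can iterate independently
# without re-triggering training.
# >>> g1 = node.request_data_for_training(use_test_data=False)
# >>> g2 = node.request_data_for_training(use_test_data=False)
# >>> # g1 and g2 replay the same cached (data, label) sequence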
def request_data_for_testing(self):
""" Returns data for testing of subsequent nodes of the node chain
A call to this method might involve evaluating the whole node chain
up to this node.
"""
assert(self.input_node is not None)
self._log("Data for testing is requested.", level = logging.DEBUG)
# If we haven't computed the data for testing yet
if self.data_for_testing is None:
# Assert that this node has already been trained
assert(not self.is_trainable() or
self.get_remaining_train_phase() == 0)
# Compute a generator that yields the test data and
# encapsulate it in an object that memoizes its outputs and
# provides a "fresh" method that returns a new generator that'll
# yield the same sequence
self._log("Producing data for testing.", level = logging.DEBUG)
test_data_generator = \
itertools.imap(lambda (data, label):
self.test_retrain(data, label),
self.input_node.request_data_for_testing())
self.data_for_testing = MemoizeGenerator(test_data_generator,
caching=self.caching)
self._log("Data for testing finished", level = logging.DEBUG)
# Return a fresh copy of the generator
return self.data_for_testing.fresh()
def test_retrain(self, data, label):
""" Wrapper method for offline incremental retraining
The parameter *retrain* has to be set to True to activate offline retraining.
The parameter *buffering* should be False, which is the default.
.. note:: The execute method of the node is called implicitly
in this method instead of being called in the
request_data_for_testing method.
For the incremental retraining itself
the method _inc_train (to be implemented)
is called.
Implementation-wise, we first retrain on the previously stored sample
and only then execute on the new one. This is necessary since the
following nodes may depend on the current state of the transformation,
so it must not be changed after calling execute.
.. note:: Currently there is no retraining on the last sample.
This could be done by modifying the :func:`present_label`
method and calling it in the last node after the
last sample was processed.
"""
if self.is_retrainable() and not self.buffering and hasattr(self, "_inc_train"):
if not self.retraining_phase:
self.retraining_phase=True
self.start_retraining()
else:
self._inc_train(self.retrain_data, self.retrain_label)
new_data = self.execute(data)
self.retrain_data = data
self.retrain_label = label
return (new_data, label)
else:
return (self.execute(data), label)
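# Offline retraining sketch (assuming retrain=True, buffering=False and
# an _inc_train implementation): retraining lags one sample behind, so
# the transformation seen downstream never changes after execute.
# >>> out1, l1 = node.test_retrain(x1, y1)  # starts the retraining phase
# >>> out2, l2 = node.test_retrain(x2, y2)  # first _inc_train(x1, y1),
# ...                                       # then execute(x2)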
def start_retraining(self):
""" Method called for initialization of retraining """
pass
def present_label(self, label):
""" Wrapper method for incremental training in application case (live)
The parameters *retrain* and *buffering* have to be
set to True to activate this functionality.
For skipping examples, you can use None, "null" or an empty string as label.
.. note:: For the incremental training itself
the method _inc_train (to be implemented)
is called.
"""
if self.input_node is not None:
self.input_node.present_label(label)
if self.buffering and self.is_retrainable():
if not self.retraining_phase:
self.retraining_phase=True
self.start_retraining()
if not label in [None, "null", ""]:
if type(label) is str:
self._inc_train(self.data_buffer[0],label)
elif type(label) is list:
self._batch_retrain(self.data_buffer[0:len(label)],label)
for i in range(len(label) - 1):
self.data_buffer.pop(0)
self.data_buffer.pop(0)
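# Live usage sketch (assuming retrain=True and buffering=True): execute()
# buffers each incoming sample, present_label() later consumes the buffer.
# >>> node.execute(x1); node.execute(x2)   # both samples buffered
# >>> node.present_label("Target")         # _inc_train(x1, "Target"), x1 popped
# >>> node.present_label(None)             # x2 skipped but still popped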
def _batch_retrain(self, data_list, label_list):
""" Interface for retraining with a set of data
A possible application is a calibration phase, where we may want to
improve non-incremental algorithms.
If this method is not overwritten, it uses the incremental training
as a default.
"""
for i in range(len(label_list)):
self._inc_train(data_list[i],label_list[i])
data_list = None
label_list = None
def _change_parameters(self,parameters):
""" Overwrite parameters of a node e.g. when it is loaded and
parameters like *retrain* or *recalibrate* have to be set to True.
The node only provides the simple, straightforward way of
permanently replacing the parameters.
For more sophisticated parameter handling, nodes have to replace this
method by their own.
"""
self.set_permanent_attributes(**parameters)
def store_state(self, result_dir, index=None):
""" Stores this node in the given directory *result_dir*
This method is automatically called during benchmarking
for every node.
The standard convention is that nodes only store their state
if the parameter *store* in the specification is set to *True*.
"""
def format_dict(item):
if issubclass(type(item), BaseNode):
# the item is a nested pyspace node -> get dictionary and clean it
attr_dict = item.__getstate__()
attr_dict = format_dict(attr_dict)
return attr_dict
elif isinstance(item, dict):
    # the item is an attribute dict -> remove inappropriate values
    new_dict = {}
    if 'input_node' in item:
        del item['input_node']
for key, value in item.iteritems():
if isinstance(value, dict):
value = format_dict(value)
elif isinstance(value, list):
temp = []
for subitem in value:
if isinstance(subitem, dict) or issubclass(type(subitem), BaseNode):
temp.append(format_dict(subitem))
else:
temp.append(subitem)
new_dict[key] = temp
elif value is None:
new_dict[key] = -1
else:
new_dict[key] = value
return new_dict
else:
# the item is a primitive type
return item
if isinstance(self.store, basestring) and "mat" in self.store:
import scipy.io
node_index = 0
result_file = None
# export to a MATLAB .mat file
while not result_file:
filename = os.path.join(result_dir, self.node_name + "_"
+ str(node_index))
if os.path.isfile(filename+".mat"):
node_index += 1
continue
result_file = open(filename+".mat","w")
attr_dict = self.__getstate__()
# MATLAB does not like None values; format_dict replaces them with -1
attr_dict = format_dict(attr_dict)
scipy.io.savemat(result_file, mdict=attr_dict)
def _log(self, message, level = logging.INFO):
""" Log the given message into the logger of this class """
if pySPACE.configuration.min_log_level>level:
return
if not hasattr(self,"root_logger") or self.root_logger == None:
self.root_logger = logging.getLogger("%s.%s.%s" % (socket.gethostname(),
os.getpid(),
self))
if len(self.root_logger.handlers) == 0:
self.root_logger.addHandler(logging.handlers.SocketHandler('localhost',
logging.handlers.DEFAULT_TCP_LOGGING_PORT))
self.root_logger.log(level, message)
def __del__(self):
# Stop logging
if hasattr(self,"root_logger") and self.root_logger is not None:
for handler in self.root_logger.handlers:
handler.close()
self.root_logger.removeHandler(handler)
del(self.root_logger)
def _trace(self, x, key_str):
""" Every call of this function creates a time-stamped log entry """
if self.trace:
self._log("%s time: %f" % (key_str , time.time()))
return x
def __getstate__(self):
""" Return a pickable state for this object """
self._log("Pickling instance of class %s." % self.__class__.__name__,
level = logging.DEBUG)
odict = self.__dict__.copy() # copy the dict since we change it
odict['data_for_training'] = None
odict['data_for_testing'] = None
odict['root_logger'] = None
if "data" in odict.keys() and self.is_split_node():
odict.pop("data")
if "generator" in odict.keys():
odict["generator"] = None
odict['permanent_state'] = None
#del odict['permanent_state']
# Remove other non-pickable stuff
remove_keys=[]
for key, value in odict.iteritems():
if key == "input_node" or key == "flow":
continue
try:
cPickle.dumps(value)
except (TypeError, cPickle.PicklingError):
remove_keys.append(key)
for key in remove_keys:
self._log("Removing attribute %s of class %s (type %s) because of "
"it can not be pickled."
% (key, self.__class__.__name__, type(odict[key])),
level = logging.INFO)
odict.pop(key)
return odict
def __setstate__(self, sdict):
""" Restore object from its pickled state"""
self._log("Restoring instance of class %s." % self.__class__.__name__,
level = logging.DEBUG)
self.__dict__.update(sdict) # update attributes
# Reconstruct the permanent state of the object
# This should be a deepcopy except for the input node...
if "input_node" in self.__dict__:
input_node = self.__dict__.pop("input_node")
self.permanent_state = copy.deepcopy(self.__dict__)
self.__dict__["input_node"] = input_node
self.permanent_state['input_node'] = self.input_node
else:
self.permanent_state = copy.deepcopy(self.__dict__)
def replace_keywords_in_load_path(self):
""" Replace keywords in the load_path parameter
.. note::
The keywords **__INPUT_DATASET__** and **__RESULT_DIRECTORY__** can
also be used. The replacement of these keywords is done by
the :class:`~pySPACE.missions.operations.node_chain.NodeChainOperation`.
"""
self.load_path = self.load_path.replace('__SPLIT__',
'%i' % self.current_split)
self.load_path = self.load_path.replace('__RUN__',
'%i' % self.run_number)
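# Sketch with a hypothetical load_path: for current_split == 0 and
# run_number == 2, "model___SPLIT___run___RUN__.pickle" becomes
# "model_0_run_2.pickle".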
def get_previous_transformations(self, sample=None):
""" Recursively construct a list of (linear) transformations
These transformations, applied on the data, are needed later on for
visualization, so that the new classifier can be visualized relative
to a previous linear processing step.
.. todo:: Check if splitter node works together with this node.
"""
if self.is_source_node():
return []
else:
transformations = self.input_node.get_previous_transformations(sample)
own_transformation = self.get_own_transformation(sample)
if own_transformation is None:
# generic extraction of transformation for meta nodes
try:
if "flow" in self.__dict__:
own_transformations = self.flow[-1].get_previous_transformations(sample)
transformations.extend(own_transformations)
return transformations
elif "nodes" in self.__dict__:
own_transformations = self.nodes[-1].get_previous_transformations(sample)
transformations.extend(own_transformations)
return transformations
elif "node" in self.__dict__:
own_transformation = self.node.get_own_transformation(sample)
except Exception:
    pass
if not (own_transformation is None):
transformations.append(own_transformation)
return transformations
def get_previous_execute(self,data, number=numpy.inf):
""" Get execution from previous nodes on *data*
*data* is forwarded through the previous *number* input nodes and the
result is returned. By default, the data is recursively
executed from the source node.
This function is needed for the implementation of the classifier
application of the backtransformation concept, where the classifier
function is kept in a state before transformation to track changes in
the processing chain.
"""
if self.is_source_node():
if not number == numpy.inf:
self._log("Index %s to large for recursive execute!" % number,
level=logging.ERROR)
return self._execute(data)
elif number == 1:
return self.input_node._execute(data)
else:
number -= 1
return self.input_node._execute(
self.input_node.get_previous_execute(data, number))
### properties, copied from MDP without change
def get_input_dim(self):
"""Return input dimensions."""
return self._input_dim
def set_input_dim(self, n):
"""Set input dimensions.
Perform sanity checks and then call ``self._set_input_dim(n)``, which
is responsible for setting the internal attribute ``self._input_dim``.
Note that subclasses should overwrite `self._set_input_dim`
when needed.
"""
if n is None:
pass
elif (self._input_dim is not None) and (self._input_dim != n):
msg = ("Input dim are set already (%d) "
"(%d given) in node %s!" % (self.input_dim, n, self.__class__.__name__))
raise InconsistentDimException(msg)
else:
self._set_input_dim(n)
def _set_input_dim(self, n):
self._input_dim = n
input_dim = property(get_input_dim,
set_input_dim,
doc="Input dimensions")
def get_output_dim(self):
"""Return output dimensions."""
return self._output_dim
def set_output_dim(self, n):
"""Set output dimensions.
Perform sanity checks and then call ``self._set_output_dim(n)``, which
is responsible for setting the internal attribute ``self._output_dim``.
Note that subclasses should overwrite `self._set_output_dim`
when needed.
"""
if n is None:
pass
elif (self._output_dim is not None) and (self._output_dim != n):
msg = ("Output dim are set already (%d) "
"(%d given) in node %s!" % (self.output_dim, n, self.__class__.__name__))
raise InconsistentDimException(msg)
else:
self._set_output_dim(n)
def _set_output_dim(self, n):
self._output_dim = n
output_dim = property(get_output_dim,
set_output_dim,
doc="Output dimensions")
### Definition of training sequence from MDP
_train_seq = property(lambda self: self._get_train_seq(),
doc="""\
List of tuples::
[(training-phase1, stop-training-phase1),
(training-phase2, stop_training-phase2),
...]
By default::
_train_seq = [(self._train, self._stop_training)]
""")
def _get_train_seq(self):
return [(self._train, self._stop_training)]
def has_multiple_training_phases(self):
"""Return True if the node has multiple training phases."""
return len(self._train_seq) > 1
### Node states from MDP
def is_training(self):
"""Return True if the node is in the training phase,
False otherwise."""
return self._training
def get_current_train_phase(self):
"""Return the index of the current training phase.
The training phases are defined in the list `self._train_seq`."""
return self._train_phase
def get_remaining_train_phase(self):
"""Return the number of training phases still to accomplish.
If the node is not trainable then return 0.
"""
if self.is_trainable() and self._train_phase >= 0:
return len(self._train_seq) - self._train_phase
else:
return 0
### check functions from mdp
def _check_output(self, y):
# check output rank
if not y.ndim == 2:
error_str = "y has rank %d, should be 2 in node %s" % (y.ndim, self.__class__.__name__)
raise NodeException(error_str)
# check the output dimension
if not y.shape[1] == self.output_dim:
error_str = "y has dimension %d, should be %d in node %s" % (y.shape[1],
self.output_dim,
self.__class__.__name__)
raise NodeException(error_str)
def _if_training_stop_training(self):
if self.is_training():
self.stop_training()
# if there is some training phases left we shouldn't be here!
if self.get_remaining_train_phase() > 0:
error_str = "The training phases of node %s are not completed yet."%self.__class__.__name__
raise TrainingException(error_str)
def _pre_execution_checks(self, x):
"""This method contains all pre-execution checks.
It can be used when a subclass defines multiple execution methods.
"""
# if training has not started yet, assume we want to train the node
if (self.get_current_train_phase() == 0 and
not self._train_phase_started):
while True:
self.train(x)
if self.get_remaining_train_phase() > 1:
self.stop_training()
else:
break
self._if_training_stop_training()
# in case of homogenous data, check the dimension of x
if self.homogenous:
self._check_input(x)
# set the output dimension if necessary
if self.output_dim is None:
self.output_dim = self.input_dim
### casting helper functions from MDP
def _refcast(self, x):
"""Helper function to cast arrays to the internal dtype."""
def refcast(array, dtype):
"""
Cast the array to dtype only if necessary, otherwise return a reference.
.. todo:: move to tools?
"""
dtype = numpy.dtype(dtype)
if array.dtype == dtype:
return array
return array.astype(dtype)
return refcast(x, self.dtype)
### User interface to the overwritten methods
def execute(self, x, in_training=False, *args, **kwargs):
""" Process the data contained in 'x'
If the object is still in the training phase, the function
'stop_training' will be called.
'x' is NOT a matrix having different variables on different columns
and observations on the rows as in MDP.
'x' is a data type object, which can be a TimeSeries,
a FeatureVector or a PredictionVector.
.. note:: This method changes the original MDP implementation.
The main difference to the MDP's standard execute method is
that here the output_dim of the node is set per default to
the size of the node's first result (and not to the size of
the input data). Furthermore we have a possible buffering
mode for retraining and suppress the setting of the dtype.
"""
# data buffering for training in live usage
# needed for the delayed training, which is called
# by the present_label method
if hasattr(self, "buffering") and self.buffering and not in_training:
if not hasattr(self, "data_buffer"):
self.data_buffer = []
# no buffering in request_data_for_training
self.data_buffer.append(x)
self._training_execution_phase = in_training
# Additional feature, that standard keywords are replaced in the
# loading path of the node.
if self.load_path is not None:
self.replace_keywords_in_load_path()
# # if training has not started yet, assume we want to train the node
# # MDP-SPECIFIC CODE, WHICH SHOULD NOT BE USED ANYMORE
# if (self.get_current_train_phase() == 0 and
# not self._train_phase_started):
# while True:
# self.train(x)
# if self.get_remaining_train_phase() > 1:
# self.stop_training()
# else:
# break
#
# self._if_training_stop_training()
# in case of homogenous data, check the dimension of x
if self.homogenous:
self._check_input(x)
# Do the actual computation
result = self._execute(self._refcast(x), *args, **kwargs)
# check output type
if not type(result).__name__ == self.get_output_type(type(x).__name__):
# self._log("Inappropriate output %s to given input %s?" %
# (self.get_output_type(type(x).__name__), type(x).__name__),
# level=logging.CRITICAL)
warnings.warn("Inappropriate output %s to given input %s in %s?" %
(type(result).__name__, type(x).__name__,
self.__class__.__name__) +
" Expected: %s. " % self.get_output_type(type(x).__name__) +
"Please check your node chain. " +
"Please provide a bug report if the respective node is " +
"expecting the wrong output.")
# Make sure key, tag, specs and history are passed
if x.has_meta():
result.inherit_meta_from(x)
else:
result.generate_meta()
if self.keep_in_history:
# Append current data to history
result.add_to_history(result, self.node_specs)
# # set the dtype if necessary
# if self.dtype is None:
# self.dtype = result.dtype
# set the output dimension if necessary
if self.output_dim is None:
shape = result.shape
if len(shape)==1:
self.output_dim = shape[0]
else:
self.output_dim = shape
elif self.homogenous:
if not (self.output_dim in [result.shape,result.shape[1]]):
error_str = "y has dimension %d, should be %d in node %s" \
% (result.shape[1], self.output_dim[1],
self.__class__.__name__)
raise Exception(error_str)
return result
def train(self, x, *args, **kwargs):
"""Update the internal structures according to the input data `x`.
`x` is a pySPACE data type object (e.g., a TimeSeries or FeatureVector),
not an MDP-style matrix; see the `execute` method for details.
By default, subclasses should overwrite `_train` to implement their
training phase. The docstring of the `_train` method overwrites this
docstring.
.. note::
A subclass supporting multiple training phases should implement
the *same* signature for all the training phases and document the
meaning of the arguments in the `_train` method doc-string. Having
consistent signatures is a requirement to use the node in a
node chain.
"""
if not self.is_trainable():
raise IsNotTrainableException(
"The node %s is not trainable." % self.__class__.__name__)
if not self.is_training():
err_str = "The training phase of node %s has already finished." \
% self.__class__.__name__
raise TrainingFinishedException(err_str)
# in case of homogenous data, check the dimension of x
if self.homogenous:
self._check_input(x)
self._check_train_args(x, *args, **kwargs)
self._train_phase_started = True
self._train_seq[self._train_phase][0](self._refcast(x), *args, **kwargs)
def stop_training(self, *args, **kwargs):
"""Stop the training phase.
By default, subclasses should overwrite `_stop_training` to implement
this functionality. The docstring of the `_stop_training` method
overwrites this docstring.
"""
if self.is_training() and not self._train_phase_started:
raise TrainingException(
"The node %s has not been trained. "
% self.__class__.__name__ +
"Check if you specified training data or a validation scheme" +
" (splitter). Furthermore you should check the node " +
"parameters. Did you specify relevant labels correct?")
if not self.is_training():
err_str = "The training phase of node %s has already finished."\
% self.__class__.__name__
raise TrainingFinishedException(err_str)
# close the current phase.
self._train_seq[self._train_phase][1](*args, **kwargs)
self._train_phase += 1
self._train_phase_started = False
# check if we have some training phase left
if self.get_remaining_train_phase() == 0:
self._training = False
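# Training protocol sketch: feed samples with train(), close each phase
# with stop_training(); once the last phase is closed the node leaves
# the training state (``train_set`` is a hypothetical iterable).
# >>> for data, label in train_set:
# ...     node.train(data, label)
# >>> node.stop_training()
# >>> node.is_training()   # False if there was only one training phase
# False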
def __call__(self, x, *args, **kwargs):
"""Calling an instance of `Node` is equivalent to calling
its `execute` method."""
return self.execute(x, *args, **kwargs)
###### string representation
def __str__(self):
return str(type(self).__name__)
def __repr__(self):
# print input_dim, output_dim, dtype
name = type(self).__name__
inp = "input_dim=%s" % str(self.input_dim)
out = "output_dim=%s" % str(self.output_dim)
if self.dtype is None:
typ = 'dtype=None'
else:
typ = "dtype='%s'" % self.dtype.name
args = ', '.join((inp, out, typ))
return name + '(' + args + ')'
def copy(self, protocol=None):
"""Return a deep copy of the node.
:param protocol: the pickle protocol (deprecated).
.. todo:: check if needed
"""
if protocol is not None:
warnings.warn("protocol parameter to copy() is ignored")
return copy.deepcopy(self)
def save(self, filename, protocol=-1):
"""Save a pickled serialization of the node to `filename`.
If `filename` is None, return a string.
Note: the pickled `Node` is not guaranteed to be forwards or
backwards compatible.
.. todo:: check if needed
"""
if filename is None:
return cPickle.dumps(self, protocol)
else:
# if protocol != 0 open the file in binary mode
mode = 'wb' if protocol != 0 else 'w'
with open(filename, mode) as flh:
cPickle.dump(self, flh, protocol)
def get_metadata(self, key):
if self.input_node is not None:
return self.input_node.get_metadata(key)
else:
return None
# Specify special node names, different to standard names
_NODE_MAPPING = {"Noop": BaseNode}
| bsd-3-clause |
nanolearningllc/edx-platform-cypress | cms/djangoapps/contentstore/views/checklist.py | 85 | 6198 | import json
import copy
from util.json_request import JsonResponse
from django.http import HttpResponseBadRequest
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
from django.http import HttpResponseNotFound
from django.core.exceptions import PermissionDenied
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from contentstore.utils import reverse_course_url
from student.auth import has_course_author_access
from xmodule.course_module import CourseDescriptor
from django.utils.translation import ugettext
__all__ = ['checklists_handler']
# pylint: disable=unused-argument
@require_http_methods(("GET", "POST", "PUT"))
@login_required
@ensure_csrf_cookie
def checklists_handler(request, course_key_string, checklist_index=None):
"""
The restful handler for checklists.
GET
html: return html page for all checklists
json: return json representing all checklists. checklist_index is not supported for GET at this time.
POST or PUT
json: updates the checked state for items within a particular checklist. checklist_index is required.
"""
course_key = CourseKey.from_string(course_key_string)
if not has_course_author_access(request.user, course_key):
raise PermissionDenied()
course_module = modulestore().get_course(course_key)
json_request = 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json')
if request.method == 'GET':
# If course was created before checklists were introduced, copy them over
# from the template.
if not course_module.checklists:
course_module.checklists = CourseDescriptor.checklists.default
modulestore().update_item(course_module, request.user.id)
expanded_checklists = expand_all_action_urls(course_module)
if json_request:
return JsonResponse(expanded_checklists)
else:
handler_url = reverse_course_url('checklists_handler', course_key)
return render_to_response('checklists.html',
{
'handler_url': handler_url,
# context_course is used by analytics
'context_course': course_module,
'checklists': expanded_checklists
})
elif json_request:
# Can now assume POST or PUT because GET handled above.
if checklist_index is not None and 0 <= int(checklist_index) < len(course_module.checklists):
index = int(checklist_index)
persisted_checklist = course_module.checklists[index]
modified_checklist = json.loads(request.body)
# Only thing the user can modify is the "checked" state.
# We don't want to persist what comes back from the client because it will
# include the expanded action URLs (which are non-portable).
for item_index, item in enumerate(modified_checklist.get('items')):
persisted_checklist['items'][item_index]['is_checked'] = item['is_checked']
# seeming noop which triggers kvs to record that the metadata is
# not default
course_module.checklists = course_module.checklists
course_module.save()
modulestore().update_item(course_module, request.user.id)
expanded_checklist = expand_checklist_action_url(course_module, persisted_checklist)
return JsonResponse(localize_checklist_text(expanded_checklist))
else:
return HttpResponseBadRequest(
("Could not save checklist state because the checklist index "
"was out of range or unspecified."),
content_type="text/plain"
)
else:
return HttpResponseNotFound()
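# Example PUT body for a single checklist (sketch; field values are
# hypothetical). Only the "is_checked" flags are persisted; everything
# else in the payload is ignored:
# {
#     "short_description": "Getting Started With Studio",
#     "items": [{"is_checked": true, ...}, {"is_checked": false, ...}]
# }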
def expand_all_action_urls(course_module):
"""
Gets the checklists out of the course module and expands their action urls.
Returns a copy of the checklists with modified urls, without modifying the persisted version
of the checklists.
"""
expanded_checklists = []
for checklist in course_module.checklists:
expanded_checklists.append(localize_checklist_text(expand_checklist_action_url(course_module, checklist)))
return expanded_checklists
def expand_checklist_action_url(course_module, checklist):
"""
Expands the action URLs for a given checklist and returns the modified version.
The method does a copy of the input checklist and does not modify the input argument.
"""
expanded_checklist = copy.deepcopy(checklist)
urlconf_map = {
"ManageUsers": "course_team_handler",
"CourseOutline": "course_handler",
"SettingsDetails": "settings_handler",
"SettingsGrading": "grading_handler",
}
for item in expanded_checklist.get('items'):
action_url = item.get('action_url')
if action_url in urlconf_map:
item['action_url'] = reverse_course_url(urlconf_map[action_url], course_module.id)
return expanded_checklist
def localize_checklist_text(checklist):
"""
Localize texts for a given checklist and returns the modified version.
The method does an in-place operation so the input checklist is modified directly.
"""
# Localize checklist name
checklist['short_description'] = ugettext(checklist['short_description']) # pylint: disable=translation-of-non-string
# Localize checklist items
for item in checklist.get('items'):
item['short_description'] = ugettext(item['short_description']) # pylint: disable=translation-of-non-string
item['long_description'] = ugettext(item['long_description']) # pylint: disable=translation-of-non-string
item['action_text'] = ugettext(item['action_text']) # pylint: disable=translation-of-non-string
return checklist
| agpl-3.0 |
feketemihai/hr | hr_policy_ot/hr_policy_ot.py | 27 | 4399 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from pytz import common_timezones
from openerp.osv import fields, orm
class policy_ot(orm.Model):
_name = 'hr.policy.ot'
_columns = {
'name': fields.char('Name', size=128, required=True),
'date': fields.date('Effective Date', required=True),
'line_ids': fields.one2many(
'hr.policy.line.ot', 'policy_id', 'Policy Lines'),
}
# Return records with latest date first
_order = 'date desc'
def get_codes(self, cr, uid, idx, context=None):
    res = []
    for line in self.browse(cr, uid, idx, context=context).line_ids:
        res.append((line.code, line.name, line.type, line.rate))
    return res
def daily_codes(self, cr, uid, idx, context=None):
    return [
        (line.code, line.name)
        for line in self.browse(cr, uid, idx, context=context).line_ids
        if line.type == 'daily'
    ]
def restday_codes(self, cr, uid, idx, context=None):
return [
(line.code, line.name)
for line in self.browse(cr, uid, idx, context=context).line_ids
if line.type == 'weekly' and line.active_after_units == 'day'
]
def restday2_codes(self, cr, uid, idx, context=None):
    return [
        (line.code, line.name)
        for line in self.browse(cr, uid, idx, context=context).line_ids
        if line.type == 'restday'
    ]
def weekly_codes(self, cr, uid, idx, context=None):
return [
(line.code, line.name)
for line in self.browse(cr, uid, idx, context=context).line_ids
if line.type == 'weekly' and line.active_after_units == 'min'
]
def holiday_codes(self, cr, uid, idx, context=None):
return [
(line.code, line.name)
for line in self.browse(cr, uid, idx, context=context).line_ids
if line.type == 'holiday'
]
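# Sketch of the returned code lists (hypothetical policy with two lines;
# ``policy_obj`` and ``policy_id`` are assumed to exist):
# >>> policy_obj.get_codes(cr, uid, policy_id)
# [('OT125', 'Daily OT', 'daily', 1.25),
#  ('OT200', 'Holiday OT', 'holiday', 2.0)]
# >>> policy_obj.daily_codes(cr, uid, policy_id)
# [('OT125', 'Daily OT')]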
class policy_line_ot(orm.Model):
_name = 'hr.policy.line.ot'
def _tz_list(self, cr, uid, context=None):
    return [(name, name) for name in common_timezones]
_columns = {
'name': fields.char('Name', size=64, required=True),
'policy_id': fields.many2one('hr.policy.ot', 'Policy'),
'type': fields.selection([('daily', 'Daily'),
('weekly', 'Weekly'),
('restday', 'Rest Day'),
('holiday', 'Public Holiday')],
'Type', required=True),
'weekly_working_days': fields.integer('Weekly Working Days'),
'active_after': fields.integer(
'Active After', help="Minutes after which this policy applies"),
'active_start_time': fields.char(
'Active Start Time', size=5, help="Time in 24 hour time format"),
'active_end_time': fields.char(
'Active End Time', size=5, help="Time in 24 hour time format"),
'tz': fields.selection(_tz_list, 'Time Zone'),
'rate': fields.float(
'Rate', required=True, help='Multiplier of employee wage.'),
'code': fields.char(
'Code', required=True, help="Use this code in the salary rules.")
}
class policy_group(orm.Model):
_name = 'hr.policy.group'
_inherit = 'hr.policy.group'
_columns = {
'ot_policy_ids': fields.many2many(
'hr.policy.ot', 'hr_policy_group_ot_rel',
'group_id', 'ot_id', 'Overtime Policy'),
}
| agpl-3.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/numpy/lib/shape_base.py | 10 | 28668 | from __future__ import division, absolute_import, print_function
import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, zeros, outer, concatenate, array, asanyarray
)
from numpy.core.fromnumeric import product, reshape, transpose
from numpy.core.multiarray import normalize_axis_index
from numpy.core import vstack, atleast_3d
from numpy.lib.index_tricks import ndindex
from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells
__all__ = [
'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap'
]
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
is a 1-D slice of `arr` along `axis`.
This is equivalent to (but faster than) the following use of `ndindex` and
`s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
for kk in ndindex(Nk):
f = func1d(arr[ii + s_[:,] + kk])
Nj = f.shape
for jj in ndindex(Nj):
out[ii + jj + kk] = f[jj]
Equivalently, eliminating the inner loop, this can be expressed as::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
for kk in ndindex(Nk):
out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])
Parameters
----------
func1d : function (M,) -> (Nj...)
This function should accept 1-D arrays. It is applied to 1-D
slices of `arr` along the specified axis.
axis : integer
Axis along which `arr` is sliced.
arr : ndarray (Ni..., M, Nk...)
Input array.
args : any
Additional arguments to `func1d`.
kwargs : any
Additional named arguments to `func1d`.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray (Ni..., Nj..., Nk...)
The output array. The shape of `out` is identical to the shape of
`arr`, except along the `axis` dimension. This axis is removed, and
replaced with new dimensions equal to the shape of the return value
of `func1d`. So if `func1d` returns a scalar `out` will have one
fewer dimensions than `arr`.
See Also
--------
apply_over_axes : Apply a function repeatedly over multiple axes.
Examples
--------
>>> def my_func(a):
... \"\"\"Average first and last element of a 1-D array\"\"\"
... return (a[0] + a[-1]) * 0.5
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(my_func, 0, b)
array([ 4., 5., 6.])
>>> np.apply_along_axis(my_func, 1, b)
array([ 2., 5., 8.])
For a function that returns a 1D array, the number of dimensions in
`outarr` is the same as `arr`.
>>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
>>> np.apply_along_axis(sorted, 1, b)
array([[1, 7, 8],
[3, 4, 9],
[2, 5, 6]])
For a function that returns a higher dimensional array, those dimensions
are inserted in place of the `axis` dimension.
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(np.diag, -1, b)
array([[[1, 0, 0],
[0, 2, 0],
[0, 0, 3]],
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]],
[[7, 0, 0],
[0, 8, 0],
[0, 0, 9]]])
"""
# handle negative axes
arr = asanyarray(arr)
nd = arr.ndim
axis = normalize_axis_index(axis, nd)
# arr, with the iteration axis at the end
in_dims = list(range(nd))
inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis])
# compute indices for the iteration axes, and append a trailing ellipsis to
# prevent 0d arrays decaying to scalars, which fixes gh-8642
inds = ndindex(inarr_view.shape[:-1])
inds = (ind + (Ellipsis,) for ind in inds)
# invoke the function on the first item
try:
ind0 = next(inds)
except StopIteration:
raise ValueError('Cannot apply_along_axis when any iteration dimensions are 0')
res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
# build a buffer for storing evaluations of func1d.
# remove the requested axis, and add the new ones on the end.
# laid out so that each write is contiguous.
# for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)
# permutation of axes such that out = buff.transpose(buff_permute)
buff_dims = list(range(buff.ndim))
buff_permute = (
buff_dims[0 : axis] +
buff_dims[buff.ndim-res.ndim : buff.ndim] +
buff_dims[axis : buff.ndim-res.ndim]
)
# matrices have a nasty __array_prepare__ and __array_wrap__
if not isinstance(res, matrix):
buff = res.__array_prepare__(buff)
# save the first result, then compute and save all remaining results
buff[ind0] = res
for ind in inds:
buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))
if not isinstance(res, matrix):
# wrap the array, to preserve subclasses
buff = res.__array_wrap__(buff)
# finally, rotate the inserted axes back to where they belong
return transpose(buff, buff_permute)
else:
# matrices have to be transposed first, because they collapse dimensions!
out_arr = transpose(buff, buff_permute)
return res.__array_wrap__(out_arr)
def apply_over_axes(func, a, axes):
"""
Apply a function repeatedly over multiple axes.
`func` is called as `res = func(a, axis)`, where `axis` is the first
element of `axes`. The result `res` of the function call must have
either the same dimensions as `a` or one less dimension. If `res`
has one less dimension than `a`, a dimension is inserted before
`axis`. The call to `func` is then repeated for each axis in `axes`,
with `res` as the first argument.
Parameters
----------
func : function
This function must take two arguments, `func(a, axis)`.
a : array_like
Input array.
axes : array_like
Axes over which `func` is applied; the elements must be integers.
Returns
-------
apply_over_axis : ndarray
The output array. The number of dimensions is the same as `a`,
but the shape can be different. This depends on whether `func`
changes the shape of its output with respect to its input.
See Also
--------
apply_along_axis :
Apply a function to 1-D slices of an array along the given axis.
Notes
------
This function is equivalent to tuple axis arguments to reorderable ufuncs
with keepdims=True. Tuple axis arguments to ufuncs have been available since
version 1.7.0.
Examples
--------
>>> a = np.arange(24).reshape(2,3,4)
>>> a
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
Sum over axes 0 and 2. The result has same number of dimensions
as the original array:
>>> np.apply_over_axes(np.sum, a, [0,2])
array([[[ 60],
[ 92],
[124]]])
Tuple axis arguments to ufuncs are equivalent:
>>> np.sum(a, axis=(0,2), keepdims=True)
array([[[ 60],
[ 92],
[124]]])
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
def expand_dims(a, axis):
"""
Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the expanded
array shape.
.. note:: Previous to NumPy 1.13.0, neither ``axis < -a.ndim - 1`` nor
``axis > a.ndim`` raised errors or put the new axis where documented.
Those axis values are now deprecated and will raise an AxisError in the
future.
Parameters
----------
a : array_like
Input array.
axis : int
Position in the expanded axes where the new axis is placed.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
See Also
--------
squeeze : The inverse operation, removing singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
doc.indexing, atleast_1d, atleast_2d, atleast_3d
Examples
--------
>>> x = np.array([1,2])
>>> x.shape
(2,)
The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:
>>> y = np.expand_dims(x, axis=0)
>>> y
array([[1, 2]])
>>> y.shape
(1, 2)
>>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]
>>> y
array([[1],
[2]])
>>> y.shape
(2, 1)
Note that some examples may use ``None`` instead of ``np.newaxis``. These
are the same objects:
>>> np.newaxis is None
True
"""
a = asarray(a)
shape = a.shape
if axis > a.ndim or axis < -a.ndim - 1:
# 2017-05-17, 1.13.0
warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are "
"deprecated and will raise an AxisError in the future.",
DeprecationWarning, stacklevel=2)
# When the deprecation period expires, delete this if block,
if axis < 0:
axis = axis + a.ndim + 1
# and uncomment the following line.
# axis = normalize_axis_index(axis, a.ndim + 1)
return a.reshape(shape[:axis] + (1,) + shape[axis:])
row_stack = vstack
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
-------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
stack, hstack, vstack, concatenate
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrays = []
for v in tup:
arr = array(v, copy=False, subok=True)
if arr.ndim < 2:
arr = array(arr, copy=False, subok=True, ndmin=2).T
arrays.append(arr)
return _nx.concatenate(arrays, 1)
def dstack(tup):
"""
Stack arrays in sequence depth wise (along third axis).
This is equivalent to concatenation along the third axis after 2-D arrays
of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
`(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
`dsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of arrays
The arrays must have the same shape along all but the third axis.
1-D or 2-D arrays must have the same shape.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 3-D.
See Also
--------
stack : Join a sequence of arrays along a new axis.
vstack : Stack along first axis.
hstack : Stack along second axis.
concatenate : Join a sequence of arrays along an existing axis.
dsplit : Split array along third axis.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if _nx.ndim(sub_arys[i]) == 0:
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
return sub_arys
def array_split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
Please refer to the ``split`` documentation. The only difference
between these functions is that ``array_split`` allows
`indices_or_sections` to be an integer that does *not* equally
divide the axis. For an array of length l that should be split
into n sections, it returns l % n sub-arrays of size l//n + 1
and the rest of size l//n.
See Also
--------
split : Split array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
[array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])]
>>> x = np.arange(7.0)
>>> np.array_split(x, 3)
[array([ 0., 1., 2.]), array([ 3., 4.]), array([ 5., 6.])]
"""
try:
Ntotal = ary.shape[axis]
except AttributeError:
Ntotal = len(ary)
try:
# handle scalar case.
Nsections = len(indices_or_sections) + 1
div_points = [0] + list(indices_or_sections) + [Ntotal]
except TypeError:
# indices_or_sections is a scalar, not an array.
Nsections = int(indices_or_sections)
if Nsections <= 0:
raise ValueError('number sections must be larger than 0.')
Neach_section, extras = divmod(Ntotal, Nsections)
section_sizes = ([0] +
extras * [Neach_section+1] +
(Nsections-extras) * [Neach_section])
div_points = _nx.array(section_sizes).cumsum()
sub_arys = []
sary = _nx.swapaxes(ary, axis, 0)
for i in range(Nsections):
st = div_points[i]
end = div_points[i + 1]
sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
return sub_arys
def split(ary,indices_or_sections,axis=0):
"""
Split an array into multiple sub-arrays.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D array
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division.
See Also
--------
array_split : Split an array into multiple sub-arrays of equal or
near-equal size. Does not raise an exception if
an equal division cannot be made.
hsplit : Split array into multiple sub-arrays horizontally (column-wise).
vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
Examples
--------
>>> x = np.arange(9.0)
>>> np.split(x, 3)
[array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])]
>>> x = np.arange(8.0)
>>> np.split(x, [3, 5, 6, 10])
[array([ 0., 1., 2.]),
array([ 3., 4.]),
array([ 5.]),
array([ 6., 7.]),
array([], dtype=float64)]
"""
try:
len(indices_or_sections)
except TypeError:
sections = indices_or_sections
N = ary.shape[axis]
if N % sections:
raise ValueError(
'array split does not result in an equal division')
res = array_split(ary, indices_or_sections, axis)
return res
def hsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays horizontally (column-wise).
Please refer to the `split` documentation. `hsplit` is equivalent
to `split` with ``axis=1``, the array is always split along the second
axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[ 12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[ 10., 11.],
[ 14., 15.]])]
>>> np.hsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[ 12., 13., 14.]]),
array([[ 3.],
[ 7.],
[ 11.],
[ 15.]]),
array([], dtype=float64)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[ 0., 1.]],
[[ 4., 5.]]]),
array([[[ 2., 3.]],
[[ 6., 7.]]])]
"""
if _nx.ndim(ary) == 0:
raise ValueError('hsplit only works on arrays of 1 or more dimensions')
if ary.ndim > 1:
return split(ary, indices_or_sections, 1)
else:
return split(ary, indices_or_sections, 0)
def vsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays vertically (row-wise).
Please refer to the ``split`` documentation. ``vsplit`` is equivalent
to ``split`` with `axis=0` (default), the array is always split along the
first axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]]),
array([[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])]
>>> np.vsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]),
array([[ 12., 13., 14., 15.]]),
array([], dtype=float64)]
With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[ 0., 1.],
[ 2., 3.]]]),
array([[[ 4., 5.],
[ 6., 7.]]])]
"""
if _nx.ndim(ary) < 2:
raise ValueError('vsplit only works on arrays of 2 or more dimensions')
return split(ary, indices_or_sections, 0)
def dsplit(ary, indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[ 12., 13.]]]),
array([[[ 2., 3.],
[ 6., 7.]],
[[ 10., 11.],
[ 14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[ 12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[ 11.],
[ 15.]]]),
array([], dtype=float64)]
"""
if _nx.ndim(ary) < 3:
raise ValueError('dsplit only works on arrays of 3 or more dimensions')
return split(ary, indices_or_sections, 2)
def get_array_prepare(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None
"""
wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
x.__array_prepare__) for i, x in enumerate(args)
if hasattr(x, '__array_prepare__'))
if wrappers:
return wrappers[-1][-1]
return None
def get_array_wrap(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None.
"""
wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
x.__array_wrap__) for i, x in enumerate(args)
if hasattr(x, '__array_wrap__'))
if wrappers:
return wrappers[-1][-1]
return None
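# Illustrative sketch (not part of the original module): the sort key above
# is (priority, -position, wrapper), so wrappers[-1] selects the highest
# __array_priority__ and, on ties, the leftmost argument. A toy example with
# stand-in wrapper objects (plain strings, purely for illustration):
#
#     >>> class Lo: __array_priority__ = 1; __array_wrap__ = 'lo'
#     >>> class Hi: __array_priority__ = 2; __array_wrap__ = 'hi'
#     >>> get_array_wrap(Lo(), Hi())
#     'hi'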
def kron(a, b):
"""
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : array_like
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
Notes
-----
The function assumes that the numbers of dimensions of `a` and `b`
are the same, if necessary prepending the smaller shape with ones.
If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])
>>> np.kron(np.eye(2), np.ones((2,2)))
array([[ 1., 1., 0., 0.],
[ 1., 1., 0., 0.],
[ 0., 0., 1., 1.],
[ 0., 0., 1., 1.]])
>>> a = np.arange(100).reshape((2,5,2,5))
>>> b = np.arange(24).reshape((2,3,4))
>>> c = np.kron(a,b)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1,3,0,2)
>>> J = (0,2,1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
>>> c[K] == a[I]*b[J]
True
"""
b = asanyarray(b)
a = array(a, copy=False, subok=True, ndmin=b.ndim)
ndb, nda = b.ndim, a.ndim
if (nda == 0 or ndb == 0):
return _nx.multiply(a, b)
as_ = a.shape
bs = b.shape
if not a.flags.contiguous:
a = reshape(a, as_)
if not b.flags.contiguous:
b = reshape(b, bs)
nd = ndb
if (ndb != nda):
if (ndb > nda):
as_ = (1,)*(ndb-nda) + as_
else:
bs = (1,)*(nda-ndb) + bs
nd = nda
result = outer(a, b).reshape(as_+bs)
axis = nd-1
for _ in range(nd):
result = concatenate(result, axis=axis)
wrapper = get_array_prepare(a, b)
if wrapper is not None:
result = wrapper(result)
wrapper = get_array_wrap(a, b)
if wrapper is not None:
result = wrapper(result)
return result
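# Illustrative sketch (not part of the original module): checking the index
# identity from the docstring, kt = it*st + jt, on a tiny 2-D case:
#
#     >>> a = np.array([[1, 2], [3, 4]])
#     >>> b = np.array([[0, 5], [6, 7]])
#     >>> K = np.kron(a, b)
#     >>> bool(K[1*2 + 0, 0*2 + 1] == a[1, 0] * b[0, 1])
#     True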
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by prepending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Note: Although tile may be used for broadcasting, it is strongly
recommended to use numpy's broadcasting operations and functions.
Parameters
----------
A : array_like
The input array.
reps : array_like
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
See Also
--------
repeat : Repeat elements of an array.
broadcast_to : Broadcast an array to a new shape
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0, 1, 2, 0, 1, 2])
>>> np.tile(a, (2, 2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> np.tile(b, (2, 1))
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> c = np.array([1,2,3,4])
>>> np.tile(c,(4,1))
array([[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]])
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
# Fixes the problem that the function does not make a copy if A is a
# numpy array and the repetitions are 1 in all dimensions
return _nx.array(A, copy=True, subok=True, ndmin=d)
else:
# Note that no copy of zero-sized arrays is made. However since they
# have no data there is no risk of an inadvertent overwrite.
c = _nx.array(A, copy=False, subok=True, ndmin=d)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
shape_out = tuple(s*t for s, t in zip(c.shape, tup))
n = c.size
if n > 0:
for dim_in, nrep in zip(c.shape, tup):
if nrep != 1:
c = c.reshape(-1, n).repeat(nrep, 0)
n //= dim_in
return c.reshape(shape_out)
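# Illustrative sketch (not part of the original module): the loop above tiles
# one axis at a time by flattening to (-1, n) and repeating rows; for a
# single axis with nrep > 1 it is equivalent to:
#
#     >>> a = np.arange(6).reshape(2, 3)
#     >>> np.array_equal(np.tile(a, (2, 1)),
#     ...                a.reshape(-1, a.size).repeat(2, 0).reshape(4, 3))
#     True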
| mit |
jburel/openmicroscopy | components/tools/OmeroPy/src/omero/util/metadata_mapannotations.py | 5 | 7744 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Utilities for manipulating map-annotations used as metadata
"""
import logging
from omero.model import NamedValue
from omero.rtypes import rstring, unwrap
# For complicated reasons `from omero.sys import ParametersI` doesn't work
from omero_sys_ParametersI import ParametersI
log = logging.getLogger("omero.util.metadata_mapannotations")
class MapAnnotationPrimaryKeyException(Exception):
def __init__(self, message):
super(MapAnnotationPrimaryKeyException, self).__init__(message)
class CanonicalMapAnnotation(object):
"""
A canonical representation of a map-annotation for metadata use
This is based around the idea of a primary key derived from the
combination of the namespace with 1+ keys-value pairs. A null
namespace is treated as an empty string (''), but still forms part
of the primary key.
ma: The omero.model.MapAnnotation object
primary_keys: Keys from key-value pairs that will be used to form the
primary key.
"""
def __init__(self, ma, primary_keys=None):
# TODO: should we consider data and description
self.ma = ma
ns = unwrap(ma.getNs())
self.ns = ns if ns else ''
try:
mapvalue = [(kv.name, kv.value) for kv in ma.getMapValue()]
except TypeError:
mapvalue = []
self.kvpairs, self.primary = self.process_keypairs(
mapvalue, primary_keys)
self.parents = set()
def process_keypairs(self, kvpairs, primary_keys):
if len(set(kvpairs)) != len(kvpairs):
raise ValueError('Duplicate key-value pairs found: %s' % kvpairs)
if primary_keys:
primary_keys = set(primary_keys)
missing = primary_keys.difference(kv[0] for kv in kvpairs)
if missing:
raise MapAnnotationPrimaryKeyException(
'Missing primary key fields: %s' % missing)
# ns is always part of the primary key
primary = (
self.ns,
frozenset((k, v) for (k, v) in kvpairs if k in primary_keys))
else:
primary = None
return kvpairs, primary
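# Illustrative sketch (not part of the original module): for a map-annotation
# in namespace 'my/ns' with pairs [('Gene', 'abc1'), ('Comment', 'x')] and
# primary_keys={'Gene'}, the primary key built above would be
#
#     ('my/ns', frozenset([('Gene', 'abc1')]))
#
# i.e. the namespace plus the frozen subset of pairs whose keys are primary.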
def merge(self, other):
"""
Adds any key/value pairs from other that aren't in self
Adds parents from other
Does not update primary key
"""
if self.kvpairs != other.kvpairs:
kvpairsset = set(self.kvpairs)
for okv in other.kvpairs:
if okv not in kvpairsset:
self.kvpairs.append(okv)
self.merge_parents(other)
def merge_parents(self, other):
self.parents.update(other.parents)
def add_parent(self, parenttype, parentid):
"""
Add a parent descriptor
Parameter types are important because they are used in a set
parenttype: An OMERO type string
parentid: An OMERO object ID (integer)
"""
if not isinstance(parenttype, str) or not isinstance(
parentid, (int, long)):
raise ValueError('Expected parenttype:str parentid:integer')
self.parents.add((parenttype, parentid))
def get_mapann(self):
"""
Update and return an omero.model.MapAnnotation with merged/combined
fields
"""
mv = [NamedValue(*kv) for kv in self.kvpairs]
self.ma.setMapValue(mv)
self.ma.setNs(rstring(self.ns))
return self.ma
def get_parents(self):
return self.parents
def __str__(self):
return 'ns:%s primary:%s keyvalues:%s parents:%s id:%s' % (
self.ns, self.primary, self.kvpairs, self.parents,
unwrap(self.ma.getId()))
class MapAnnotationManager(object):
"""
Handles creation and de-duplication of MapAnnotations
"""
# Policies for combining/replacing MapAnnotations
MA_APPEND, MA_OLD, MA_NEW = range(3)
def __init__(self, combine=MA_APPEND):
"""
Ensure you understand the doc string for init_from_namespace_query
if not using MA_APPEND
"""
self.mapanns = {}
self.nokey = []
self.combine = combine
def add(self, cma):
"""
Adds a CanonicalMapAnnotation to the managed list.
Returns any CanonicalMapAnnotation that are no longer required,
this may be cma or it may be a previously added annotation.
The idea is that this can be used to de-duplicate existing OMERO
MapAnnotations by calling add() on all MapAnnotations and deleting
those which are returned
If MapAnnotations are combined the parents of the unwanted
MapAnnotations are appended to the one that is kept by the manager.
:param cma: A CanonicalMapAnnotation
"""
if cma.primary is None:
self.nokey.append(cma)
return
try:
current = self.mapanns[cma.primary]
if current.ma is cma.ma:
# Don't re-add an identical object
return
if self.combine == self.MA_APPEND:
current.merge(cma)
return cma
if self.combine == self.MA_NEW:
self.mapanns[cma.primary] = cma
cma.merge_parents(current)
return current
if self.combine == self.MA_OLD:
current.merge_parents(cma)
return cma
raise ValueError('Invalid combine policy')
except KeyError:
self.mapanns[cma.primary] = cma
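# Illustrative sketch (not part of the original module): the de-duplication
# loop suggested by the docstring of add(). The names existing_mapanns and
# delete_objects are hypothetical stand-ins, not part of this module:
#
#     mgr = MapAnnotationManager()
#     to_delete = []
#     for ma in existing_mapanns:
#         r = mgr.add(CanonicalMapAnnotation(ma, primary_keys=['Gene']))
#         if r is not None:
#             to_delete.append(r.get_mapann())
#     # delete_objects(to_delete)  # hypothetical cleanup of redundant ones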
def get_map_annotations(self):
return self.mapanns.values() + self.nokey
def add_from_namespace_query(self, session, ns, primary_keys):
"""
Fetches all map-annotations with the given namespace
This will only work if there are no duplicates, otherwise an
exception will be thrown
WARNING: You should probably only use this in MA_APPEND mode since
the parents of existing annotations aren't fetched (requires a query
for each parent type)
WARNING: This may be resource intensive
TODO: Use omero.utils.populate_metadata._QueryContext for batch queries
:param session: An OMERO session
:param ns: The namespace
:param primary_keys: Primary keys
"""
qs = session.getQueryService()
q = 'FROM MapAnnotation WHERE ns=:ns ORDER BY id DESC'
p = ParametersI()
p.addString('ns', ns)
results = qs.findAllByQuery(q, p)
log.debug('Found %d MapAnnotations in ns:%s', len(results), ns)
for ma in results:
cma = CanonicalMapAnnotation(ma, primary_keys)
r = self.add(cma)
if r:
raise Exception(
'Duplicate MapAnnotation primary key: id:%s %s' % (
unwrap(ma.getId()), str(r)))
| gpl-2.0 |
jricks92/aws-s3info | s3info.py | 1 | 18566 | #!/usr/bin/python
#
# Copyright 2017 Jameson Ricks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Original idea taken from http://www.slsmk.com/getting-the-size-of-an-s3-bucket-using-boto3-for-aws
# Added human readable output, thread concurrency, profile support, and easy ability to pipe total to another program
import sys
import datetime
import collections
import concurrent.futures
try:
import boto3
from botocore.exceptions import ClientError
except ImportError:
print('''
ERROR: You must have the boto3 package installed in your python environment to run
this script! You can install it by running:
pip install boto3
''')
sys.exit(1)
class Session:
## Default variables
now = datetime.datetime.now()
num_workers = 10
quiet = False
single_thread = False
raw_bytes = False
profile = None
no_comma = False
report_mode = False
region_csv = False
# Output column widths
f_col_width = 85
l_col_width = 25
# Array of different S3 Storage types
storage_types = ['StandardStorage', 'StandardIAStorage',
'ReducedRedundancyStorage', 'GlacierObjectOverhead']
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] # For human readable
# Keep running total
total = 0
total_objects = 0
# Strings
size_str = "Size"
# Header Line for the output going to standard out
header = '\nBucket'.ljust(f_col_width) + size_str.rjust(l_col_width) + "\n"
header += '-' * (f_col_width + l_col_width)
# Buckets
all_buckets = {}
# Results
results = collections.OrderedDict()
def get_bucket_region(self, bucket, s3_client):
try:
region = s3_client.get_bucket_location(Bucket=bucket)["LocationConstraint"]
except ClientError as e:
if not self.quiet:
print("Error on bucket: %s" % bucket)
print(e)
return
if region == None:
region = 'us-east-1'
self.all_buckets[bucket] = region
# Gets the correct cloudwatch client depending on the region
def get_cloudwatch_client(self, bucket, session):
if session.region_name == self.all_buckets[bucket]:
cw_client = session.client('cloudwatch')
return cw_client
else:
new_session = boto3.Session(region_name=self.all_buckets[bucket])
cw_client = new_session.client('cloudwatch')
return cw_client
# Outputs the results of the process
def print_results(self):
if self.region_csv:
self.print_regions_csv()
return
# Print header line
if not self.quiet:
print(self.header)
# Sort the buckets
results_sorted = collections.OrderedDict(sorted(self.results.items()))
for bucket in results_sorted:
for st_type in results_sorted[bucket]:
if st_type != "NumberOfObjects":
# Print storage type with bucket
bucket_name = "%s (%s)" % (bucket, st_type)
# Append number of items per bucket
bucket_bytes = "(" + str(results_sorted[bucket]["NumberOfObjects"])
if int(results_sorted[bucket]["NumberOfObjects"]) > 1:
bucket_bytes += " Items) "
else:
bucket_bytes += " Item) "
# Check arguments to ensure correct output
if self.raw_bytes:
if self.no_comma:
bucket_bytes += str(results_sorted[bucket][st_type])
else:
bucket_bytes += str("{:,}".format(results_sorted[bucket][st_type]))
else:
bucket_bytes += humansize(results_sorted[bucket][st_type], self.suffixes)
# Print out each line
if not self.quiet:
print(bucket_name.ljust(self.f_col_width) +
bucket_bytes.rjust(self.l_col_width))
if not self.quiet:
print('-' * (self.f_col_width + self.l_col_width))
if self.report_mode:
print(self.total)
else:
if self.raw_bytes:
if self.no_comma:
print("Total bytes stored in S3:".ljust(self.f_col_width) + str(int(self.total)).rjust(self.l_col_width))
else:
print("Total bytes stored in S3:".ljust(self.f_col_width) + str("{:,}".format(int(self.total))).rjust(self.l_col_width))
else:
print("Total stored in S3:".ljust(self.f_col_width) + humansize(self.total, self.suffixes).rjust(self.l_col_width))
# Prints the region totals in csv format
def print_regions_csv(self):
header_line = "Region,"
for st_type in self.storage_types:
header_line += st_type + ","
header_line += "Total Files,Total Bytes,"
print(header_line)
results = dict(self.results)
totals = collections.OrderedDict()
for bucket in self.all_buckets:
if bucket in results:
# Check if region name is already stored in dictionary
if self.all_buckets[bucket] not in totals:
# Region Totals
totals[self.all_buckets[bucket]] = {
"StandardStorage": 0,
"StandardIAStorage": 0,
"ReducedRedundancyStorage": 0,
"GlacierObjectOverhead": 0,
"TotalFiles": 0,
"TotalBytes": 0,
}
if 'StandardStorage' in results[bucket]:
totals[self.all_buckets[bucket]]['StandardStorage'] += results[bucket]['StandardStorage']
totals[self.all_buckets[bucket]]['TotalBytes'] += results[bucket]['StandardStorage']
if 'StandardIAStorage' in results[bucket]:
totals[self.all_buckets[bucket]]['StandardIAStorage'] += results[bucket]['StandardIAStorage']
totals[self.all_buckets[bucket]]['TotalBytes'] += results[bucket]['StandardIAStorage']
if 'ReducedRedundancyStorage' in results[bucket]:
totals[self.all_buckets[bucket]]['ReducedRedundancyStorage'] += results[bucket]['ReducedRedundancyStorage']
totals[self.all_buckets[bucket]]['TotalBytes'] += results[bucket]['ReducedRedundancyStorage']
if 'GlacierObjectOverhead' in results[bucket]:
totals[self.all_buckets[bucket]]['GlacierObjectOverhead'] += results[bucket]['GlacierObjectOverhead']
totals[self.all_buckets[bucket]]['TotalBytes'] += results[bucket]['GlacierObjectOverhead']
if 'NumberOfObjects' in results[bucket]:
totals[self.all_buckets[bucket]]['TotalFiles'] += results[bucket]['NumberOfObjects']
for region in totals:
line = "%s," % region
line += "%s," % totals[region]['StandardStorage']
line += "%s," % totals[region]['StandardIAStorage']
line += "%s," % totals[region]['ReducedRedundancyStorage']
line += "%s," % totals[region]['GlacierObjectOverhead']
line += "%s," % totals[region]['TotalFiles']
line += "%s," % totals[region]['TotalBytes']
print(line)
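# Illustrative note (not part of the original script): with the storage
# types defined above, print_regions_csv emits this header line (note the
# trailing comma, one per appended column):
#
#     Region,StandardStorage,StandardIAStorage,ReducedRedundancyStorage,GlacierObjectOverhead,Total Files,Total Bytes,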
# Function definition for getting all bucket info
def get_bucket_storage(self, bucket, aws_session):
# Get correct CloudWatch client
cw_client = self.get_cloudwatch_client(bucket, aws_session)
# For each bucket item, look up the total size from CloudWatch
for st_type in self.storage_types:
response = cw_client.get_metric_statistics(Namespace='AWS/S3',
MetricName='BucketSizeBytes',
Dimensions=[
{'Name': 'BucketName',
'Value': bucket},
{'Name': 'StorageType',
'Value': st_type},
],
Statistics=['Average'],
Period=3600,
StartTime=(
self.now - datetime.timedelta(days=1)).isoformat(),
EndTime=self.now.isoformat()
)
# The cloudwatch metrics will have the single datapoint, so we just report on it.
for item in response["Datapoints"]:
# Create a blank dictionary if we don't have anything yet.
if bucket not in self.results:
self.results[bucket] = {}
self.results[bucket][st_type] = int(item["Average"])
# Add to running total
self.total += int(item["Average"])
# For each bucket item, look up the total size from CloudWatch
response = cw_client.get_metric_statistics(Namespace='AWS/S3',
MetricName='NumberOfObjects',
Dimensions=[
{'Name': 'BucketName',
'Value': bucket},
{'Name': 'StorageType',
'Value': 'AllStorageTypes'},
],
Statistics=['Average'],
Period=3600,
StartTime=(
self.now - datetime.timedelta(days=1)).isoformat(),
EndTime=self.now.isoformat()
)
# The cloudwatch metrics will have the single datapoint, so we just report on it.
for item in response["Datapoints"]:
# Create a blank dictionary if we don't have anything yet.
if bucket not in self.results:
self.results[bucket] = {}
self.results[bucket]["NumberOfObjects"] = int(item["Average"])
# Add to running total
self.total_objects += int(item["Average"])
# Gets all s3 buckets in region for session
def get_s3_buckets(session_obj, session):
if not session_obj.quiet:
print("Getting S3 bucket information...")
s3_client = get_s3_client(session)
# Get bucket location
if session_obj.single_thread:
for bucket in s3_client.list_buckets()['Buckets']:
session_obj.get_bucket_region(bucket['Name'], s3_client)
else:
# Use multi-threading to make it faster
with concurrent.futures.ThreadPoolExecutor(max_workers=session_obj.num_workers) as executer:
future_to_bucket = {executer.submit(session_obj.get_bucket_region, bucket['Name'], s3_client): bucket for bucket in s3_client.list_buckets()['Buckets']}
# Loops through each s3 Bucket
def list_bucket_info(session_obj, session):
if session_obj.single_thread:
for bucket in session_obj.all_buckets:
session_obj.get_bucket_storage(bucket, session)
else:
# Use multi-threading to make it faster
with concurrent.futures.ThreadPoolExecutor(max_workers=session_obj.num_workers) as executer:
future_to_bucket = {executer.submit(
session_obj.get_bucket_storage, bucket, session): bucket for bucket in session_obj.all_buckets}
# For parsing input arguments
def parse_args(argv, session):
# Show help
if ("--help" in argv) or ("-h") in argv:
print_help()
sys.exit(0)
# Set number of concurrent workers
if any("--workers" in a for a in argv):
## pull number of workers from arg and set global variable
session.num_workers = int(
[a for a in argv if "--workers" in a][0][10:])
# Set single threaded mode
if "--single-thread" in argv:
session.single_thread = True
# Show raw byte values
if ("--raw-bytes" in argv) or ("-r" in argv):
session.raw_bytes = True
# Don't output commas
if ("--no-commas" in argv) or ("--no-comma" in argv) or ("-nc" in argv):
session.no_comma = True
# Get AWS CLI profile
if any("--profile" in a for a in argv):
session.profile = [a for a in argv if "--profile" in a][0][10:]
# Turn on quiet mode
if ("-q" in argv) or ("--quiet" in argv):
session.quiet = True
# Turn on report mode
if "--report-mode" in argv:
session.report_mode = True
session.quiet = True
session.no_comma = True
session.raw_bytes = True
if "--region-csv" in argv:
session.report_mode = True
session.quiet = True
session.no_comma = True
session.raw_bytes = True
session.region_csv = True
if session.raw_bytes:
session.size_str = "Size in Bytes"
# Prints human readable sizes (source: https://stackoverflow.com/questions/14996453/python-libraries-to-calculate-human-readable-filesize-from-bytes)
def humansize(nbytes, suffixes):
i = 0
while nbytes >= 1024 and i < len(suffixes) - 1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, suffixes[i])
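# Illustrative sketch (not part of the original script): quick sanity checks
# for humansize with the suffix list defined on Session:
#
#     >>> humansize(1023, ['B', 'KB', 'MB', 'GB', 'TB', 'PB'])
#     '1023 B'
#     >>> humansize(1536, ['B', 'KB', 'MB', 'GB', 'TB', 'PB'])
#     '1.5 KB'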
# Getting the AWS session
def get_boto_session(session):
aws_session = boto3.Session()
if session.profile:
# Get session for profile
aws_session = boto3.Session(profile_name=session.profile)
return aws_session
# Get S3 Client
def get_s3_client(aws_session):
return aws_session.client('s3')
def print_help():
print('''usage: ./s3info.py [-h | --help] [-q | --quiet] [--profile=<profile>]
[--workers=<# of threads>] [--single-thread] [--raw-bytes]
[--no-commas] [--report-mode]
''')
print('''DESCRIPTION
Use this tool to output your total S3 usage in an AWS account. It can display your total usage
in human readable format (e.g., KB, MB, GB, TB, PB) or in total bytes. You can display usage per
bucket according to type of storage (Standard, Infrequently Accessed, Reduced Redundancy, or
Glacier Objects). Buckets that have multiple types of storage are listed once per storage type. This tool
uses concurrent threads to speed up the process. By default, this tool uses the default profile
stored in your ~/.aws/config file. IMPORTANT: Make sure you have a default region associated
with your profile!
NOTE: You must have the boto3 package installed in your python environment to correctly run this
script.
-h OR --help Shows this help message.
-q OR --quiet Suppresses output for each bucket and only shows totals.
--profile=<profile> Uses the specified profile stored in your ~/.aws/config file.
--workers=<number> Specifies a specific number of threads to parse through each S3
bucket (default is 10). More threads may speed up the process if
you have a large number of S3 buckets in your account.
--single-thread Runs this script using one thread. This is useful if you want to
see all your buckets output in alphabetical order. Using this
flag will take longer to loop through all your S3 buckets.
--raw-bytes Using this option, you can output each bucket size in bytes
instead of KB, MB, GB, or PB.
--no-comma OR -nc Used in conjunction with --raw-bytes, does not output commas in
numbers.
--region-csv Prints out a csv format of aggregated data by region. This option
automatically turns on --quiet mode. It's best to pipe this output
to a file. Example:
./s3info.py --region-csv > s3-report.csv
NOTE: This mode will suppress all AccessDenied errors for buckets.
Make sure the access keys/profile you are using has permission to
read all your buckets.
--report-mode This option only outputs the number of bytes without commas to
the console. This allows the output to be piped to a variable,
function, etc. This option automatically turns on --quiet,
--raw-bytes, and --no-comma flags. NOTE: This mode will suppress
all AccessDenied errors for buckets. Make sure the access keys/profile
you are using has permission to read all your buckets.
''')
## Main function
def main(argv):
session = Session()
parse_args(argv, session)
aws_session = get_boto_session(session)
get_s3_buckets(session, aws_session)
list_bucket_info(session, aws_session)
session.print_results()
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 |
unigent/OpenWrt-Firefly-SDK | staging_dir/host/lib/scons-2.3.1/SCons/Tool/tlib.py | 8 | 1902 | """SCons.Tool.tlib
XXX
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tlib.py 2014/03/02 14:18:15 garyo"
import SCons.Tool
import SCons.Tool.bcc32
import SCons.Util
def generate(env):
"""Add Builders and construction variables for tlib to an Environment."""
SCons.Tool.bcc32.findIt('tlib', env)
SCons.Tool.createStaticLibBuilder(env)
env['AR'] = 'tlib'
env['ARFLAGS'] = SCons.Util.CLVar('')
env['ARCOM'] = '$AR $TARGET $ARFLAGS /a $SOURCES'
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
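# Illustrative note (not part of the original tool): with the settings above,
# building a static library expands $ARCOM to a command of roughly the form
# (paths are hypothetical; ARFLAGS is empty by default):
#
#     tlib foo.lib /a obj1.obj obj2.obj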
def exists(env):
return SCons.Tool.bcc32.findIt('tlib', env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
collects/VTK | Common/Core/Testing/Python/TestMutable.py | 18 | 1602 | """Test the vtk.mutable() type and test pass-by-reference.
Created on Sept 19, 2010 by David Gobbi
"""
import sys
import vtk
from vtk.test import Testing
class TestMutable(Testing.vtkTest):
def testFloatMutable(self):
m = vtk.mutable(3.0)
n = vtk.mutable(4.0)
m *= 2
self.assertEqual(m, 6.0)
self.assertEqual(str(m), str(m.get()))
o = n + m
self.assertEqual(o, 10.0)
def testIntMutable(self):
m = vtk.mutable(3)
n = vtk.mutable(4)
m |= n
self.assertEqual(m, 7.0)
self.assertEqual(str(m), str(m.get()))
def testStringMutable(self):
m = vtk.mutable("%s %s!")
m %= ("hello", "world")
self.assertEqual(m, "hello world!")
def testPassByReference(self):
t = vtk.mutable(0.0)
p0 = (0.5, 0.0, 0.0)
n = (1.0, 0.0, 0.0)
p1 = (0.0, 0.0, 0.0)
p2 = (1.0, 1.0, 1.0)
x = [0.0, 0.0, 0.0]
vtk.vtkPlane.IntersectWithLine(p1, p2, n, p0, t, x)
self.assertEqual(round(t,6), 0.5)
self.assertEqual(round(x[0],6), 0.5)
self.assertEqual(round(x[1],6), 0.5)
self.assertEqual(round(x[2],6), 0.5)
t.set(0)
p = vtk.vtkPlane()
p.SetOrigin(0.5, 0.0, 0.0)
p.SetNormal(1.0, 0.0, 0.0)
p.IntersectWithLine(p1, p2, t, x)
self.assertEqual(round(t,6), 0.5)
self.assertEqual(round(x[0],6), 0.5)
self.assertEqual(round(x[1],6), 0.5)
self.assertEqual(round(x[2],6), 0.5)
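# Illustrative sketch (not part of the original test): the pattern exercised
# above -- VTK fills output arguments in place, so vtk.mutable() plays the
# role of a C++ reference parameter:
#
#     t = vtk.mutable(0.0)    # receives the parametric coordinate
#     x = [0.0, 0.0, 0.0]     # receives the intersection point
#     vtk.vtkPlane.IntersectWithLine(p1, p2, n, p0, t, x)
#     # afterwards t.get() and x hold the results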
if __name__ == "__main__":
Testing.main([(TestMutable, 'test')])
| bsd-3-clause |
shravan-achar/servo | tests/wpt/web-platform-tests/tools/py/py/_path/svnwc.py | 176 | 43848 | """
svn-Command based Implementation of a Subversion WorkingCopy Path.
SvnWCCommandPath is the main class.
"""
import os, sys, time, re, calendar, string
import py
import subprocess
from py._path import common
#-----------------------------------------------------------
# Caching latest repository revision and repo-paths
# (getting them is slow with the current implementations)
#
# XXX make mt-safe
#-----------------------------------------------------------
class cache:
proplist = {}
info = {}
entries = {}
prop = {}
class RepoEntry:
def __init__(self, url, rev, timestamp):
self.url = url
self.rev = rev
self.timestamp = timestamp
def __str__(self):
return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp)
class RepoCache:
""" The Repocache manages discovered repository paths
and their revisions. If inside a timeout the cache
will even return the revision of the root.
"""
timeout = 20 # seconds after which we forget that we know the last revision
def __init__(self):
self.repos = []
def clear(self):
self.repos = []
def put(self, url, rev, timestamp=None):
if rev is None:
return
if timestamp is None:
timestamp = time.time()
for entry in self.repos:
if url == entry.url:
entry.timestamp = timestamp
entry.rev = rev
#print "set repo", entry
break
else:
entry = RepoEntry(url, rev, timestamp)
self.repos.append(entry)
#print "appended repo", entry
def get(self, url):
now = time.time()
for entry in self.repos:
if url.startswith(entry.url):
if now < entry.timestamp + self.timeout:
#print "returning immediate Etrny", entry
return entry.url, entry.rev
return entry.url, -1
return url, -1
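# Illustrative sketch (not part of the original module): intended use of the
# cache -- a hit within `timeout` seconds returns the stored revision, a
# stale hit returns -1:
#
#     cache = RepoCache()
#     cache.put('http://host/repo', 42)
#     cache.get('http://host/repo/trunk')    # ('http://host/repo', 42)
#     # more than `timeout` seconds later:
#     # cache.get('http://host/repo/trunk')  # ('http://host/repo', -1)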
repositories = RepoCache()
# svn support code
ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested
if sys.platform == "win32":
ALLOWED_CHARS += ":"
ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:'
def _getsvnversion(ver=[]):
try:
return ver[0]
except IndexError:
v = py.process.cmdexec("svn -q --version")
v = v.strip()
v = '.'.join(v.split('.')[:2])
ver.append(v)
return v
def _escape_helper(text):
text = str(text)
if py.std.sys.platform != 'win32':
text = str(text).replace('$', '\\$')
return text
def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS):
for c in str(text):
if c.isalnum():
continue
if c in allowed_chars:
continue
return True
return False
def checkbadchars(url):
# (hpk) not quite sure about the exact purpose, guido w.?
proto, uri = url.split("://", 1)
if proto != "file":
host, uripath = uri.split('/', 1)
# only check for bad chars in the non-protocol parts
if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \
or _check_for_bad_chars(uripath, ALLOWED_CHARS)):
raise ValueError("bad char in %r" % (url, ))
#_______________________________________________________________
class SvnPathBase(common.PathBase):
""" Base implementation for SvnPath implementations. """
sep = '/'
def _geturl(self):
return self.strpath
url = property(_geturl, None, None, "url of this svn-path.")
def __str__(self):
""" return a string representation (including rev-number) """
return self.strpath
def __hash__(self):
return hash(self.strpath)
def new(self, **kw):
""" create a modified version of this path. A 'rev' argument
indicates a new revision.
the following keyword arguments modify various path parts::
http://host.com/repo/path/file.ext
|-----------------------| dirname
|------| basename
|--| purebasename
|--| ext
"""
obj = object.__new__(self.__class__)
obj.rev = kw.get('rev', self.rev)
obj.auth = kw.get('auth', self.auth)
dirname, basename, purebasename, ext = self._getbyspec(
"dirname,basename,purebasename,ext")
if 'basename' in kw:
if 'purebasename' in kw or 'ext' in kw:
raise ValueError("invalid specification %r" % kw)
else:
pb = kw.setdefault('purebasename', purebasename)
ext = kw.setdefault('ext', ext)
if ext and not ext.startswith('.'):
ext = '.' + ext
kw['basename'] = pb + ext
kw.setdefault('dirname', dirname)
kw.setdefault('sep', self.sep)
if kw['basename']:
obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw
else:
obj.strpath = "%(dirname)s" % kw
return obj
def _getbyspec(self, spec):
""" get specified parts of the path. 'arg' is a string
with comma separated path parts. The parts are returned
in exactly the order of the specification.
you may specify the following parts:
http://host.com/repo/path/file.ext
|-----------------------| dirname
|------| basename
|--| purebasename
|--| ext
"""
res = []
parts = self.strpath.split(self.sep)
for name in spec.split(','):
name = name.strip()
if name == 'dirname':
res.append(self.sep.join(parts[:-1]))
elif name == 'basename':
res.append(parts[-1])
else:
basename = parts[-1]
i = basename.rfind('.')
if i == -1:
purebasename, ext = basename, ''
else:
purebasename, ext = basename[:i], basename[i:]
if name == 'purebasename':
res.append(purebasename)
elif name == 'ext':
res.append(ext)
else:
raise NameError("Don't know part %r" % name)
return res
def __eq__(self, other):
""" return true if path and rev attributes each match """
return (str(self) == str(other) and
self.rev == other.rev)
def __ne__(self, other):
return not self == other
def join(self, *args):
""" return a new Path (with the same revision) which is composed
of the self Path followed by 'args' path components.
"""
if not args:
return self
args = tuple([arg.strip(self.sep) for arg in args])
parts = (self.strpath, ) + args
newpath = self.__class__(self.sep.join(parts), self.rev, self.auth)
return newpath
def propget(self, name):
""" return the content of the given property. """
value = self._propget(name)
return value
def proplist(self):
""" list all property names. """
content = self._proplist()
return content
def size(self):
""" Return the size of the file content of the Path. """
return self.info().size
def mtime(self):
""" Return the last modification time of the file. """
return self.info().mtime
# shared help methods
def _escape(self, cmd):
return _escape_helper(cmd)
#def _childmaxrev(self):
# """ return maximum revision number of childs (or self.rev if no childs) """
# rev = self.rev
# for name, info in self._listdir_nameinfo():
# rev = max(rev, info.created_rev)
# return rev
#def _getlatestrevision(self):
# """ return latest repo-revision for this path. """
# url = self.strpath
# path = self.__class__(url, None)
#
# # we need a long walk to find the root-repo and revision
# while 1:
# try:
# rev = max(rev, path._childmaxrev())
# previous = path
# path = path.dirpath()
# except (IOError, process.cmdexec.Error):
# break
# if rev is None:
# raise IOError, "could not determine newest repo revision for %s" % self
# return rev
class Checkers(common.Checkers):
def dir(self):
try:
return self.path.info().kind == 'dir'
except py.error.Error:
return self._listdirworks()
def _listdirworks(self):
try:
self.path.listdir()
except py.error.ENOENT:
return False
else:
return True
def file(self):
try:
return self.path.info().kind == 'file'
except py.error.ENOENT:
return False
def exists(self):
try:
return self.path.info()
except py.error.ENOENT:
return self._listdirworks()
def parse_apr_time(timestr):
i = timestr.rfind('.')
if i == -1:
raise ValueError("could not parse %s" % timestr)
timestr = timestr[:i]
parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S")
return time.mktime(parsedtime)
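# Illustrative sketch (not part of the original module): parse_apr_time
# expects an apr-style timestamp with fractional seconds, e.g.:
#
#     >>> parse_apr_time('2008-05-09T22:39:00.600090Z') > 0
#     True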
class PropListDict(dict):
""" a Dictionary which fetches values (InfoSvnCommand instances) lazily"""
def __init__(self, path, keynames):
dict.__init__(self, [(x, None) for x in keynames])
self.path = path
def __getitem__(self, key):
value = dict.__getitem__(self, key)
if value is None:
value = self.path.propget(key)
dict.__setitem__(self, key, value)
return value
def fixlocale():
if sys.platform != 'win32':
return 'LC_ALL=C '
return ''
# some nasty chunk of code to solve path and url conversion and quoting issues
ILLEGAL_CHARS = '* | \ / : < > ? \t \n \x0b \x0c \r'.split(' ')
if os.sep in ILLEGAL_CHARS:
ILLEGAL_CHARS.remove(os.sep)
ISWINDOWS = sys.platform == 'win32'
_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I)
def _check_path(path):
illegal = ILLEGAL_CHARS[:]
sp = path.strpath
if ISWINDOWS:
illegal.remove(':')
if not _reg_allow_disk.match(sp):
raise ValueError('path may not contain a colon (:)')
for char in sp:
if char not in string.printable or char in illegal:
raise ValueError('illegal character %r in path' % (char,))
def path_to_fspath(path, addat=True):
_check_path(path)
sp = path.strpath
if addat and path.rev != -1:
sp = '%s@%s' % (sp, path.rev)
elif addat:
sp = '%s@HEAD' % (sp,)
return sp
def url_from_path(path):
fspath = path_to_fspath(path, False)
quote = py.std.urllib.quote
if ISWINDOWS:
match = _reg_allow_disk.match(fspath)
fspath = fspath.replace('\\', '/')
if match.group(1):
fspath = '/%s%s' % (match.group(1).replace('\\', '/'),
quote(fspath[len(match.group(1)):]))
else:
fspath = quote(fspath)
else:
fspath = quote(fspath)
if path.rev != -1:
fspath = '%s@%s' % (fspath, path.rev)
else:
fspath = '%s@HEAD' % (fspath,)
return 'file://%s' % (fspath,)
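# Illustrative sketch (not part of the original module): on a POSIX system a
# working copy path '/tmp/wc' at the default revision (-1) maps to
#
#     file:///tmp/wc@HEAD
#
# while an explicit revision, say 42, yields file:///tmp/wc@42.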
class SvnAuth(object):
""" container for auth information for Subversion """
def __init__(self, username, password, cache_auth=True, interactive=True):
self.username = username
self.password = password
self.cache_auth = cache_auth
self.interactive = interactive
def makecmdoptions(self):
uname = self.username.replace('"', '\\"')
passwd = self.password.replace('"', '\\"')
ret = []
if uname:
ret.append('--username="%s"' % (uname,))
if passwd:
ret.append('--password="%s"' % (passwd,))
if not self.cache_auth:
ret.append('--no-auth-cache')
if not self.interactive:
ret.append('--non-interactive')
return ' '.join(ret)
def __str__(self):
return "<SvnAuth username=%s ...>" %(self.username,)
rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)')
class SvnWCCommandPath(common.PathBase):
""" path implementation offering access/modification to svn working copies.
It has methods similar to the functions in os.path and similar to the
commands of the svn client.
"""
sep = os.sep
def __new__(cls, wcpath=None, auth=None):
self = object.__new__(cls)
if isinstance(wcpath, cls):
if wcpath.__class__ == cls:
return wcpath
wcpath = wcpath.localpath
if _check_for_bad_chars(str(wcpath),
ALLOWED_CHARS):
raise ValueError("bad char in wcpath %s" % (wcpath, ))
self.localpath = py.path.local(wcpath)
self.auth = auth
return self
strpath = property(lambda x: str(x.localpath), None, None, "string path")
rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision")
def __eq__(self, other):
return self.localpath == getattr(other, 'localpath', None)
def _geturl(self):
if getattr(self, '_url', None) is None:
info = self.info()
self._url = info.url #SvnPath(info.url, info.rev)
assert isinstance(self._url, py.builtin._basestring)
return self._url
url = property(_geturl, None, None, "url of this WC item")
def _escape(self, cmd):
return _escape_helper(cmd)
def dump(self, obj):
""" pickle object into path location"""
return self.localpath.dump(obj)
def svnurl(self):
""" return current SvnPath for this WC-item. """
info = self.info()
return py.path.svnurl(info.url)
def __repr__(self):
return "svnwc(%r)" % (self.strpath) # , self._url)
def __str__(self):
return str(self.localpath)
def _makeauthoptions(self):
if self.auth is None:
return ''
return self.auth.makecmdoptions()
def _authsvn(self, cmd, args=None):
args = args and list(args) or []
args.append(self._makeauthoptions())
return self._svn(cmd, *args)
def _svn(self, cmd, *args):
l = ['svn %s' % cmd]
args = [self._escape(item) for item in args]
l.extend(args)
l.append('"%s"' % self._escape(self.strpath))
# try fixing the locale because we can't otherwise parse
string = fixlocale() + " ".join(l)
try:
try:
key = 'LC_MESSAGES'
hold = os.environ.get(key)
os.environ[key] = 'C'
out = py.process.cmdexec(string)
finally:
if hold:
os.environ[key] = hold
else:
del os.environ[key]
except py.process.cmdexec.Error:
e = sys.exc_info()[1]
strerr = e.err.lower()
if strerr.find('not found') != -1:
raise py.error.ENOENT(self)
elif strerr.find("E200009:") != -1:
raise py.error.ENOENT(self)
if (strerr.find('file exists') != -1 or
strerr.find('file already exists') != -1 or
strerr.find('w150002:') != -1 or
strerr.find("can't create directory") != -1):
raise py.error.EEXIST(strerr) #self)
raise
return out
def switch(self, url):
""" switch to given URL. """
self._authsvn('switch', [url])
def checkout(self, url=None, rev=None):
""" checkout from url to local wcpath. """
args = []
if url is None:
url = self.url
if rev is None or rev == -1:
if (py.std.sys.platform != 'win32' and
_getsvnversion() == '1.3'):
url += "@HEAD"
else:
if _getsvnversion() == '1.3':
url += "@%d" % rev
else:
args.append('-r' + str(rev))
args.append(url)
self._authsvn('co', args)
def update(self, rev='HEAD', interactive=True):
""" update working copy item to given revision. (None -> HEAD). """
opts = ['-r', rev]
if not interactive:
opts.append("--non-interactive")
self._authsvn('up', opts)
def write(self, content, mode='w'):
""" write content into local filesystem wc. """
self.localpath.write(content, mode)
def dirpath(self, *args):
""" return the directory Path of the current Path. """
return self.__class__(self.localpath.dirpath(*args), auth=self.auth)
def _ensuredirs(self):
parent = self.dirpath()
if parent.check(dir=0):
parent._ensuredirs()
if self.check(dir=0):
self.mkdir()
return self
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'directory=True'
then the path is forced to be a directory path.
"""
p = self.join(*args)
if p.check():
if p.check(versioned=False):
p.add()
return p
if kwargs.get('dir', 0):
return p._ensuredirs()
parent = p.dirpath()
parent._ensuredirs()
p.write("")
p.add()
return p
def mkdir(self, *args):
""" create & return the directory joined with args. """
if args:
return self.join(*args).mkdir()
else:
self._svn('mkdir')
return self
def add(self):
""" add ourself to svn """
self._svn('add')
def remove(self, rec=1, force=1):
""" remove a file or a directory tree. 'rec'ursive is
ignored and considered always true (because of
underlying svn semantics).
"""
assert rec, "svn cannot remove non-recursively"
if not self.check(versioned=True):
# not added to svn (anymore?), just remove
py.path.local(self).remove()
return
flags = []
if force:
flags.append('--force')
self._svn('remove', *flags)
def copy(self, target):
""" copy path to target."""
py.process.cmdexec("svn copy %s %s" %(str(self), str(target)))
def rename(self, target):
""" rename this path to target. """
py.process.cmdexec("svn move --force %s %s" %(str(self), str(target)))
def lock(self):
""" set a lock (exclusive) on the resource """
out = self._authsvn('lock').strip()
if not out:
# warning or error, raise exception
raise ValueError("unknown error in svn lock command")
def unlock(self):
""" unset a previously set lock """
out = self._authsvn('unlock').strip()
if out.startswith('svn:'):
# warning or error, raise exception
raise Exception(out[4:])
def cleanup(self):
""" remove any locks from the resource """
# XXX should be fixed properly!!!
try:
self.unlock()
except:
pass
def status(self, updates=0, rec=0, externals=0):
""" return (collective) Status object for this file. """
# http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1
# 2201 2192 jum test
# XXX
if externals:
raise ValueError("XXX cannot perform status() "
"on external items yet")
else:
#1.2 supports: externals = '--ignore-externals'
externals = ''
if rec:
rec= ''
else:
rec = '--non-recursive'
# XXX does not work on all subversion versions
#if not externals:
# externals = '--ignore-externals'
if updates:
updates = '-u'
else:
updates = ''
try:
cmd = 'status -v --xml --no-ignore %s %s %s' % (
updates, rec, externals)
out = self._authsvn(cmd)
except py.process.cmdexec.Error:
cmd = 'status -v --no-ignore %s %s %s' % (
updates, rec, externals)
out = self._authsvn(cmd)
rootstatus = WCStatus(self).fromstring(out, self)
else:
rootstatus = XMLWCStatus(self).fromstring(out, self)
return rootstatus
def diff(self, rev=None):
""" return a diff of the current path against revision rev (defaulting
to the last one).
"""
args = []
if rev is not None:
args.append("-r %d" % rev)
out = self._authsvn('diff', args)
return out
def blame(self):
""" return a list of tuples of three elements:
(revision, commiter, line)
"""
out = self._svn('blame')
result = []
blamelines = out.splitlines()
reallines = py.path.svnurl(self.url).readlines()
for i, (blameline, line) in enumerate(
zip(blamelines, reallines)):
m = rex_blame.match(blameline)
if not m:
raise ValueError("output line %r of svn blame does not match "
"expected format" % (line, ))
rev, name, _ = m.groups()
result.append((int(rev), name, line))
return result
_rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL)
def commit(self, msg='', rec=1):
""" commit with support for non-recursive commits """
# XXX i guess escaping should be done better here?!?
cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),)
if not rec:
cmd += ' -N'
out = self._authsvn(cmd)
try:
del cache.info[self]
except KeyError:
pass
if out:
m = self._rex_commit.match(out)
return int(m.group(1))
def propset(self, name, value, *args):
""" set property name to value on this path. """
d = py.path.local.mkdtemp()
try:
p = d.join('value')
p.write(value)
self._svn('propset', name, '--file', str(p), *args)
finally:
d.remove()
def propget(self, name):
""" get property name on this path. """
res = self._svn('propget', name)
return res[:-1] # strip trailing newline
def propdel(self, name):
""" delete property name on this path. """
res = self._svn('propdel', name)
return res[:-1] # strip trailing newline
def proplist(self, rec=0):
""" return a mapping of property names to property values.
If rec is True, then return a dictionary mapping sub-paths to such mappings.
"""
if rec:
res = self._svn('proplist -R')
return make_recursive_propdict(self, res)
else:
res = self._svn('proplist')
lines = res.split('\n')
lines = [x.strip() for x in lines[1:]]
return PropListDict(self, lines)
def revert(self, rec=0):
""" revert the local changes of this path. if rec is True, do so
recursively. """
if rec:
result = self._svn('revert -R')
else:
result = self._svn('revert')
return result
def new(self, **kw):
""" create a modified version of this path. A 'rev' argument
indicates a new revision.
the following keyword arguments modify various path parts:
http://host.com/repo/path/file.ext
|-----------------------| dirname
|------| basename
|--| purebasename
|--| ext
"""
if kw:
localpath = self.localpath.new(**kw)
else:
localpath = self.localpath
return self.__class__(localpath, auth=self.auth)
def join(self, *args, **kwargs):
""" return a new Path (with the same revision) which is composed
of the self Path followed by 'args' path components.
"""
if not args:
return self
localpath = self.localpath.join(*args, **kwargs)
return self.__class__(localpath, auth=self.auth)
def info(self, usecache=1):
""" return an Info structure with svn-provided information. """
info = usecache and cache.info.get(self)
if not info:
try:
output = self._svn('info')
except py.process.cmdexec.Error:
e = sys.exc_info()[1]
if e.err.find('Path is not a working copy directory') != -1:
raise py.error.ENOENT(self, e.err)
elif e.err.find("is not under version control") != -1:
raise py.error.ENOENT(self, e.err)
raise
# XXX SVN 1.3 has output on stderr instead of stdout (while it does
# return 0!), so a bit nasty, but we assume nothing of interest is
# written to stderr...
if (output.strip() == '' or
output.lower().find('not a versioned resource') != -1):
raise py.error.ENOENT(self, output)
info = InfoSvnWCCommand(output)
# Can't reliably compare on Windows without access to win32api
if py.std.sys.platform != 'win32':
if info.path != self.localpath:
raise py.error.ENOENT(self, "not a versioned resource:" +
" %s != %s" % (info.path, self.localpath))
cache.info[self] = info
return info
def listdir(self, fil=None, sort=None):
""" return a sequence of Paths.
listdir will return either a tuple or a list of paths
depending on implementation choices.
"""
if isinstance(fil, str):
fil = common.FNMatcher(fil)
# XXX unify argument naming with LocalPath.listdir
def notsvn(path):
return path.basename != '.svn'
paths = []
for localpath in self.localpath.listdir(notsvn):
p = self.__class__(localpath, auth=self.auth)
if notsvn(p) and (not fil or fil(p)):
paths.append(p)
self._sortlist(paths, sort)
return paths
def open(self, mode='r'):
""" return an opened file with the given mode. """
return open(self.strpath, mode)
def _getbyspec(self, spec):
return self.localpath._getbyspec(spec)
class Checkers(py.path.local.Checkers):
def __init__(self, path):
self.svnwcpath = path
self.path = path.localpath
def versioned(self):
try:
s = self.svnwcpath.info()
except (py.error.ENOENT, py.error.EEXIST):
return False
except py.process.cmdexec.Error:
e = sys.exc_info()[1]
if e.err.find('is not a working copy')!=-1:
return False
if e.err.lower().find('not a versioned resource') != -1:
return False
raise
else:
return True
def log(self, rev_start=None, rev_end=1, verbose=False):
""" return a list of LogEntry instances for this path.
rev_start is the starting revision (defaulting to the first one).
rev_end is the last revision (defaulting to HEAD).
if verbose is True, then the LogEntry instances also know which files changed.
"""
assert self.check() # make it simpler for the pipe
rev_start = rev_start is None and "HEAD" or rev_start
rev_end = rev_end is None and "HEAD" or rev_end
if rev_start == "HEAD" and rev_end == 1:
rev_opt = ""
else:
rev_opt = "-r %s:%s" % (rev_start, rev_end)
verbose_opt = verbose and "-v" or ""
locale_env = fixlocale()
# some blather on stderr
auth_opt = self._makeauthoptions()
#stdin, stdout, stderr = os.popen3(locale_env +
# 'svn log --xml %s %s %s "%s"' % (
# rev_opt, verbose_opt, auth_opt,
# self.strpath))
cmd = locale_env + 'svn log --xml %s %s %s "%s"' % (
rev_opt, verbose_opt, auth_opt, self.strpath)
popen = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
stdout, stderr = popen.communicate()
stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
minidom,ExpatError = importxml()
try:
tree = minidom.parseString(stdout)
except ExpatError:
raise ValueError('no such revision')
result = []
for logentry in filter(None, tree.firstChild.childNodes):
if logentry.nodeType == logentry.ELEMENT_NODE:
result.append(LogEntry(logentry))
return result
def size(self):
""" Return the size of the file content of the Path. """
return self.info().size
def mtime(self):
""" Return the last modification time of the file. """
return self.info().mtime
def __hash__(self):
return hash((self.strpath, self.__class__, self.auth))
class WCStatus:
attrnames = ('modified','added', 'conflict', 'unchanged', 'external',
'deleted', 'prop_modified', 'unknown', 'update_available',
'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced'
)
def __init__(self, wcpath, rev=None, modrev=None, author=None):
self.wcpath = wcpath
self.rev = rev
self.modrev = modrev
self.author = author
for name in self.attrnames:
setattr(self, name, [])
def allpath(self, sort=True, **kw):
d = {}
for name in self.attrnames:
if name not in kw or kw[name]:
for path in getattr(self, name):
d[path] = 1
l = d.keys()
if sort:
l.sort()
return l
# XXX a bit scary to assume there's always 2 spaces between username and
# path, however with win32 allowing spaces in user names there doesn't
# seem to be a more solid approach :(
_rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)')
def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
""" return a new WCStatus object from data 's'
"""
rootstatus = WCStatus(rootwcpath, rev, modrev, author)
update_rev = None
for line in data.split('\n'):
if not line.strip():
continue
#print "processing %r" % line
flags, rest = line[:8], line[8:]
# first column
c0,c1,c2,c3,c4,c5,x6,c7 = flags
#if '*' in line:
# print "flags", repr(flags), "rest", repr(rest)
if c0 in '?XI':
fn = line.split(None, 1)[1]
if c0 == '?':
wcpath = rootwcpath.join(fn, abs=1)
rootstatus.unknown.append(wcpath)
elif c0 == 'X':
wcpath = rootwcpath.__class__(
rootwcpath.localpath.join(fn, abs=1),
auth=rootwcpath.auth)
rootstatus.external.append(wcpath)
elif c0 == 'I':
wcpath = rootwcpath.join(fn, abs=1)
rootstatus.ignored.append(wcpath)
continue
#elif c0 in '~!' or c4 == 'S':
# raise NotImplementedError("received flag %r" % c0)
m = WCStatus._rex_status.match(rest)
if not m:
if c7 == '*':
fn = rest.strip()
wcpath = rootwcpath.join(fn, abs=1)
rootstatus.update_available.append(wcpath)
continue
if line.lower().find('against revision:')!=-1:
update_rev = int(rest.split(':')[1].strip())
continue
if line.lower().find('status on external') > -1:
# XXX not sure what to do here... perhaps we want to
# store some state instead of just continuing, as right
# now it makes the top-level external get added twice
# (once as external, once as 'normal' unchanged item)
# because of the way SVN presents external items
continue
# nothing matched; give up on this line
raise ValueError("could not parse line %r" % line)
else:
rev, modrev, author, fn = m.groups()
wcpath = rootwcpath.join(fn, abs=1)
#assert wcpath.check()
if c0 == 'M':
assert wcpath.check(file=1), "didn't expect a directory with changed content here"
rootstatus.modified.append(wcpath)
elif c0 == 'A' or c3 == '+' :
rootstatus.added.append(wcpath)
elif c0 == 'D':
rootstatus.deleted.append(wcpath)
elif c0 == 'C':
rootstatus.conflict.append(wcpath)
elif c0 == '~':
rootstatus.kindmismatch.append(wcpath)
elif c0 == '!':
rootstatus.incomplete.append(wcpath)
elif c0 == 'R':
rootstatus.replaced.append(wcpath)
elif not c0.strip():
rootstatus.unchanged.append(wcpath)
else:
raise NotImplementedError("received flag %r" % c0)
if c1 == 'M':
rootstatus.prop_modified.append(wcpath)
# XXX do we cover all client versions here?
if c2 == 'L' or c5 == 'K':
rootstatus.locked.append(wcpath)
if c7 == '*':
rootstatus.update_available.append(wcpath)
if wcpath == rootwcpath:
rootstatus.rev = rev
rootstatus.modrev = modrev
rootstatus.author = author
if update_rev:
rootstatus.update_rev = update_rev
continue
return rootstatus
fromstring = staticmethod(fromstring)
class XMLWCStatus(WCStatus):
def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
""" parse 'data' (XML string as outputted by svn st) into a status obj
"""
# XXX for externals, the path is shown twice: once
# with external information, and once with full info as if
# the item was a normal non-external... the current way of
# dealing with this issue is by ignoring it - this does make
# externals appear as external items as well as 'normal',
# unchanged ones in the status object so this is far from ideal
rootstatus = WCStatus(rootwcpath, rev, modrev, author)
update_rev = None
minidom, ExpatError = importxml()
try:
doc = minidom.parseString(data)
except ExpatError:
e = sys.exc_info()[1]
raise ValueError(str(e))
urevels = doc.getElementsByTagName('against')
if urevels:
rootstatus.update_rev = urevels[-1].getAttribute('revision')
for entryel in doc.getElementsByTagName('entry'):
path = entryel.getAttribute('path')
statusel = entryel.getElementsByTagName('wc-status')[0]
itemstatus = statusel.getAttribute('item')
if itemstatus == 'unversioned':
wcpath = rootwcpath.join(path, abs=1)
rootstatus.unknown.append(wcpath)
continue
elif itemstatus == 'external':
wcpath = rootwcpath.__class__(
rootwcpath.localpath.join(path, abs=1),
auth=rootwcpath.auth)
rootstatus.external.append(wcpath)
continue
elif itemstatus == 'ignored':
wcpath = rootwcpath.join(path, abs=1)
rootstatus.ignored.append(wcpath)
continue
elif itemstatus == 'incomplete':
wcpath = rootwcpath.join(path, abs=1)
rootstatus.incomplete.append(wcpath)
continue
rev = statusel.getAttribute('revision')
if itemstatus == 'added' or itemstatus == 'none':
rev = '0'
modrev = '?'
author = '?'
date = ''
elif itemstatus == "replaced":
pass
else:
#print entryel.toxml()
commitel = entryel.getElementsByTagName('commit')[0]
if commitel:
modrev = commitel.getAttribute('revision')
author = ''
author_els = commitel.getElementsByTagName('author')
if author_els:
for c in author_els[0].childNodes:
author += c.nodeValue
date = ''
for c in commitel.getElementsByTagName('date')[0]\
.childNodes:
date += c.nodeValue
wcpath = rootwcpath.join(path, abs=1)
assert itemstatus != 'modified' or wcpath.check(file=1), (
'didn\'t expect a directory with changed content here')
itemattrname = {
'normal': 'unchanged',
'unversioned': 'unknown',
'conflicted': 'conflict',
'none': 'added',
}.get(itemstatus, itemstatus)
attr = getattr(rootstatus, itemattrname)
attr.append(wcpath)
propsstatus = statusel.getAttribute('props')
if propsstatus not in ('none', 'normal'):
rootstatus.prop_modified.append(wcpath)
if wcpath == rootwcpath:
rootstatus.rev = rev
rootstatus.modrev = modrev
rootstatus.author = author
rootstatus.date = date
# handle repos-status element (remote info)
rstatusels = entryel.getElementsByTagName('repos-status')
if rstatusels:
rstatusel = rstatusels[0]
ritemstatus = rstatusel.getAttribute('item')
if ritemstatus in ('added', 'modified'):
rootstatus.update_available.append(wcpath)
lockels = entryel.getElementsByTagName('lock')
if len(lockels):
rootstatus.locked.append(wcpath)
return rootstatus
fromstring = staticmethod(fromstring)
class InfoSvnWCCommand:
def __init__(self, output):
# Path: test
# URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test
# Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
# Revision: 2151
# Node Kind: directory
# Schedule: normal
# Last Changed Author: hpk
# Last Changed Rev: 2100
# Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
# Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003)
d = {}
for line in output.split('\n'):
if not line.strip():
continue
key, value = line.split(':', 1)
key = key.lower().replace(' ', '')
value = value.strip()
d[key] = value
try:
self.url = d['url']
except KeyError:
raise ValueError("Not a versioned resource")
#raise ValueError, "Not a versioned resource %r" % path
self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind']
try:
self.rev = int(d['revision'])
except KeyError:
self.rev = None
self.path = py.path.local(d['path'])
self.size = self.path.size()
if 'lastchangedrev' in d:
self.created_rev = int(d['lastchangedrev'])
if 'lastchangedauthor' in d:
self.last_author = d['lastchangedauthor']
if 'lastchangeddate' in d:
self.mtime = parse_wcinfotime(d['lastchangeddate'])
self.time = self.mtime * 1000000
def __eq__(self, other):
return self.__dict__ == other.__dict__
def parse_wcinfotime(timestr):
""" Returns seconds since epoch, UTC. """
# example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr)
if not m:
raise ValueError("timestring %r does not match" % timestr)
timestr, timezone = m.groups()
# do not handle timezone specially, return value should be UTC
parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S")
return calendar.timegm(parsedtime)
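# Illustrative call (not from the original source). The timezone offset is
# captured by the regex but deliberately ignored, so the wall-clock part of
# the string is interpreted directly as UTC:
#   parse_wcinfotime('2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)')
#   # -> integer seconds since the epoch for 2003-10-27 20:43:14 UTC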
def make_recursive_propdict(wcroot,
output,
rex = re.compile("Properties on '(.*)':")):
""" Return a dictionary of path->PropListDict mappings. """
lines = [x for x in output.split('\n') if x]
pdict = {}
while lines:
line = lines.pop(0)
m = rex.match(line)
if not m:
raise ValueError("could not parse propget-line: %r" % line)
path = m.groups()[0]
wcpath = wcroot.join(path, abs=1)
propnames = []
while lines and lines[0].startswith(' '):
propname = lines.pop(0).strip()
propnames.append(propname)
assert propnames, "must have found properties!"
pdict[wcpath] = PropListDict(wcpath, propnames)
return pdict
def importxml(cache=[]):
if cache:
return cache
from xml.dom import minidom
from xml.parsers.expat import ExpatError
cache.extend([minidom, ExpatError])
return cache
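# Usage sketch (illustrative): the mutable default argument acts as a
# module-level cache, so the XML modules are imported only once:
#   minidom, ExpatError = importxml()   # first call imports and caches
#   minidom2, _ = importxml()           # later calls reuse the cache
#   assert minidom is minidom2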
class LogEntry:
def __init__(self, logentry):
self.rev = int(logentry.getAttribute('revision'))
for lpart in filter(None, logentry.childNodes):
if lpart.nodeType == lpart.ELEMENT_NODE:
if lpart.nodeName == 'author':
self.author = lpart.firstChild.nodeValue
elif lpart.nodeName == 'msg':
if lpart.firstChild:
self.msg = lpart.firstChild.nodeValue
else:
self.msg = ''
elif lpart.nodeName == 'date':
#2003-07-29T20:05:11.598637Z
timestr = lpart.firstChild.nodeValue
self.date = parse_apr_time(timestr)
elif lpart.nodeName == 'paths':
self.strpaths = []
for ppart in filter(None, lpart.childNodes):
if ppart.nodeType == ppart.ELEMENT_NODE:
self.strpaths.append(PathEntry(ppart))
def __repr__(self):
return '<Logentry rev=%d author=%s date=%s>' % (
self.rev, self.author, self.date)
| mpl-2.0 |
gicking/STM8_templates | Projects/STM8_StdPeriphLib_Examples/Minimal-C/STVD_Cosmic/upload.py | 18 | 2135 | #!/usr/bin/python
'''
Upload Cosmic S19 file via STM8 bootloader
'''
# required modules
import os
import platform
import sys
import shlex
from subprocess import Popen, PIPE
# set path of s19 file
HEXFILE = './Release/STM8_Template.s19'
#HEXFILE = './Debug/STM8_Template.s19'
# set tool path
ROOT_DIR = '../../../../'
TOOL_DIR = ROOT_DIR + 'Tools/'
# determine operating system
OS = platform.system()
#print OS
# set OS specific
FLASHTOOL = ''
PORT = ''
if OS == 'Windows':
PORT = 'COM10'
FLASHTOOL = 'stm8gal.exe'
else:
PORT = '/dev/ttyUSB0'
FLASHTOOL = 'stm8gal'
# set general
TERMINAL = False
##################
# helper functions
##################
#########
def getchar():
"""
python equivalent of getchar()
"""
ch = 0
if OS == 'Windows':
import msvcrt as m
ch = m.getch()
sys.stdout.flush()
sys.stderr.flush()
else:
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
# end getchar()
#########
def get_exitcode_stdout_stderr(cmd):
"""
execute the external command and get its exitcode, stdout and stderr.
"""
args = shlex.split(cmd)
proc = Popen(args, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
exitcode = proc.returncode
return exitcode, out, err
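# Usage sketch (hypothetical command line; relies on the shlex/subprocess
# imports added at the top of this script):
#   exitcode, out, err = get_exitcode_stdout_stderr('stm8gal -h')
#   if exitcode != 0:
#       sys.stderr.write(err)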
##################
# main program
##################
# on success upload to STM8
cmd = TOOL_DIR+FLASHTOOL+' -p '+PORT+' -w '+HEXFILE+' -v'
if OS == 'Windows':
cmd = cmd.replace('/','\\')
#print cmd
#exitcode, out, err = get_exitcode_stdout_stderr(cmd)
exitcode = os.system(cmd)
if (exitcode != 0):
#sys.stderr.write(err+'\n')
#sys.stderr.write('error '+str(exitcode)+'\n\n')
getchar()
exit()
# for non-UART project skip opening terminal
if TERMINAL == False:
sys.stdout.write('press return to exit')
getchar()
print('\n')
exit()
# on success open terminal
cmd = 'python '+TOOL_DIR+'terminal.py -p '+PORT
exitcode = os.system(cmd)
if (exitcode != 0):
sys.stderr.write('error '+str(exitcode)+'\n\n')
getchar()
exit()
# END OF MODULE
| apache-2.0 |
benjaminjkraft/django | tests/template_tests/syntax_tests/test_verbatim.py | 521 | 1658 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class VerbatimTagTests(SimpleTestCase):
@setup({'verbatim-tag01': '{% verbatim %}{{bare }}{% endverbatim %}'})
def test_verbatim_tag01(self):
output = self.engine.render_to_string('verbatim-tag01')
self.assertEqual(output, '{{bare }}')
@setup({'verbatim-tag02': '{% verbatim %}{% endif %}{% endverbatim %}'})
def test_verbatim_tag02(self):
output = self.engine.render_to_string('verbatim-tag02')
self.assertEqual(output, '{% endif %}')
@setup({'verbatim-tag03': '{% verbatim %}It\'s the {% verbatim %} tag{% endverbatim %}'})
def test_verbatim_tag03(self):
output = self.engine.render_to_string('verbatim-tag03')
self.assertEqual(output, 'It\'s the {% verbatim %} tag')
@setup({'verbatim-tag04': '{% verbatim %}{% verbatim %}{% endverbatim %}{% endverbatim %}'})
def test_verbatim_tag04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('verbatim-tag04')
@setup({'verbatim-tag05': '{% verbatim %}{% endverbatim %}{% verbatim %}{% endverbatim %}'})
def test_verbatim_tag05(self):
output = self.engine.render_to_string('verbatim-tag05')
self.assertEqual(output, '')
@setup({'verbatim-tag06': '{% verbatim special %}'
'Don\'t {% endverbatim %} just yet{% endverbatim special %}'})
def test_verbatim_tag06(self):
output = self.engine.render_to_string('verbatim-tag06')
self.assertEqual(output, 'Don\'t {% endverbatim %} just yet')
| bsd-3-clause |
LIKAIMO/MissionPlanner | Lib/email/message.py | 59 | 31517 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Basic message object for the email package object model."""
__all__ = ['Message']
import re
import uu
import binascii
import warnings
from cStringIO import StringIO
# Intrapackage imports
import email.charset
from email import utils
from email import errors
SEMISPACE = '; '
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
# Helper functions
def _splitparam(param):
# Split header parameters. BAW: this may be too simple. It isn't
# strictly RFC 2045 (section 5.1) compliant, but it catches most headers
# found in the wild. We may eventually need a full-fledged parser.
a, sep, b = param.partition(';')
if not sep:
return a.strip(), None
return a.strip(), b.strip()
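# Illustrative behavior (sketch, not part of the original module):
#   _splitparam('text/plain; charset=us-ascii') -> ('text/plain', 'charset=us-ascii')
#   _splitparam('text/plain') -> ('text/plain', None)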
def _formatparam(param, value=None, quote=True):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true. If value is a
three tuple (charset, language, value), it will be encoded according
to RFC2231 rules.
"""
if value is not None and len(value) > 0:
# A tuple is used for RFC 2231 encoded parameter values where items
# are (charset, language, value). charset is a string, not a Charset
# instance.
if isinstance(value, tuple):
# Encode as per RFC 2231
param += '*'
value = utils.encode_rfc2231(value[2], value[0], value[1])
# BAW: Please check this. I think that if quote is set it should
# force quoting even if not necessary.
if quote or tspecials.search(value):
return '%s="%s"' % (param, utils.quote(value))
else:
return '%s=%s' % (param, value)
else:
return param
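# Illustrative behavior (sketch): with the default quote=True the value is
# always quoted; a bare key is returned when value is None:
#   _formatparam('charset', 'us-ascii') -> 'charset="us-ascii"'
#   _formatparam('charset', 'us-ascii', quote=False) -> 'charset=us-ascii'
#   _formatparam('attachment') -> 'attachment'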
def _parseparam(s):
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
if '=' in f:
i = f.index('=')
f = f[:i].strip().lower() + '=' + f[i+1:].strip()
plist.append(f.strip())
s = s[end:]
return plist
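# Illustrative behavior (sketch): keys are lower-cased, values are kept
# as-is (including any quoting), and ';' inside quotes is not a separator:
#   _parseparam('; CHARSET="us-ascii"; format=flowed')
#   -> ['charset="us-ascii"', 'format=flowed']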
def _unquotevalue(value):
# This is different than utils.collapse_rfc2231_value() because it doesn't
# try to convert the value to a unicode. Message.get_param() and
# Message.get_params() are both currently defined to return the tuple in
# the face of RFC 2231 parameters.
if isinstance(value, tuple):
return value[0], value[1], utils.unquote(value[2])
else:
return utils.unquote(value)
class Message:
"""Basic message object.
A message object is defined as something that has a bunch of RFC 2822
headers and a payload. It may optionally have an envelope header
(a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
multipart or a message/rfc822), then the payload is a list of Message
objects, otherwise it is a string.
Message objects implement part of the `mapping' interface, which assumes
there is exactly one occurrence of the header per message. Some headers
do in fact appear multiple times (e.g. Received) and for those headers,
you must use the explicit API to set or get all the headers. Not all of
the mapping methods are implemented.
"""
def __init__(self):
self._headers = []
self._unixfrom = None
self._payload = None
self._charset = None
# Defaults for multipart messages
self.preamble = self.epilogue = None
self.defects = []
# Default content type
self._default_type = 'text/plain'
def __str__(self):
"""Return the entire formatted message as a string.
This includes the headers, body, and envelope header.
"""
return self.as_string(unixfrom=True)
def as_string(self, unixfrom=False):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This is a convenience method and may not generate the message exactly
as you intend because by default it mangles lines that begin with
"From ". For more flexibility, use the flatten() method of a
Generator instance.
"""
from email.generator import Generator
fp = StringIO()
g = Generator(fp)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
def is_multipart(self):
"""Return True if the message consists of multiple parts."""
return isinstance(self._payload, list)
#
# Unix From_ line
#
def set_unixfrom(self, unixfrom):
self._unixfrom = unixfrom
def get_unixfrom(self):
return self._unixfrom
#
# Payload manipulation.
#
def attach(self, payload):
"""Add the given payload to the current payload.
The current payload will always be a list of objects after this method
is called. If you want to set the payload to a scalar object, use
set_payload() instead.
"""
if self._payload is None:
self._payload = [payload]
else:
self._payload.append(payload)
def get_payload(self, i=None, decode=False):
"""Return a reference to the payload.
The payload will either be a list object or a string. If you mutate
the list object, you modify the message's payload in place. Optional
i returns that index into the payload.
Optional decode is a flag indicating whether the payload should be
decoded or not, according to the Content-Transfer-Encoding header
(default is False).
When True and the message is not a multipart, the payload will be
decoded if this header's value is `quoted-printable' or `base64'. If
some other encoding is used, or the header is missing, or if the
payload has bogus data (i.e. bogus base64 or uuencoded data), the
payload is returned as-is.
If the message is a multipart and the decode flag is True, then None
is returned.
"""
if i is None:
payload = self._payload
elif not isinstance(self._payload, list):
raise TypeError('Expected list, got %s' % type(self._payload))
else:
payload = self._payload[i]
if decode:
if self.is_multipart():
return None
cte = self.get('content-transfer-encoding', '').lower()
if cte == 'quoted-printable':
return utils._qdecode(payload)
elif cte == 'base64':
try:
return utils._bdecode(payload)
except binascii.Error:
# Incorrect padding
return payload
elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
sfp = StringIO()
try:
uu.decode(StringIO(payload+'\n'), sfp, quiet=True)
payload = sfp.getvalue()
except uu.Error:
# Some decoding problem
return payload
# Everything else, including encodings with 8bit or 7bit are returned
# unchanged.
return payload
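# Usage sketch (hypothetical message source; decode=True only applies to
# non-multipart payloads, as described in the docstring above):
#   import email
#   msg = email.message_from_string(raw_text)   # raw_text is illustrative
#   if not msg.is_multipart():
#       body = msg.get_payload(decode=True)     # handles base64/QP/uuencode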
def set_payload(self, payload, charset=None):
"""Set the payload to the given value.
Optional charset sets the message's default character set. See
set_charset() for details.
"""
self._payload = payload
if charset is not None:
self.set_charset(charset)
def set_charset(self, charset):
"""Set the charset of the payload to a given character set.
charset can be a Charset instance, a string naming a character set, or
None. If it is a string it will be converted to a Charset instance.
If charset is None, the charset parameter will be removed from the
Content-Type field. Anything else will generate a TypeError.
The message will be assumed to be of type text/* encoded with
charset.input_charset. It will be converted to charset.output_charset
and encoded properly, if needed, when generating the plain text
representation of the message. MIME headers (MIME-Version,
Content-Type, Content-Transfer-Encoding) will be added as needed.
"""
if charset is None:
self.del_param('charset')
self._charset = None
return
if isinstance(charset, basestring):
charset = email.charset.Charset(charset)
if not isinstance(charset, email.charset.Charset):
raise TypeError(charset)
# BAW: should we accept strings that can serve as arguments to the
# Charset constructor?
self._charset = charset
if 'MIME-Version' not in self:
self.add_header('MIME-Version', '1.0')
if 'Content-Type' not in self:
self.add_header('Content-Type', 'text/plain',
charset=charset.get_output_charset())
else:
self.set_param('charset', charset.get_output_charset())
if isinstance(self._payload, unicode):
self._payload = self._payload.encode(charset.output_charset)
if str(charset) != charset.get_output_charset():
self._payload = charset.body_encode(self._payload)
if 'Content-Transfer-Encoding' not in self:
cte = charset.get_body_encoding()
try:
cte(self)
except TypeError:
self._payload = charset.body_encode(self._payload)
self.add_header('Content-Transfer-Encoding', cte)
def get_charset(self):
"""Return the Charset instance associated with the message's payload.
"""
return self._charset
#
# MAPPING INTERFACE (partial)
#
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __getitem__(self, name):
"""Get a header value.
Return None if the header is missing instead of raising an exception.
Note that if the header appeared multiple times, exactly which
occurrence gets returned is undefined. Use get_all() to get all
the values matching a header field name.
"""
return self.get(name)
def __setitem__(self, name, val):
"""Set the value of a header.
Note: this does not overwrite an existing header with the same field
name. Use __delitem__() first to delete any existing headers.
"""
self._headers.append((name, val))
def __delitem__(self, name):
"""Delete all occurrences of a header, if present.
Does not raise an exception if the header is missing.
"""
name = name.lower()
newheaders = []
for k, v in self._headers:
if k.lower() != name:
newheaders.append((k, v))
self._headers = newheaders
def __contains__(self, name):
return name.lower() in [k.lower() for k, v in self._headers]
def has_key(self, name):
"""Return true if the message contains the header."""
missing = object()
return self.get(name, missing) is not missing
def keys(self):
"""Return a list of all the message's header field names.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all the message's header values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the message's header fields and values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def get(self, name, failobj=None):
"""Get a header value.
Like __getitem__() but return failobj instead of None when the field
is missing.
"""
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
return v
return failobj
#
# Additional useful stuff
#
def get_all(self, name, failobj=None):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original
message, and may contain duplicates. Any fields deleted and
re-inserted are always appended to the header list.
If no such fields exist, failobj is returned (defaults to None).
"""
values = []
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
values.append(v)
if not values:
return failobj
return values
def add_header(self, _name, _value, **_params):
"""Extended header setting.
name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added. If a
parameter value contains non-ASCII characters it must be specified as a
three-tuple of (charset, language, value), in which case it will be
encoded according to RFC2231 rules.
Example:
msg.add_header('content-disposition', 'attachment', filename='bud.gif')
"""
parts = []
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
if _value is not None:
parts.insert(0, _value)
self._headers.append((_name, SEMISPACE.join(parts)))
def replace_header(self, _name, _value):
"""Replace a header.
Replace the first matching header found in the message, retaining
header order and case. If no matching header was found, a KeyError is
raised.
"""
_name = _name.lower()
for i, (k, v) in zip(range(len(self._headers)), self._headers):
if k.lower() == _name:
self._headers[i] = (k, _value)
break
else:
raise KeyError(_name)
#
# Use these three methods instead of the three above.
#
def get_content_type(self):
"""Return the message's content type.
The returned string is coerced to lower case of the form
`maintype/subtype'. If there was no Content-Type header in the
message, the default type as given by get_default_type() will be
returned. Since according to RFC 2045, messages always have a default
type this will always return a value.
RFC 2045 defines a message's default type to be text/plain unless it
appears inside a multipart/digest container, in which case it would be
message/rfc822.
"""
missing = object()
value = self.get('content-type', missing)
if value is missing:
# This should have no parameters
return self.get_default_type()
ctype = _splitparam(value)[0].lower()
# RFC 2045, section 5.2 says if its invalid, use text/plain
if ctype.count('/') != 1:
return 'text/plain'
return ctype
def get_content_maintype(self):
"""Return the message's main content type.
This is the `maintype' part of the string returned by
get_content_type().
"""
ctype = self.get_content_type()
return ctype.split('/')[0]
def get_content_subtype(self):
"""Returns the message's sub-content type.
This is the `subtype' part of the string returned by
get_content_type().
"""
ctype = self.get_content_type()
return ctype.split('/')[1]
def get_default_type(self):
"""Return the `default' content type.
Most messages have a default content type of text/plain, except for
messages that are subparts of multipart/digest containers. Such
subparts have a default content type of message/rfc822.
"""
return self._default_type
def set_default_type(self, ctype):
"""Set the `default' content type.
ctype should be either "text/plain" or "message/rfc822", although this
is not enforced. The default content type is not stored in the
Content-Type header.
"""
self._default_type = ctype
def _get_params_preserve(self, failobj, header):
# Like get_params() but preserves the quoting of values. BAW:
# should this be part of the public interface?
missing = object()
value = self.get(header, missing)
if value is missing:
return failobj
params = []
for p in _parseparam(';' + value):
try:
name, val = p.split('=', 1)
name = name.strip()
val = val.strip()
except ValueError:
# Must have been a bare attribute
name = p.strip()
val = ''
params.append((name, val))
params = utils.decode_params(params)
return params
def get_params(self, failobj=None, header='content-type', unquote=True):
"""Return the message's Content-Type parameters, as a list.
The elements of the returned list are 2-tuples of key/value pairs, as
split on the `=' sign. The left hand side of the `=' is the key,
while the right hand side is the value. If there is no `=' sign in
the parameter the value is the empty string. The value is as
described in the get_param() method.
Optional failobj is the object to return if there is no Content-Type
header. Optional header is the header to search instead of
Content-Type. If unquote is True, the value is unquoted.
"""
missing = object()
params = self._get_params_preserve(missing, header)
if params is missing:
return failobj
if unquote:
return [(k, _unquotevalue(v)) for k, v in params]
else:
return params
def get_param(self, param, failobj=None, header='content-type',
unquote=True):
"""Return the parameter value if found in the Content-Type header.
Optional failobj is the object to return if there is no Content-Type
header, or the Content-Type header has no such parameter. Optional
header is the header to search instead of Content-Type.
Parameter keys are always compared case insensitively. The return
value can either be a string, or a 3-tuple if the parameter was RFC
2231 encoded. When it's a 3-tuple, the elements of the value are of
the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and
LANGUAGE can be None, in which case you should consider VALUE to be
encoded in the us-ascii charset. You can usually ignore LANGUAGE.
Your application should be prepared to deal with 3-tuple return
values, and can convert the parameter to a Unicode string like so:
param = msg.get_param('foo')
if isinstance(param, tuple):
param = unicode(param[2], param[0] or 'us-ascii')
In any case, the parameter value (either the returned string, or the
VALUE item in the 3-tuple) is always unquoted, unless unquote is set
to False.
"""
if header not in self:
return failobj
for k, v in self._get_params_preserve(failobj, header):
if k.lower() == param.lower():
if unquote:
return _unquotevalue(v)
else:
return v
return failobj
def set_param(self, param, value, header='Content-Type', requote=True,
charset=None, language=''):
"""Set a parameter in the Content-Type header.
If the parameter already exists in the header, its value will be
replaced with the new value.
If header is Content-Type and has not yet been defined for this
message, it will be set to "text/plain" and the new parameter and
value will be appended as per RFC 2045.
An alternate header can be specified in the header argument, and all
parameters will be quoted as necessary unless requote is False.
If charset is specified, the parameter will be encoded according to RFC
2231. Optional language specifies the RFC 2231 language, defaulting
to the empty string. Both charset and language should be strings.
"""
if not isinstance(value, tuple) and charset:
value = (charset, language, value)
if header not in self and header.lower() == 'content-type':
ctype = 'text/plain'
else:
ctype = self.get(header)
if not self.get_param(param, header=header):
if not ctype:
ctype = _formatparam(param, value, requote)
else:
ctype = SEMISPACE.join(
[ctype, _formatparam(param, value, requote)])
else:
ctype = ''
for old_param, old_value in self.get_params(header=header,
unquote=requote):
append_param = ''
if old_param.lower() == param.lower():
append_param = _formatparam(param, value, requote)
else:
append_param = _formatparam(old_param, old_value, requote)
if not ctype:
ctype = append_param
else:
ctype = SEMISPACE.join([ctype, append_param])
if ctype != self.get(header):
del self[header]
self[header] = ctype
def del_param(self, param, header='content-type', requote=True):
"""Remove the given parameter completely from the Content-Type header.
The header will be re-written in place without the parameter or its
value. All values will be quoted as necessary unless requote is
False. Optional header specifies an alternative to the Content-Type
header.
"""
if header not in self:
return
new_ctype = ''
for p, v in self.get_params(header=header, unquote=requote):
if p.lower() != param.lower():
if not new_ctype:
new_ctype = _formatparam(p, v, requote)
else:
new_ctype = SEMISPACE.join([new_ctype,
_formatparam(p, v, requote)])
if new_ctype != self.get(header):
del self[header]
self[header] = new_ctype
def set_type(self, type, header='Content-Type', requote=True):
"""Set the main type and subtype for the Content-Type header.
type must be a string in the form "maintype/subtype", otherwise a
ValueError is raised.
This method replaces the Content-Type header, keeping all the
parameters in place. If requote is False, this leaves the existing
header's quoting as is. Otherwise, the parameters will be quoted (the
default).
An alternative header can be specified in the header argument. When
the Content-Type header is set, we'll always also add a MIME-Version
header.
"""
# BAW: should we be strict?
if not type.count('/') == 1:
raise ValueError
# Set the Content-Type, you get a MIME-Version
if header.lower() == 'content-type':
del self['mime-version']
self['MIME-Version'] = '1.0'
if header not in self:
self[header] = type
return
params = self.get_params(header=header, unquote=requote)
del self[header]
self[header] = type
# Skip the first param; it's the old type.
for p, v in params[1:]:
self.set_param(p, v, header, requote)
def get_filename(self, failobj=None):
"""Return the filename associated with the payload if present.
The filename is extracted from the Content-Disposition header's
`filename' parameter, and it is unquoted. If that header is missing
the `filename' parameter, this method falls back to looking for the
`name' parameter.
"""
missing = object()
filename = self.get_param('filename', missing, 'content-disposition')
if filename is missing:
filename = self.get_param('name', missing, 'content-type')
if filename is missing:
return failobj
return utils.collapse_rfc2231_value(filename).strip()
def get_boundary(self, failobj=None):
"""Return the boundary associated with the payload if present.
The boundary is extracted from the Content-Type header's `boundary'
parameter, and it is unquoted.
"""
missing = object()
boundary = self.get_param('boundary', missing)
if boundary is missing:
return failobj
# RFC 2046 says that boundaries may begin but not end in w/s
return utils.collapse_rfc2231_value(boundary).rstrip()
def set_boundary(self, boundary):
"""Set the boundary parameter in Content-Type to 'boundary'.
This is subtly different than deleting the Content-Type header and
adding a new one with a new boundary parameter via add_header(). The
main difference is that using the set_boundary() method preserves the
order of the Content-Type header in the original message.
HeaderParseError is raised if the message has no Content-Type header.
"""
missing = object()
params = self._get_params_preserve(missing, 'content-type')
if params is missing:
# There was no Content-Type header, and we don't know what type
# to set it to, so raise an exception.
raise errors.HeaderParseError('No Content-Type header found')
newparams = []
foundp = False
for pk, pv in params:
if pk.lower() == 'boundary':
newparams.append(('boundary', '"%s"' % boundary))
foundp = True
else:
newparams.append((pk, pv))
if not foundp:
# The original Content-Type header had no boundary attribute.
# Tack one on the end. BAW: should we raise an exception
# instead???
newparams.append(('boundary', '"%s"' % boundary))
# Replace the existing Content-Type header with the new value
newheaders = []
for h, v in self._headers:
if h.lower() == 'content-type':
parts = []
for k, v in newparams:
if v == '':
parts.append(k)
else:
parts.append('%s=%s' % (k, v))
newheaders.append((h, SEMISPACE.join(parts)))
else:
newheaders.append((h, v))
self._headers = newheaders
def get_content_charset(self, failobj=None):
"""Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned.
"""
missing = object()
charset = self.get_param('charset', missing)
if charset is missing:
return failobj
if isinstance(charset, tuple):
# RFC 2231 encoded, so decode it, and it better end up as ascii.
pcharset = charset[0] or 'us-ascii'
try:
# LookupError will be raised if the charset isn't known to
# Python. UnicodeError will be raised if the encoded text
# contains a character not in the charset.
charset = unicode(charset[2], pcharset).encode('us-ascii')
except (LookupError, UnicodeError):
charset = charset[2]
# charset character must be in us-ascii range
try:
if isinstance(charset, str):
charset = unicode(charset, 'us-ascii')
charset = charset.encode('us-ascii')
except UnicodeError:
return failobj
# RFC 2046, $4.1.2 says charsets are not case sensitive
return charset.lower()
def get_charsets(self, failobj=None):
"""Return a list containing the charset(s) used in this message.
The returned list of items describes the Content-Type headers'
charset parameter for this message and all the subparts in its
payload.
Each item will either be a string (the value of the charset parameter
in the Content-Type header of that part) or the value of the
'failobj' parameter (defaults to None), if the part does not have a
main MIME type of "text", or the charset is not defined.
The list will contain one string for each part of the message, plus
one for the container message (i.e. self), so that a non-multipart
message will still return a list of length 1.
"""
return [part.get_content_charset(failobj) for part in self.walk()]
# I.e. def walk(self): ...
from email.iterators import walk
| gpl-3.0 |
giane88/tinyProject | run.py | 1 | 1774 | #!/usr/bin/python
N_MOTES = 10
DBG_CHANNELS = "default error"
SIM_TIME = 300
TOPO_FILE = "linkgain.out"
#NOISE_FILE = "/opt/tinyos-2.1.0/tos/lib/tossim/noise/casino-lab.txt"
NOISE_FILE = "/home/mgianello/tinyos-main/tos/lib/tossim/noise/meyer-heavy.txt"
from TOSSIM import *
from tinyos.tossim.TossimApp import *
from random import *
import sys
t = Tossim([])
r = t.radio()
fs = open("sensor.log", "w")
t.randomSeed(1)
for channel in DBG_CHANNELS.split():
t.addChannel(channel, sys.stdout)
t.addChannel("sensor", fs)
#add gain links
f = open(TOPO_FILE, "r")
lines = f.readlines()
for line in lines:
s = line.split()
if (len(s) > 0):
if s[0] == "gain":
r.add(int(s[1]), int(s[2]), float(s[3]))
elif s[0] == "noise":
r.setNoise(int(s[1]), float(s[2]), float(s[3]))
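# Illustrative linkgain.out lines for the parser above (values are made up):
#   "gain 1 2 -54.0"    -> link gain from mote 1 to mote 2
#   "noise 3 -75.0 5.0" -> noise floor mean and range for mote 3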
#add noise trace
noise = open(NOISE_FILE, "r")
lines = noise.readlines()
for line in lines:
str = line.strip()
if (str != ""):
val = int(float(str))
for i in range(0, N_MOTES):
t.getNode(i).addNoiseTraceReading(val)
for i in range (0, N_MOTES):
time=i * t.ticksPerSecond() / 100
m=t.getNode(i)
m.bootAtTime(time)
m.createNoiseModel()
print "Booting ", i, " at ~ ", time*1000/t.ticksPerSecond(), "ms"
time = t.time()
lastTime = -1
while (time + SIM_TIME * t.ticksPerSecond() > t.time()):
timeTemp = int(t.time()/(t.ticksPerSecond()*10))
if( timeTemp > lastTime ): # print a marker every 10 seconds... to make the log easier to read
lastTime = timeTemp
print "----------------------------------SIMULATION: ~", lastTime*10, " s ----------------------"
t.runNextEvent()
print "----------------------------------END OF SIMULATION-------------------------------------"
| gpl-2.0 |
gdub/mysql-connector-python | lib/mysql/connector/pooling.py | 1 | 11822 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implementing pooling of connections to MySQL servers.
"""
import re
from uuid import uuid4
# pylint: disable=F0401
try:
import queue
except ImportError:
# Python v2
import Queue as queue
# pylint: enable=F0401
import threading
from . import errors
from .connection import MySQLConnection
CONNECTION_POOL_LOCK = threading.RLock()
CNX_POOL_ARGS = ('pool_name', 'pool_size', 'pool_cnx')
CNX_POOL_MAXSIZE = 32
CNX_POOL_MAXNAMESIZE = 64
CNX_POOL_NAMEREGEX = re.compile(r'[^a-zA-Z0-9._:\-*$#]')
def generate_pool_name(**kwargs):
"""Generate a pool name
This function takes keyword arguments, usually the connection
arguments for MySQLConnection, and tries to generate a name for
a pool.
Raises PoolError when no name can be generated.
Returns a string.
"""
parts = []
for key in ('host', 'port', 'user', 'database'):
try:
parts.append(str(kwargs[key]))
except KeyError:
pass
if not parts:
raise errors.PoolError(
"Failed generating pool name; specify pool_name")
return '_'.join(parts)
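# Illustrative result (sketch): whichever of host/port/user/database are
# present are joined with underscores:
#   generate_pool_name(host='127.0.0.1', port=3306, user='joe')
#   -> '127.0.0.1_3306_joe'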
class PooledMySQLConnection(object):
"""Class holding a MySQL Connection in a pool
PooledMySQLConnection is used by MySQLConnectionPool to return an
instance holding a MySQL connection. It works like a MySQLConnection
except for methods like close() and config().
The close()-method will add the connection back to the pool rather
than disconnecting from the MySQL server.
Configuring the connection has to be done through the MySQLConnectionPool
method set_config(). Using config() on a pooled connection will raise a
PoolError.
"""
def __init__(self, pool, cnx):
"""Initialize
The pool argument must be an instance of MySQLConnectionPool. cnx
is an instance of MySQLConnection.
"""
if not isinstance(pool, MySQLConnectionPool):
raise AttributeError(
"pool should be a MySQLConnectionPool")
if not isinstance(cnx, MySQLConnection):
raise AttributeError(
"cnx should be a MySQLConnection")
self._cnx_pool = pool
self._cnx = cnx
def __getattr__(self, attr):
"""Calls attributes of the MySQLConnection instance"""
return getattr(self._cnx, attr)
def close(self):
"""Do not close, but add connection back to pool
The close() method does not close the connection with the
MySQL server. The connection is added back to the pool so it
can be reused.
When the pool is configured to reset the session, the session
state will be cleared by re-authenticating the user.
"""
cnx = self._cnx
if self._cnx_pool.reset_session:
cnx.reset_session()
self._cnx_pool.add_connection(cnx)
self._cnx = None
def config(self, **kwargs):
"""Configuration is done through the pool"""
raise errors.PoolError(
"Configuration for pooled connections should "
"be done through the pool itself."
)
@property
def pool_name(self):
"""Return the name of the connection pool"""
return self._cnx_pool.pool_name
class MySQLConnectionPool(object):
"""Class defining a pool of MySQL connections"""
def __init__(self, pool_size=5, pool_name=None, pool_reset_session=True,
**kwargs):
"""Initialize
Initialize a MySQL connection pool with a maximum number of
connections set to pool_size. The rest of the keywords
arguments, kwargs, are configuration arguments for MySQLConnection
instances.
"""
self._pool_size = None
self._pool_name = None
self._reset_session = pool_reset_session
self._set_pool_size(pool_size)
self._set_pool_name(pool_name or generate_pool_name(**kwargs))
self._cnx_config = {}
self._cnx_queue = queue.Queue(self._pool_size)
self._config_version = uuid4()
if kwargs:
self.set_config(**kwargs)
cnt = 0
while cnt < self._pool_size:
self.add_connection()
cnt += 1
@property
def pool_name(self):
"""Return the name of the connection pool"""
return self._pool_name
@property
def pool_size(self):
"""Return number of connections managed by the pool"""
return self._pool_size
@property
def reset_session(self):
"""Return whether to reset session"""
return self._reset_session
def set_config(self, **kwargs):
"""Set the connection configuration for MySQLConnection instances
This method sets the configuration used for creating MySQLConnection
instances. See MySQLConnection for valid connection arguments.
Raises PoolError when a connection argument is not valid, missing
or not supported by MySQLConnection.
"""
if not kwargs:
return
with CONNECTION_POOL_LOCK:
try:
test_cnx = MySQLConnection()
test_cnx.config(**kwargs)
self._cnx_config = kwargs
self._config_version = uuid4()
except AttributeError as err:
raise errors.PoolError(
"Connection configuration not valid: {0}".format(err))
def _set_pool_size(self, pool_size):
"""Set the size of the pool
This method sets the size of the pool but it will not resize the pool.
Raises an AttributeError when the pool_size is not valid. Invalid size
is 0, negative or higher than pooling.CNX_POOL_MAXSIZE.
"""
if pool_size <= 0 or pool_size > CNX_POOL_MAXSIZE:
raise AttributeError(
"Pool size should be higher than 0 and "
"lower or equal to {0}".format(CNX_POOL_MAXSIZE))
self._pool_size = pool_size
def _set_pool_name(self, pool_name):
r"""Set the name of the pool
This method checks the validity and sets the name of the pool.
Raises an AttributeError when pool_name contains illegal characters
([^a-zA-Z0-9._:\-*$#]) or is longer than pooling.CNX_POOL_MAXNAMESIZE.
"""
if CNX_POOL_NAMEREGEX.search(pool_name):
raise AttributeError(
"Pool name '{0}' contains illegal characters".format(pool_name))
if len(pool_name) > CNX_POOL_MAXNAMESIZE:
raise AttributeError(
"Pool name '{0}' is too long".format(pool_name))
self._pool_name = pool_name
def _queue_connection(self, cnx):
"""Put connection back in the queue
This method is putting a connection back in the queue. It will not
acquire a lock as the methods using _queue_connection() will have it
set.
Raises PoolError on errors.
"""
if not isinstance(cnx, MySQLConnection):
raise errors.PoolError(
"Connection instance not subclass of MySQLConnection.")
try:
self._cnx_queue.put(cnx, block=False)
except queue.Full:
raise errors.PoolError("Failed adding connection; queue is full")
def add_connection(self, cnx=None):
"""Add a connection to the pool
This method instantiates a MySQLConnection using the configuration
passed when initializing the MySQLConnectionPool instance or using
the set_config() method.
If cnx is a MySQLConnection instance, it will be added to the
queue.
Raises PoolError when no configuration is set, when no more
connection can be added (maximum reached) or when the connection
can not be instantiated.
"""
with CONNECTION_POOL_LOCK:
if not self._cnx_config:
raise errors.PoolError(
"Connection configuration not available")
if self._cnx_queue.full():
raise errors.PoolError(
"Failed adding connection; queue is full")
if not cnx:
cnx = MySQLConnection(**self._cnx_config)
# pylint: disable=W0201,W0212
cnx._pool_config_version = self._config_version
# pylint: enable=W0201,W0212
else:
if not isinstance(cnx, MySQLConnection):
raise errors.PoolError(
"Connection instance not subclass of MySQLConnection.")
self._queue_connection(cnx)
def get_connection(self):
"""Get a connection from the pool
This method returns an PooledMySQLConnection instance which
has a reference to the pool that created it, and the next available
MySQL connection.
When the MySQL connection is not connected, a reconnect is attempted.
Raises PoolError on errors.
Returns a PooledMySQLConnection instance.
"""
with CONNECTION_POOL_LOCK:
try:
cnx = self._cnx_queue.get(block=False)
except queue.Empty:
raise errors.PoolError(
"Failed getting connection; pool exhausted")
# pylint: disable=W0201,W0212
if not cnx.is_connected() \
or self._config_version != cnx._pool_config_version:
cnx.config(**self._cnx_config)
try:
cnx.reconnect()
except errors.InterfaceError:
# Failed to reconnect, give connection back to pool
self._queue_connection(cnx)
raise
cnx._pool_config_version = self._config_version
# pylint: enable=W0201,W0212
return PooledMySQLConnection(self, cnx)
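# Usage sketch (connection arguments are illustrative, not part of this
# module): create a pool once, then check connections out and back in:
#   pool = MySQLConnectionPool(pool_size=3, pool_name='demo',
#                              host='127.0.0.1', user='app', database='test')
#   cnx = pool.get_connection()
#   try:
#       cur = cnx.cursor()
#       cur.execute("SELECT 1")
#   finally:
#       cnx.close()  # returns the connection to the pool (see close() above)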
def _remove_connections(self):
"""Close all connections
This method closes all connections. It returns the number
of connections it closed.
Used mostly for tests.
Returns int.
"""
with CONNECTION_POOL_LOCK:
cnt = 0
cnxq = self._cnx_queue
while cnxq.qsize():
try:
cnx = cnxq.get(block=False)
cnx.disconnect()
cnt += 1
except queue.Empty:
return cnt
except errors.PoolError:
raise
except errors.Error:
# Any other error when closing means connection is closed
pass
return cnt
| gpl-2.0 |
maestrano/odoo | addons/delivery/__init__.py | 376 | 1103 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import delivery
import partner
import sale
import stock
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
halfcrazy/sqlalchemy | lib/sqlalchemy/schema.py | 59 | 1182 | # schema.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Compatibility namespace for sqlalchemy.sql.schema and related.
"""
from .sql.base import (
SchemaVisitor
)
from .sql.schema import (
CheckConstraint,
Column,
ColumnDefault,
Constraint,
DefaultClause,
DefaultGenerator,
FetchedValue,
ForeignKey,
ForeignKeyConstraint,
Index,
MetaData,
PassiveDefault,
PrimaryKeyConstraint,
SchemaItem,
Sequence,
Table,
ThreadLocalMetaData,
UniqueConstraint,
_get_table_key,
ColumnCollectionConstraint,
ColumnCollectionMixin
)
from .sql.naming import conv
from .sql.ddl import (
DDL,
CreateTable,
DropTable,
CreateSequence,
DropSequence,
CreateIndex,
DropIndex,
CreateSchema,
DropSchema,
_DropView,
CreateColumn,
AddConstraint,
DropConstraint,
DDLBase,
DDLElement,
_CreateDropBase,
_DDLCompiles,
sort_tables,
sort_tables_and_constraints
)
| mit |
xcgd/hr_streamline | model/res_users.py | 1 | 1921 | # -*- encoding: UTF-8 -*-
##############################################################################
#
# Fiche Action Budget Management, for OpenERP
# Copyright (C) 2014 XCG Consulting (www.xcg-consulting.fr)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
class res_users(osv.Model):
_name = 'res.users'
_inherit = 'res.users'
def _get_employee_id(self, cr, uid, ids, name, arg, context=None):
"""
This extends the base user to look up for a matching employee
@returns a dict with k=user.id and v=FIRST matching employee.
v contains the special browse_null if no employee is found.
"""
result = dict()
for user in self.browse(cr, uid, ids, context=context):
if user.employee_ids:
result[user.id] = user.employee_ids[0]
else:
result[user.id] = osv.orm.browse_null()
return result
_columns = {
'employee_id': fields.function(_get_employee_id,
type='many2one',
obj="hr.employee"),
}
| agpl-3.0 |
linas/link-grammar | bindings/python-examples/sentence-check.py | 3 | 6203 | #!/usr/bin/env python3
"""
Note: This only runs with Python3!
Demo: Find unlinked or unknown words.
This demo is extremely simplified.
It can only work with link-grammar library version >= 5.3.10.
Input: English sentences, one per line.
Output: If there are any []-marked words in the linkage results,
the output contains unique combinations of the input sentence with
these words marked. No attempt is made to handle the walls.
Spell guesses are not handled in this demo.
Example:
This is a the test of bfgiuing and xxxvfrg
Output:
Sentence has 1 unlinked word:
1: LEFT-WALL this.p is.v [a] the test.n of bfgiuing[!].g and.j-n xxxvfrg[?].n RIGHT-WALL
2: LEFT-WALL this.p is.v a [the] test.n of bfgiuing[!].g and.j-n xxxvfrg[?].n RIGHT-WALL
3: LEFT-WALL this.p is.v [a] the test.n of bfgiuing[!].g and.j-n xxxvfrg[?].a RIGHT-WALL
4: LEFT-WALL this.p is.v a [the] test.n of bfgiuing[!].g and.j-n xxxvfrg[?].a RIGHT-WALL
"""
import sys
import re
import argparse
from linkgrammar import (Sentence, ParseOptions, Dictionary,
LG_Error, LG_TimerExhausted, Clinkgrammar as clg)
def nsuffix(q):
return '' if q == 1 else 's'
class Formatter(argparse.HelpFormatter):
""" Display the "lang" argument as a first one, as in link-parser. """
def _format_usage(self, usage, actions, groups, prefix):
usage_message = super(Formatter, self)._format_usage(usage, actions, groups, prefix)
return re.sub(r'(usage: \S+) (.*) \[lang]', r'\1 [lang] \2', str(usage_message))
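# Illustrative effect (sketch): argparse would normally render something like
#   usage: sentence-check.py [-h] [-v [0-199]] ... [lang]
# and the substitution above moves the positional argument to the front:
#   usage: sentence-check.py [lang] [-h] [-v [0-199]] ...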
#-----------------------------------------------------------------------------#
is_stdin_atty = sys.stdin.isatty()
PROMPT = "sentence-check: " if is_stdin_atty else ""
DISPLAY_GUESSES = True # Display regex and POS guesses
BATCH_LABELS = '*: '
print("Version:", clg.linkgrammar_get_version())
args = argparse.ArgumentParser(formatter_class=Formatter)
args.add_argument('lang', nargs='?', default='en',
help="language or dictionary location")
args.add_argument("-v", "--verbosity", type=int,default=0,
choices=range(0,199), metavar='[0-199]',
help= "1: Basic verbosity; 2-4: Trace; >5: Debug")
args.add_argument("-p", "--position", action="store_true",
help="show word sentence position")
args.add_argument("-nm", "--no-morphology", dest='morphology', action='store_false',
help="do not display morphology")
args.add_argument("-i", "--interactive", action="store_true",
help="interactive mode after each result")
arg = args.parse_args()
try:
lgdict = Dictionary(arg.lang)
except LG_Error:
# The default error handler will print the error message
args.print_usage()
sys.exit(2)
po = ParseOptions(verbosity=arg.verbosity)
po.max_null_count = 999 # > allowed maximum number of words
po.linkage_limit = 10000 # maximum number of linkages to generate
po.max_parse_time = 10 # actual parse timeout may be about twice bigger
po.spell_guess = True if DISPLAY_GUESSES else False
po.display_morphology = arg.morphology
while True:
try:
sentence_text = input(PROMPT)
except EOFError:
print("EOF")
exit(0)
if not is_stdin_atty and sentence_text:
if sentence_text[0] == '%':
continue
if sentence_text[0] == '!': # ignore user-settings for now
continue
if sentence_text[0] in BATCH_LABELS:
sentence_text = sentence_text[1:]
if sentence_text.strip() == '':
continue
if not is_stdin_atty:
print("\n" + sentence_text)
sent = Sentence(str(sentence_text), lgdict, po)
try:
linkages = sent.parse()
except LG_TimerExhausted:
print('Sentence too complex for parsing in ~{} second{}.'.format(
po.max_parse_time,nsuffix(po.max_parse_time)))
continue
if not linkages:
print('Error occurred - sentence ignored.')
continue
if len(linkages) <= 0:
print('Cannot parse the input sentence')
continue
null_count = sent.null_count()
if arg.position:
print(' ' * len(PROMPT), end='')
for p in range (0, len(sentence_text)):
print(p%10, end="")
print()
if null_count == 0:
print("Sentence parsed OK", end='')
linkages = list(linkages)
correction_found = False
# search for correction suggestions
for l in linkages:
for word in l.words():
if word.find(r'.#') > 0:
correction_found = True
break
if correction_found:
break
if correction_found:
print(" - with correction", end='')
if null_count == 0:
print(".")
guess_found = False
if DISPLAY_GUESSES:
# Check the first linkage for regexed/unknown words
for word in linkages[0].words():
# search for something[x]
if re.search(r'\S+\[[^]]+]', word):
guess_found = True
break
# Show results with unlinked words or guesses
if arg.position or guess_found or correction_found or null_count != 0:
print('Sentence has {} unlinked word{}:'.format(
null_count, nsuffix(null_count)))
result_no = 0
unique_parse = {}
for linkage in linkages:
words = list(linkage.words())
if str(words) in unique_parse:
continue
result_no += 1
unique_parse[str(words)] = True
if arg.position:
words_char = []
words_byte = []
for wi, w in enumerate(words):
words_char.append(w + str((linkage.word_char_start(wi), linkage.word_char_end(wi))))
words_byte.append(w + str((linkage.word_byte_start(wi), linkage.word_byte_end(wi))))
print(u"{}: {}".format(result_no, ' '.join(words_char)))
print(u"{}: {}".format(result_no, ' '.join(words_byte)))
else:
print("{}: {}".format(result_no, ' '.join(words)))
if arg.interactive:
print("Interactive session (^D to end):")
import code
code.interact(local=locals())
| lgpl-2.1 |
RackSec/ansible | lib/ansible/module_utils/gce.py | 94 | 2535 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils.gcp import gcp_connect
from ansible.module_utils.gcp import unexpected_error_msg as gcp_error
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
HAS_LIBCLOUD_BASE = True
except ImportError:
HAS_LIBCLOUD_BASE = False
USER_AGENT_PRODUCT = "Ansible-gce"
USER_AGENT_VERSION = "v1"
def gce_connect(module, provider=None):
"""Return a GCP connection for Google Compute Engine."""
if not HAS_LIBCLOUD_BASE:
module.fail_json(msg='libcloud must be installed to use this module')
provider = provider or Provider.GCE
return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION)
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
return gcp_error(error)
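# Usage sketch (illustrative): inside an Ansible module, `module` would be
# the usual AnsibleModule instance; gce_connect returns a libcloud GCE driver:
#   gce = gce_connect(module)  # provider defaults to Provider.GCE
#   try:
#       nodes = gce.list_nodes()
#   except Exception as exc:
#       module.fail_json(msg=unexpected_error_msg(exc))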
| gpl-3.0 |
Intel-tensorflow/tensorflow | tensorflow/python/keras/applications/mobilenet.py | 6 | 19800 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""MobileNet v1 models for Keras.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNets support any input size greater than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 16 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100% MobileNet), 0.75, 0.5 and 0.25.
For each of these `alpha` values, weights for 4 different input image sizes
are provided (224, 192, 160, 128).
The following table describes the size and accuracy of the 100% MobileNet
on size 224 x 224:
----------------------------------------------------------------------------
Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M)
----------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 |
| 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 |
| 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 |
----------------------------------------------------------------------------
The following table describes the performance of
the 100% MobileNet on various input sizes:
------------------------------------------------------------------------
Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M)
------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 569 | 4.2 |
| 1.0 MobileNet-192 | 69.1 % | 418 | 4.2 |
| 1.0 MobileNet-160 | 67.2 % | 290 | 4.2 |
| 1.0 MobileNet-128 | 64.4 % | 186 | 4.2 |
------------------------------------------------------------------------
Reference:
- [MobileNets: Efficient Convolutional Neural Networks
for Mobile Vision Applications](
https://arxiv.org/abs/1704.04861)
"""
from tensorflow.python.keras import backend
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet/')
layers = None
@keras_export('keras.applications.mobilenet.MobileNet',
'keras.applications.MobileNet')
def MobileNet(input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
"""Instantiates the MobileNet architecture.
Reference:
- [MobileNets: Efficient Convolutional Neural Networks
for Mobile Vision Applications](
https://arxiv.org/abs/1704.04861)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For MobileNet, call `tf.keras.applications.mobilenet.preprocess_input`
on your inputs before passing them to the model.
`mobilenet.preprocess_input` will scale input pixels between -1 and 1.
Args:
input_shape: Optional shape tuple, only to be specified if `include_top`
is False (otherwise the input shape has to be `(224, 224, 3)` (with
`channels_last` data format) or `(3, 224, 224)` (with `channels_first`
data format)). It should have exactly 3 input channels, and width and
height should be no smaller than 32. E.g. `(200, 200, 3)` would be one
valid value. Defaults to `None`.
`input_shape` will be ignored if the `input_tensor` is provided.
alpha: Controls the width of the network. This is known as the width
multiplier in the MobileNet paper. - If `alpha` < 1.0, proportionally
decreases the number of filters in each layer. - If `alpha` > 1.0,
proportionally increases the number of filters in each layer. - If
`alpha` = 1, the default number of filters from the paper is used at
each layer. Defaults to 1.0.
depth_multiplier: Depth multiplier for depthwise convolution. This is
called the resolution multiplier in the MobileNet paper. Defaults to 1.
dropout: Dropout rate. Defaults to 0.001.
include_top: Boolean, whether to include the fully-connected layer at the
top of the network. Defaults to `True`.
weights: One of `None` (random initialization), `'imagenet'` (pre-training
on ImageNet), or the path to the weights file to be loaded. Defaults to
`'imagenet'`.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to
use as image input for the model. `input_tensor` is useful for sharing
inputs between multiple different networks. Defaults to `None`.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` (default) means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: Optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified. Defaults to 1000.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
"""
global layers
if 'layers' in kwargs:
layers = kwargs.pop('layers')
else:
layers = VersionAwareLayers()
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if backend.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of'
'`0.25`, `0.50`, `0.75` or `1.0` only.')
if rows != cols or rows not in [128, 160, 192, 224]:
rows = 224
logging.warning('`input_shape` is undefined or non-square, '
'or `rows` is not in [128, 160, 192, 224]. '
'Weights for input shape (224, 224) will be'
' loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(
x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(
x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(
x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(
x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if backend.image_data_format() == 'channels_first':
shape = (int(1024 * alpha), 1, 1)
else:
shape = (1, 1, int(1024 * alpha))
x = layers.GlobalAveragePooling2D()(x)
x = layers.Reshape(shape, name='reshape_1')(x)
x = layers.Dropout(dropout, name='dropout')(x)
x = layers.Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
x = layers.Reshape((classes,), name='reshape_2')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Activation(activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
# Load weights.
if weights == 'imagenet':
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
else:
model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
Args:
inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last`
data format) or (3, rows, cols) (with `channels_first` data format).
It should have exactly 3 input channels, and width and height should
be no smaller than 32. E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the width and
height of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1.
# Input shape
4D tensor with shape: `(samples, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, rows, cols, channels)` if
data_format='channels_last'.
# Output shape
4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
filters = int(filters * alpha)
x = layers.Conv2D(
filters,
kernel,
padding='same',
use_bias=False,
strides=strides,
name='conv1')(inputs)
x = layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return layers.ReLU(6., name='conv1_relu')(x)
def _depthwise_conv_block(inputs,
pointwise_conv_filters,
alpha,
depth_multiplier=1,
strides=(1, 1),
block_id=1):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
Args:
inputs: Input tensor of shape `(rows, cols, channels)` (with
`channels_last` data format) or (channels, rows, cols) (with
`channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape: `(batch, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, rows, cols, channels)` if
data_format='channels_last'.
# Output shape
4D tensor with shape: `(batch, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
if strides == (1, 1):
x = inputs
else:
x = layers.ZeroPadding2D(((0, 1), (0, 1)), name='conv_pad_%d' % block_id)(
inputs)
x = layers.DepthwiseConv2D((3, 3),
padding='same' if strides == (1, 1) else 'valid',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_dw_%d_bn' % block_id)(
x)
x = layers.ReLU(6., name='conv_dw_%d_relu' % block_id)(x)
x = layers.Conv2D(
pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_pw_%d_bn' % block_id)(
x)
return layers.ReLU(6., name='conv_pw_%d_relu' % block_id)(x)
@keras_export('keras.applications.mobilenet.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.mobilenet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
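# A brief usage sketch (not part of the original module; the random array is a
# stand-in for a real image and the import path assumes a standard TF install):
#
#   import numpy as np
#   from tensorflow.keras.applications.mobilenet import (
#       MobileNet, preprocess_input, decode_predictions)
#
#   model = MobileNet(weights='imagenet')          # alpha=1.0, 224x224 input
#   img = np.random.rand(1, 224, 224, 3) * 255.0   # batch of one fake image
#   preds = model.predict(preprocess_input(img))
#   print(decode_predictions(preds, top=3))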
| apache-2.0 |
atsaki/ansible | lib/ansible/template/__init__.py | 3 | 21536 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import contextlib
import os
import re
from ansible.compat.six import string_types, text_type, binary_type, StringIO
from jinja2 import Environment
from jinja2.loaders import FileSystemLoader
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.utils import concat as j2_concat
from jinja2.runtime import StrictUndefined
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable
from ansible.plugins import filter_loader, lookup_loader, test_loader
from ansible.template.safe_eval import safe_eval
from ansible.template.template import AnsibleJ2Template
from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.debug import debug
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from numbers import Number
__all__ = ['Templar']
# A regex for checking to see if a variable we're trying to
# expand is just a single variable name.
# Primitive Types which we don't want Jinja to convert to strings.
NON_TEMPLATED_TYPES = ( bool, Number )
JINJA2_OVERRIDE = '#jinja2:'
def _escape_backslashes(data, jinja_env):
"""Double backslashes within jinja2 expressions
A user may enter something like this in a playbook::
debug:
msg: "Test Case 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1')}}"
The string inside of the {{ }} gets interpreted multiple times. First by yaml.
Then by python. And finally by jinja2 as part of its variable. Because
it is processed by both python and jinja2, the backslash escaped
characters get unescaped twice. This means that we'd normally have to use
four backslashes to escape that. This is painful for playbook authors as
they have to remember different rules for inside vs outside of a jinja2
expression (The backslashes outside of the "{{ }}" only get processed by
yaml and python. So they only need to be escaped once). The following
code fixes this by automatically performing the extra quoting of
backslashes inside of a jinja2 expression.
"""
if '\\' in data and '{{' in data:
new_data = []
d2 = jinja_env.preprocess(data)
in_var = False
for token in jinja_env.lex(d2):
if token[1] == 'variable_begin':
in_var = True
new_data.append(token[2])
elif token[1] == 'variable_end':
in_var = False
new_data.append(token[2])
elif in_var and token[1] == 'string':
# Double backslashes only if we're inside of a jinja2 variable
new_data.append(token[2].replace('\\','\\\\'))
else:
new_data.append(token[2])
data = ''.join(new_data)
return data
def _count_newlines_from_end(in_str):
'''
Counts the number of newlines at the end of a string. This is used during
the jinja2 templating to ensure the count matches the input, since some newlines
may be thrown away during the templating.
'''
try:
i = len(in_str)
j = i - 1
while in_str[j] == '\n':
j -= 1
return i - 1 - j
except IndexError:
# Uncommon cases: zero length string and string containing only newlines
return i
class Templar:
'''
The main class for templating, with the main entry-point of template().
'''
def __init__(self, loader, shared_loader_obj=None, variables=dict()):
self._loader = loader
self._filters = None
self._tests = None
self._available_variables = variables
self._cached_result = {}
if loader:
self._basedir = loader.get_basedir()
else:
self._basedir = './'
if shared_loader_obj:
self._filter_loader = getattr(shared_loader_obj, 'filter_loader')
self._test_loader = getattr(shared_loader_obj, 'test_loader')
self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader')
else:
self._filter_loader = filter_loader
self._test_loader = test_loader
self._lookup_loader = lookup_loader
# flags to determine whether certain failures during templating
# should result in fatal errors being raised
self._fail_on_lookup_errors = True
self._fail_on_filter_errors = True
self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
self.environment = Environment(
trim_blocks=True,
undefined=StrictUndefined,
extensions=self._get_extensions(),
finalize=self._finalize,
loader=FileSystemLoader(self._basedir),
)
self.environment.template_class = AnsibleJ2Template
self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))
self.block_start = self.environment.block_start_string
self.block_end = self.environment.block_end_string
self.variable_start = self.environment.variable_start_string
self.variable_end = self.environment.variable_end_string
self._clean_regex = re.compile(r'(?:%s[%s%s]|[%s%s]%s)' % (self.variable_start[0], self.variable_start[1], self.block_start[1], self.block_end[0], self.variable_end[0], self.variable_end[1]))
def _get_filters(self):
'''
Returns filter plugins, after loading and caching them if need be
'''
if self._filters is not None:
return self._filters.copy()
plugins = [x for x in self._filter_loader.all()]
self._filters = dict()
for fp in plugins:
self._filters.update(fp.filters())
self._filters.update(self._get_tests())
return self._filters.copy()
def _get_tests(self):
'''
Returns tests plugins, after loading and caching them if need be
'''
if self._tests is not None:
return self._tests.copy()
plugins = [x for x in self._test_loader.all()]
self._tests = dict()
for fp in plugins:
self._tests.update(fp.tests())
return self._tests.copy()
def _get_extensions(self):
'''
Return jinja2 extensions to load.
If some extensions are set via jinja_extensions in ansible.cfg, we try
to load them with the jinja environment.
'''
jinja_exts = []
if C.DEFAULT_JINJA2_EXTENSIONS:
# make sure the configuration directive doesn't contain spaces
# and split extensions in an array
jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
return jinja_exts
def _clean_data(self, orig_data):
''' remove jinja2 template tags from a string '''
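# Illustrative effect (the exact markers come from the active jinja2
# environment): "{{ foo }}" is rewritten to "{# foo #}", so values flagged
# as unsafe are not re-templated on a later pass.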
if not isinstance(orig_data, string_types):
return orig_data
with contextlib.closing(StringIO(orig_data)) as data:
# these variables keep track of opening block locations, as we only
# want to replace matched pairs of print/block tags
print_openings = []
block_openings = []
for mo in self._clean_regex.finditer(orig_data):
token = mo.group(0)
token_start = mo.start(0)
if token[0] == self.variable_start[0]:
if token == self.block_start:
block_openings.append(token_start)
elif token == self.variable_start:
print_openings.append(token_start)
elif token[1] == self.variable_end[1]:
prev_idx = None
if token == self.block_end and block_openings:
prev_idx = block_openings.pop()
elif token == self.variable_end and print_openings:
prev_idx = print_openings.pop()
if prev_idx is not None:
# replace the opening
data.seek(prev_idx, os.SEEK_SET)
data.write(self.environment.comment_start_string)
# replace the closing
data.seek(token_start, os.SEEK_SET)
data.write(self.environment.comment_end_string)
else:
raise AnsibleError("Error while cleaning data for safety: unhandled regex match")
return data.getvalue()
def set_available_variables(self, variables):
'''
Sets the list of template variables this Templar instance will use
to template things, so we don't have to pass them around between
internal methods. We also clear the template cache here, as the variables
are being changed.
'''
assert isinstance(variables, dict)
self._available_variables = variables
self._cached_result = {}
def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, convert_data=True, static_vars = [''], cache = True):
'''
Templates (possibly recursively) any given data as input. If convert_bare is
set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
before being sent through the template engine.
'''
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
# Don't template unsafe variables, instead drop them back down to
# their constituent type.
if hasattr(variable, '__UNSAFE__'):
if isinstance(variable, text_type):
return self._clean_data(text_type(variable))
elif isinstance(variable, binary_type):
return self._clean_data(bytes(variable))
else:
return self._clean_data(variable._obj)
try:
if convert_bare:
variable = self._convert_bare_variable(variable)
if isinstance(variable, string_types):
result = variable
if self._contains_vars(variable):
# Check to see if the string we are trying to render is just referencing a single
# var. In this case we don't want to accidentally change the type of the variable
# to a string by using the jinja template renderer. We just want to pass it.
only_one = self.SINGLE_VAR.match(variable)
if only_one:
var_name = only_one.group(1)
if var_name in self._available_variables:
resolved_val = self._available_variables[var_name]
if isinstance(resolved_val, NON_TEMPLATED_TYPES):
return resolved_val
elif resolved_val is None:
return C.DEFAULT_NULL_REPRESENTATION
# Using a cache in order to prevent template calls with already templated variables
sha1_hash = None
if cache:
variable_hash = sha1(text_type(variable).encode('utf-8'))
options_hash = sha1((text_type(preserve_trailing_newlines) + text_type(escape_backslashes) + text_type(fail_on_undefined) + text_type(overrides)).encode('utf-8'))
sha1_hash = variable_hash.hexdigest() + options_hash.hexdigest()
if cache and sha1_hash in self._cached_result:
result = self._cached_result[sha1_hash]
else:
result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides)
if convert_data:
# if this looks like a dictionary or list, convert it to such using the safe_eval method
if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
result.startswith("[") or result in ("True", "False"):
eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True)
if eval_results[1] is None:
result = eval_results[0]
else:
# FIXME: if the safe_eval raised an error, should we do something with it?
pass
# we only cache in the case where we have a single variable
# name, to make sure we're not putting things which may otherwise
# be dynamic in the cache (filters, lookups, etc.)
if cache:
self._cached_result[sha1_hash] = result
return result
elif isinstance(variable, (list, tuple)):
return [self.template(v, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) for v in variable]
elif isinstance(variable, dict):
d = {}
# we don't use iteritems() here to avoid problems if the underlying dict
# changes sizes due to the templating, which can happen with hostvars
for k in variable.keys():
if k not in static_vars:
d[k] = self.template(variable[k], preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides)
else:
d[k] = variable[k]
return d
else:
return variable
except AnsibleFilterError:
if self._fail_on_filter_errors:
raise
else:
return variable
def _contains_vars(self, data):
'''
returns True if the data contains a variable pattern
'''
return self.environment.block_start_string in data or self.environment.variable_start_string in data
def _convert_bare_variable(self, variable):
'''
Wraps a bare string, which may have an attribute portion (ie. foo.bar)
in jinja2 variable braces so that it is evaluated properly.
'''
if isinstance(variable, string_types):
contains_filters = "|" in variable
first_part = variable.split("|")[0].split(".")[0].split("[")[0]
if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string)
# the variable didn't meet the conditions to be converted,
# so just return it as-is
return variable
def _finalize(self, thing):
'''
A custom finalize method for jinja2, which prevents None from being returned
'''
return thing if thing is not None else ''
def _lookup(self, name, *args, **kwargs):
instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)
if instance is not None:
wantlist = kwargs.pop('wantlist', False)
from ansible.utils.listify import listify_lookup_plugin_terms
loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
# safely catch run failures per #5059
try:
ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
except (AnsibleUndefinedVariable, UndefinedError) as e:
raise AnsibleUndefinedVariable(e)
except Exception as e:
if self._fail_on_lookup_errors:
raise
ran = None
if ran:
from ansible.vars.unsafe_proxy import UnsafeProxy, wrap_var
if wantlist:
ran = wrap_var(ran)
else:
ran = UnsafeProxy(",".join(ran))
return ran
else:
raise AnsibleError("lookup plugin (%s) not found" % name)
def _do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None):
# For preserving the number of input newlines in the output (used
# later in this method)
data_newlines = _count_newlines_from_end(data)
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
try:
# allows template header overrides to change jinja2 options.
if overrides is None:
myenv = self.environment.overlay()
else:
myenv = self.environment.overlay(overrides)
# Get jinja env overrides from template
if data.startswith(JINJA2_OVERRIDE):
eol = data.find('\n')
line = data[len(JINJA2_OVERRIDE):eol]
data = data[eol+1:]
for pair in line.split(','):
(key,val) = pair.split(':')
key = key.strip()
setattr(myenv, key, ast.literal_eval(val.strip()))
#FIXME: add tests
myenv.filters.update(self._get_filters())
myenv.tests.update(self._get_tests())
if escape_backslashes:
# Allow users to specify backslashes in playbooks as "\\"
# instead of as "\\\\".
data = _escape_backslashes(data, myenv)
try:
t = myenv.from_string(data)
except TemplateSyntaxError as e:
raise AnsibleError("template error while templating string: %s" % str(e))
except Exception as e:
if 'recursion' in str(e):
raise AnsibleError("recursive loop detected in template string: %s" % data)
else:
return data
t.globals['lookup'] = self._lookup
t.globals['finalize'] = self._finalize
jvars = AnsibleJ2Vars(self, t.globals)
new_context = t.new_context(jvars, shared=True)
rf = t.root_render_func(new_context)
try:
res = j2_concat(rf)
except TypeError as te:
if 'StrictUndefined' in str(te):
raise AnsibleUndefinedVariable(
"Unable to look up a name or access an attribute in template string. " + \
"Make sure your variable name does not contain invalid characters like '-'."
)
else:
debug("failing because of a type error, template data is: %s" % data)
raise AnsibleError("an unexpected type error occurred. Error was %s" % te)
if preserve_trailing_newlines:
# The low level calls above do not preserve the newline
# characters at the end of the input data, so we
# calculate the difference in newlines and append them
# to the resulting output for parity
#
# jinja2 added a keep_trailing_newline option in 2.7 when
# creating an Environment. That would let us make this code
# better (remove a single newline if
# preserve_trailing_newlines is False). Once we can depend on
# that version being present, modify our code to set that when
# initializing self.environment and remove a single trailing
# newline here if preserve_newlines is False.
res_newlines = _count_newlines_from_end(res)
if data_newlines > res_newlines:
res += '\n' * (data_newlines - res_newlines)
return res
except (UndefinedError, AnsibleUndefinedVariable) as e:
if fail_on_undefined:
raise AnsibleUndefinedVariable(e)
else:
#TODO: return warning about undefined var
return data
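# A minimal usage sketch (hedged: the DataLoader construction is simplified
# and the variable values are illustrative):
#
#   from ansible.parsing.dataloader import DataLoader
#
#   templar = Templar(loader=DataLoader(), variables={'greeting': 'hello'})
#   templar.template("{{ greeting | upper }}")    # -> 'HELLO'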
| gpl-3.0 |
ai-ku/langvis | jython-2.1/Lib/mhlib.py | 4 | 34261 | """MH interface -- purely object-oriented (well, almost)
Executive summary:
import mhlib
mh = mhlib.MH() # use default mailbox directory and profile
mh = mhlib.MH(mailbox) # override mailbox location (default from profile)
mh = mhlib.MH(mailbox, profile) # override mailbox and profile
mh.error(format, ...) # print error message -- can be overridden
s = mh.getprofile(key) # profile entry (None if not set)
path = mh.getpath() # mailbox pathname
name = mh.getcontext() # name of current folder
mh.setcontext(name) # set name of current folder
list = mh.listfolders() # names of top-level folders
list = mh.listallfolders() # names of all folders, including subfolders
list = mh.listsubfolders(name) # direct subfolders of given folder
list = mh.listallsubfolders(name) # all subfolders of given folder
mh.makefolder(name) # create new folder
mh.deletefolder(name) # delete folder -- must have no subfolders
f = mh.openfolder(name) # new open folder object
f.error(format, ...) # same as mh.error(format, ...)
path = f.getfullname() # folder's full pathname
path = f.getsequencesfilename() # full pathname of folder's sequences file
path = f.getmessagefilename(n) # full pathname of message n in folder
list = f.listmessages() # list of messages in folder (as numbers)
n = f.getcurrent() # get current message
f.setcurrent(n) # set current message
list = f.parsesequence(seq) # parse msgs syntax into list of messages
n = f.getlast() # get last message (0 if no messages)
f.setlast(n) # set last message (internal use only)
dict = f.getsequences() # dictionary of sequences in folder {name: list}
f.putsequences(dict) # write sequences back to folder
f.createmessage(n, fp) # add message from file f as number n
f.removemessages(list) # remove messages in list from folder
f.refilemessages(list, tofolder) # move messages in list to other folder
f.movemessage(n, tofolder, ton) # move one message to a given destination
f.copymessage(n, tofolder, ton) # copy one message to a given destination
m = f.openmessage(n) # new open message object (costs a file descriptor)
m is a derived class of mimetools.Message(rfc822.Message), with:
s = m.getheadertext() # text of message's headers
s = m.getheadertext(pred) # text of message's headers, filtered by pred
s = m.getbodytext() # text of message's body, decoded
s = m.getbodytext(0) # text of message's body, not decoded
"""
# XXX To do, functionality:
# - annotate messages
# - send messages
#
# XXX To do, organization:
# - move IntSet to separate file
# - move most Message functionality to module mimetools
# Customizable defaults
MH_PROFILE = '~/.mh_profile'
PATH = '~/Mail'
MH_SEQUENCES = '.mh_sequences'
FOLDER_PROTECT = 0700
# Imported modules
import os
import sys
from stat import ST_NLINK
import re
import mimetools
import multifile
import shutil
from bisect import bisect
__all__ = ["MH","Error","Folder","Message"]
# Exported constants
class Error(Exception):
pass
class MH:
"""Class representing a particular collection of folders.
Optional constructor arguments are the pathname for the directory
containing the collection, and the MH profile to use.
If either is omitted or empty a default is used; the default
directory is taken from the MH profile if it is specified there."""
def __init__(self, path = None, profile = None):
"""Constructor."""
if not profile: profile = MH_PROFILE
self.profile = os.path.expanduser(profile)
if not path: path = self.getprofile('Path')
if not path: path = PATH
if not os.path.isabs(path) and path[0] != '~':
path = os.path.join('~', path)
path = os.path.expanduser(path)
if not os.path.isdir(path): raise Error, 'MH() path not found'
self.path = path
def __repr__(self):
"""String representation."""
return 'MH(%s, %s)' % (`self.path`, `self.profile`)
def error(self, msg, *args):
"""Routine to print an error. May be overridden by a derived class."""
sys.stderr.write('MH error: %s\n' % (msg % args))
def getprofile(self, key):
"""Return a profile entry, None if not found."""
return pickline(self.profile, key)
def getpath(self):
"""Return the path (the name of the collection's directory)."""
return self.path
def getcontext(self):
"""Return the name of the current folder."""
context = pickline(os.path.join(self.getpath(), 'context'),
'Current-Folder')
if not context: context = 'inbox'
return context
def setcontext(self, context):
"""Set the name of the current folder."""
fn = os.path.join(self.getpath(), 'context')
f = open(fn, "w")
f.write("Current-Folder: %s\n" % context)
f.close()
def listfolders(self):
"""Return the names of the top-level folders."""
folders = []
path = self.getpath()
for name in os.listdir(path):
fullname = os.path.join(path, name)
if os.path.isdir(fullname):
folders.append(name)
folders.sort()
return folders
def listsubfolders(self, name):
"""Return the names of the subfolders in a given folder
(prefixed with the given folder name)."""
fullname = os.path.join(self.path, name)
# Get the link count so we can avoid listing folders
# that have no subfolders.
st = os.stat(fullname)
nlinks = st[ST_NLINK]
if nlinks <= 2:
return []
subfolders = []
subnames = os.listdir(fullname)
for subname in subnames:
fullsubname = os.path.join(fullname, subname)
if os.path.isdir(fullsubname):
name_subname = os.path.join(name, subname)
subfolders.append(name_subname)
# Stop looking for subfolders when
# we've seen them all
nlinks = nlinks - 1
if nlinks <= 2:
break
subfolders.sort()
return subfolders
def listallfolders(self):
"""Return the names of all folders and subfolders, recursively."""
return self.listallsubfolders('')
def listallsubfolders(self, name):
"""Return the names of subfolders in a given folder, recursively."""
fullname = os.path.join(self.path, name)
# Get the link count so we can avoid listing folders
# that have no subfolders.
st = os.stat(fullname)
nlinks = st[ST_NLINK]
if nlinks <= 2:
return []
subfolders = []
subnames = os.listdir(fullname)
for subname in subnames:
if subname[0] == ',' or isnumeric(subname): continue
fullsubname = os.path.join(fullname, subname)
if os.path.isdir(fullsubname):
name_subname = os.path.join(name, subname)
subfolders.append(name_subname)
if not os.path.islink(fullsubname):
subsubfolders = self.listallsubfolders(
name_subname)
subfolders = subfolders + subsubfolders
# Stop looking for subfolders when
# we've seen them all
nlinks = nlinks - 1
if nlinks <= 2:
break
subfolders.sort()
return subfolders
def openfolder(self, name):
"""Return a new Folder object for the named folder."""
return Folder(self, name)
def makefolder(self, name):
"""Create a new folder (or raise os.error if it cannot be created)."""
protect = pickline(self.profile, 'Folder-Protect')
if protect and isnumeric(protect):
mode = int(protect, 8)
else:
mode = FOLDER_PROTECT
os.mkdir(os.path.join(self.getpath(), name), mode)
def deletefolder(self, name):
"""Delete a folder. This removes files in the folder but not
subdirectories. Raise os.error if deleting the folder itself fails."""
fullname = os.path.join(self.getpath(), name)
for subname in os.listdir(fullname):
fullsubname = os.path.join(fullname, subname)
try:
os.unlink(fullsubname)
except os.error:
self.error('%s not deleted, continuing...' %
fullsubname)
os.rmdir(fullname)
numericprog = re.compile('^[1-9][0-9]*$')
def isnumeric(str):
return numericprog.match(str) is not None
class Folder:
"""Class representing a particular folder."""
def __init__(self, mh, name):
"""Constructor."""
self.mh = mh
self.name = name
if not os.path.isdir(self.getfullname()):
raise Error, 'no folder %s' % name
def __repr__(self):
"""String representation."""
return 'Folder(%s, %s)' % (`self.mh`, `self.name`)
def error(self, *args):
"""Error message handler."""
apply(self.mh.error, args)
def getfullname(self):
"""Return the full pathname of the folder."""
return os.path.join(self.mh.path, self.name)
def getsequencesfilename(self):
"""Return the full pathname of the folder's sequences file."""
return os.path.join(self.getfullname(), MH_SEQUENCES)
def getmessagefilename(self, n):
"""Return the full pathname of a message in the folder."""
return os.path.join(self.getfullname(), str(n))
def listsubfolders(self):
"""Return list of direct subfolders."""
return self.mh.listsubfolders(self.name)
def listallsubfolders(self):
"""Return list of all subfolders."""
return self.mh.listallsubfolders(self.name)
def listmessages(self):
"""Return the list of messages currently present in the folder.
As a side effect, set self.last to the last message (or 0)."""
messages = []
match = numericprog.match
append = messages.append
for name in os.listdir(self.getfullname()):
if match(name):
append(name)
messages = map(int, messages)
messages.sort()
if messages:
self.last = messages[-1]
else:
self.last = 0
return messages
def getsequences(self):
"""Return the set of sequences for the folder."""
sequences = {}
fullname = self.getsequencesfilename()
try:
f = open(fullname, 'r')
except IOError:
return sequences
while 1:
line = f.readline()
if not line: break
fields = line.split(':')
if len(fields) != 2:
self.error('bad sequence in %s: %s' %
(fullname, line.strip()))
key = fields[0].strip()
value = IntSet(fields[1].strip(), ' ').tolist()
sequences[key] = value
return sequences
def putsequences(self, sequences):
"""Write the set of sequences back to the folder."""
fullname = self.getsequencesfilename()
f = None
for key in sequences.keys():
s = IntSet('', ' ')
s.fromlist(sequences[key])
if not f: f = open(fullname, 'w')
f.write('%s: %s\n' % (key, s.tostring()))
if not f:
try:
os.unlink(fullname)
except os.error:
pass
else:
f.close()
def getcurrent(self):
"""Return the current message. Raise Error when there is none."""
seqs = self.getsequences()
try:
return max(seqs['cur'])
except (ValueError, KeyError):
raise Error, "no cur message"
def setcurrent(self, n):
"""Set the current message."""
updateline(self.getsequencesfilename(), 'cur', str(n), 0)
def parsesequence(self, seq):
"""Parse an MH sequence specification into a message list.
Attempt to mimic mh-sequence(5) as closely as possible.
Also attempt to mimic observed behavior regarding which
conditions cause which error messages."""
# XXX Still not complete (see mh-format(5)).
# Missing are:
# - 'prev', 'next' as count
# - Sequence-Negation option
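# Examples of accepted syntax (illustrative): 'all', 'cur', '12',
# '5-10' (a range), 'last:3' (the last three), 'cur:-2' (two ending at cur).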
all = self.listmessages()
# Observed behavior: test for empty folder is done first
if not all:
raise Error, "no messages in %s" % self.name
# Common case first: all is frequently the default
if seq == 'all':
return all
# Test for X:Y before X-Y because 'seq:-n' matches both
i = seq.find(':')
if i >= 0:
head, dir, tail = seq[:i], '', seq[i+1:]
if tail[:1] in '-+':
dir, tail = tail[:1], tail[1:]
if not isnumeric(tail):
raise Error, "bad message list %s" % seq
try:
count = int(tail)
except (ValueError, OverflowError):
# Can't use sys.maxint because of i+count below
count = len(all)
try:
anchor = self._parseindex(head, all)
except Error, msg:
seqs = self.getsequences()
if not seqs.has_key(head):
if not msg:
msg = "bad message list %s" % seq
raise Error, msg, sys.exc_info()[2]
msgs = seqs[head]
if not msgs:
raise Error, "sequence %s empty" % head
if dir == '-':
return msgs[-count:]
else:
return msgs[:count]
else:
if not dir:
if head in ('prev', 'last'):
dir = '-'
if dir == '-':
i = bisect(all, anchor)
return all[max(0, i-count):i]
else:
i = bisect(all, anchor-1)
return all[i:i+count]
# Test for X-Y next
i = seq.find('-')
if i >= 0:
begin = self._parseindex(seq[:i], all)
end = self._parseindex(seq[i+1:], all)
i = bisect(all, begin-1)
j = bisect(all, end)
r = all[i:j]
if not r:
raise Error, "bad message list %s" % seq
return r
# Neither X:Y nor X-Y; must be a number or a (pseudo-)sequence
try:
n = self._parseindex(seq, all)
except Error, msg:
seqs = self.getsequences()
if not seqs.has_key(seq):
if not msg:
msg = "bad message list %s" % seq
raise Error, msg
return seqs[seq]
else:
if n not in all:
if isnumeric(seq):
raise Error, "message %d doesn't exist" % n
else:
raise Error, "no %s message" % seq
else:
return [n]
def _parseindex(self, seq, all):
"""Internal: parse a message number (or cur, first, etc.)."""
if isnumeric(seq):
try:
return int(seq)
except (OverflowError, ValueError):
return sys.maxint
if seq in ('cur', '.'):
return self.getcurrent()
if seq == 'first':
return all[0]
if seq == 'last':
return all[-1]
if seq == 'next':
n = self.getcurrent()
i = bisect(all, n)
try:
return all[i]
except IndexError:
raise Error, "no next message"
if seq == 'prev':
n = self.getcurrent()
i = bisect(all, n-1)
if i == 0:
raise Error, "no prev message"
try:
return all[i-1]
except IndexError:
raise Error, "no prev message"
raise Error, None
def openmessage(self, n):
"""Open a message -- returns a Message object."""
return Message(self, n)
def removemessages(self, list):
"""Remove one or more messages -- may raise os.error."""
errors = []
deleted = []
for n in list:
path = self.getmessagefilename(n)
commapath = self.getmessagefilename(',' + str(n))
try:
os.unlink(commapath)
except os.error:
pass
try:
os.rename(path, commapath)
except os.error, msg:
errors.append(msg)
else:
deleted.append(n)
if deleted:
self.removefromallsequences(deleted)
if errors:
if len(errors) == 1:
raise os.error, errors[0]
else:
raise os.error, ('multiple errors:', errors)
def refilemessages(self, list, tofolder, keepsequences=0):
"""Refile one or more messages -- may raise os.error.
'tofolder' is an open folder object."""
errors = []
refiled = {}
for n in list:
ton = tofolder.getlast() + 1
path = self.getmessagefilename(n)
topath = tofolder.getmessagefilename(ton)
try:
os.rename(path, topath)
except os.error:
# Try copying
try:
shutil.copy2(path, topath)
os.unlink(path)
except (IOError, os.error), msg:
errors.append(msg)
try:
os.unlink(topath)
except os.error:
pass
continue
tofolder.setlast(ton)
refiled[n] = ton
if refiled:
if keepsequences:
tofolder._copysequences(self, refiled.items())
self.removefromallsequences(refiled.keys())
if errors:
if len(errors) == 1:
raise os.error, errors[0]
else:
raise os.error, ('multiple errors:', errors)
def _copysequences(self, fromfolder, refileditems):
"""Helper for refilemessages() to copy sequences."""
fromsequences = fromfolder.getsequences()
tosequences = self.getsequences()
changed = 0
for name, seq in fromsequences.items():
try:
toseq = tosequences[name]
new = 0
except:
toseq = []
new = 1
for fromn, ton in refileditems:
if fromn in seq:
toseq.append(ton)
changed = 1
if new and toseq:
tosequences[name] = toseq
if changed:
self.putsequences(tosequences)
def movemessage(self, n, tofolder, ton):
"""Move one message over a specific destination message,
which may or may not already exist."""
path = self.getmessagefilename(n)
# Open it to check that it exists
f = open(path)
f.close()
del f
topath = tofolder.getmessagefilename(ton)
backuptopath = tofolder.getmessagefilename(',%d' % ton)
try:
os.rename(topath, backuptopath)
except os.error:
pass
try:
os.rename(path, topath)
except os.error:
# Try copying
ok = 0
try:
tofolder.setlast(None)
shutil.copy2(path, topath)
ok = 1
finally:
if not ok:
try:
os.unlink(topath)
except os.error:
pass
os.unlink(path)
self.removefromallsequences([n])
def copymessage(self, n, tofolder, ton):
"""Copy one message over a specific destination message,
which may or may not already exist."""
path = self.getmessagefilename(n)
# Open it to check that it exists
f = open(path)
f.close()
del f
topath = tofolder.getmessagefilename(ton)
backuptopath = tofolder.getmessagefilename(',%d' % ton)
try:
os.rename(topath, backuptopath)
except os.error:
pass
ok = 0
try:
tofolder.setlast(None)
shutil.copy2(path, topath)
ok = 1
finally:
if not ok:
try:
os.unlink(topath)
except os.error:
pass
def createmessage(self, n, txt):
"""Create a message, with text from the open file txt."""
path = self.getmessagefilename(n)
backuppath = self.getmessagefilename(',%d' % n)
try:
os.rename(path, backuppath)
except os.error:
pass
ok = 0
BUFSIZE = 16*1024
try:
f = open(path, "w")
while 1:
buf = txt.read(BUFSIZE)
if not buf:
break
f.write(buf)
f.close()
ok = 1
finally:
if not ok:
try:
os.unlink(path)
except os.error:
pass
def removefromallsequences(self, list):
"""Remove one or more messages from all sequences (including last)
-- but not from 'cur'!!!"""
if hasattr(self, 'last') and self.last in list:
del self.last
sequences = self.getsequences()
changed = 0
for name, seq in sequences.items():
if name == 'cur':
continue
for n in list:
if n in seq:
seq.remove(n)
changed = 1
if not seq:
del sequences[name]
if changed:
self.putsequences(sequences)
def getlast(self):
"""Return the last message number."""
if not hasattr(self, 'last'):
messages = self.listmessages()
return self.last
def setlast(self, last):
"""Set the last message number."""
if last is None:
if hasattr(self, 'last'):
del self.last
else:
self.last = last
class Message(mimetools.Message):
def __init__(self, f, n, fp = None):
"""Constructor."""
self.folder = f
self.number = n
if not fp:
path = f.getmessagefilename(n)
fp = open(path, 'r')
mimetools.Message.__init__(self, fp)
def __repr__(self):
"""String representation."""
return 'Message(%s, %s)' % (repr(self.folder), self.number)
def getheadertext(self, pred = None):
"""Return the message's header text as a string. If an
argument is specified, it is used as a filter predicate to
decide which headers to return (its argument is the header
name converted to lower case)."""
if not pred:
return ''.join(self.headers)
headers = []
hit = 0
for line in self.headers:
if not line[0].isspace():
i = line.find(':')
if i > 0:
hit = pred(line[:i].lower())
if hit: headers.append(line)
return ''.join(headers)
def getbodytext(self, decode = 1):
"""Return the message's body text as string. This undoes a
Content-Transfer-Encoding, but does not interpret other MIME
features (e.g. multipart messages). To suppress decoding,
pass 0 as an argument."""
self.fp.seek(self.startofbody)
encoding = self.getencoding()
if not decode or encoding in ('', '7bit', '8bit', 'binary'):
return self.fp.read()
from StringIO import StringIO
output = StringIO()
mimetools.decode(self.fp, output, encoding)
return output.getvalue()
def getbodyparts(self):
"""Only for multipart messages: return the message's body as a
list of SubMessage objects. Each submessage object behaves
(almost) as a Message object."""
if self.getmaintype() != 'multipart':
raise Error, 'Content-Type is not multipart/*'
bdry = self.getparam('boundary')
if not bdry:
raise Error, 'multipart/* without boundary param'
self.fp.seek(self.startofbody)
mf = multifile.MultiFile(self.fp)
mf.push(bdry)
parts = []
while mf.next():
n = str(self.number) + '.' + `1 + len(parts)`
part = SubMessage(self.folder, n, mf)
parts.append(part)
mf.pop()
return parts
def getbody(self):
"""Return body, either a string or a list of messages."""
if self.getmaintype() == 'multipart':
return self.getbodyparts()
else:
return self.getbodytext()
class SubMessage(Message):
def __init__(self, f, n, fp):
"""Constructor."""
Message.__init__(self, f, n, fp)
if self.getmaintype() == 'multipart':
self.body = Message.getbodyparts(self)
else:
self.body = Message.getbodytext(self)
self.bodyencoded = Message.getbodytext(self, decode=0)
# XXX If this is big, should remember file pointers
def __repr__(self):
"""String representation."""
f, n, fp = self.folder, self.number, self.fp
return 'SubMessage(%s, %s, %s)' % (f, n, fp)
def getbodytext(self, decode = 1):
if not decode:
return self.bodyencoded
if type(self.body) == type(''):
return self.body
def getbodyparts(self):
if type(self.body) == type([]):
return self.body
def getbody(self):
return self.body
class IntSet:
"""Class implementing sets of integers.
This is an efficient representation for sets consisting of several
continuous ranges, e.g. 1-100,200-400,402-1000 is represented
internally as a list of three pairs: [(1,100), (200,400),
(402,1000)]. The internal representation is always kept normalized.
The constructor has up to three arguments:
- the string used to initialize the set (default ''),
- the separator between ranges (default ',')
- the separator between begin and end of a range (default '-')
The separators must be strings (not regexprs) and should be different.
The tostring() function yields a string that can be passed to another
IntSet constructor; __repr__() is a valid IntSet constructor itself.
"""
# XXX The default begin/end separator means that negative numbers are
# not supported very well.
#
# XXX There are currently no operations to remove set elements.
def __init__(self, data = None, sep = ',', rng = '-'):
self.pairs = []
self.sep = sep
self.rng = rng
if data: self.fromstring(data)
def reset(self):
self.pairs = []
def __cmp__(self, other):
return cmp(self.pairs, other.pairs)
def __hash__(self):
return hash(self.pairs)
def __repr__(self):
return 'IntSet(%s, %s, %s)' % (`self.tostring()`,
`self.sep`, `self.rng`)
def normalize(self):
self.pairs.sort()
i = 1
while i < len(self.pairs):
alo, ahi = self.pairs[i-1]
blo, bhi = self.pairs[i]
if ahi >= blo-1:
self.pairs[i-1:i+1] = [(alo, max(ahi, bhi))]
else:
i = i+1
def tostring(self):
s = ''
for lo, hi in self.pairs:
if lo == hi: t = `lo`
else: t = `lo` + self.rng + `hi`
if s: s = s + (self.sep + t)
else: s = t
return s
def tolist(self):
l = []
for lo, hi in self.pairs:
m = range(lo, hi+1)
l = l + m
return l
def fromlist(self, list):
for i in list:
self.append(i)
def clone(self):
new = IntSet()
new.pairs = self.pairs[:]
return new
def min(self):
return self.pairs[0][0]
def max(self):
return self.pairs[-1][-1]
def contains(self, x):
for lo, hi in self.pairs:
if lo <= x <= hi: return 1
return 0
def append(self, x):
for i in range(len(self.pairs)):
lo, hi = self.pairs[i]
if x < lo: # Need to insert before
if x+1 == lo:
self.pairs[i] = (x, hi)
else:
self.pairs.insert(i, (x, x))
if i > 0 and x-1 == self.pairs[i-1][1]:
# Merge with previous
self.pairs[i-1:i+1] = [
(self.pairs[i-1][0],
self.pairs[i][1])
]
return
if x <= hi: # Already in set
return
i = len(self.pairs) - 1
if i >= 0:
lo, hi = self.pairs[i]
if x-1 == hi:
self.pairs[i] = lo, x
return
self.pairs.append((x, x))
def addpair(self, xlo, xhi):
if xlo > xhi: return
self.pairs.append((xlo, xhi))
self.normalize()
def fromstring(self, data):
new = []
for part in data.split(self.sep):
list = []
for subp in part.split(self.rng):
s = subp.strip()
list.append(int(s))
if len(list) == 1:
new.append((list[0], list[0]))
elif len(list) == 2 and list[0] <= list[1]:
new.append((list[0], list[1]))
else:
raise ValueError, 'bad data passed to IntSet'
self.pairs = self.pairs + new
self.normalize()
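# A short IntSet demonstration (illustrative, mirroring the class docstring):
#
#   s = IntSet('1-3,5')
#   s.append(4)        # adjacent ranges merge; pairs become [(1, 5)]
#   s.tostring()       # -> '1-5'
#   s.contains(2)      # -> 1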
# Subroutines to read/write entries in .mh_profile and .mh_sequences
def pickline(file, key, casefold = 1):
try:
f = open(file, 'r')
except IOError:
return None
pat = re.escape(key) + ':'
prog = re.compile(pat, casefold and re.IGNORECASE)
while 1:
line = f.readline()
if not line: break
if prog.match(line):
text = line[len(key)+1:]
while 1:
line = f.readline()
if not line or not line[0].isspace():
break
text = text + line
return text.strip()
return None
def updateline(file, key, value, casefold = 1):
try:
f = open(file, 'r')
lines = f.readlines()
f.close()
except IOError:
lines = []
pat = re.escape(key) + ':(.*)\n'
prog = re.compile(pat, casefold and re.IGNORECASE)
if value is None:
newline = None
else:
newline = '%s: %s\n' % (key, value)
for i in range(len(lines)):
line = lines[i]
if prog.match(line):
if newline is None:
del lines[i]
else:
lines[i] = newline
break
else:
if newline is not None:
lines.append(newline)
tempfile = file + "~"
f = open(tempfile, 'w')
for line in lines:
f.write(line)
f.close()
os.rename(tempfile, file)
# Test program
def test():
global mh, f
os.system('rm -rf $HOME/Mail/@test')
mh = MH()
def do(s): print s; print eval(s)
do('mh.listfolders()')
do('mh.listallfolders()')
testfolders = ['@test', '@test/test1', '@test/test2',
'@test/test1/test11', '@test/test1/test12',
'@test/test1/test11/test111']
for t in testfolders: do('mh.makefolder(%s)' % `t`)
do('mh.listsubfolders(\'@test\')')
do('mh.listallsubfolders(\'@test\')')
f = mh.openfolder('@test')
do('f.listsubfolders()')
do('f.listallsubfolders()')
do('f.getsequences()')
seqs = f.getsequences()
seqs['foo'] = IntSet('1-10 12-20', ' ').tolist()
print seqs
f.putsequences(seqs)
do('f.getsequences()')
testfolders.reverse()
for t in testfolders: do('mh.deletefolder(%s)' % `t`)
do('mh.getcontext()')
context = mh.getcontext()
f = mh.openfolder(context)
do('f.getcurrent()')
for seq in ['first', 'last', 'cur', '.', 'prev', 'next',
'first:3', 'last:3', 'cur:3', 'cur:-3',
'prev:3', 'next:3',
'1:3', '1:-3', '100:3', '100:-3', '10000:3', '10000:-3',
'all']:
try:
do('f.parsesequence(%r)' % seq)
except Error, msg:
print "Error:", msg
stuff = os.popen("pick %r 2>/dev/null" % seq).read()
list = map(int, stuff.split())
print list, "<-- pick"
do('f.listmessages()')
if __name__ == '__main__':
test()
| mit |
Aendra/django_vgv | admin/views.py | 4 | 2342 | # -*- coding: utf-8 -*-
from django.http import Http404
from django.shortcuts import get_object_or_404
from cms.models import Page, Title, CMSPlugin, Placeholder
def revert_plugins(request, version_id, obj):
from cms.utils.reversion_hacks import Version
version = get_object_or_404(Version, pk=version_id)
revs = [related_version.object_version for related_version in version.revision.version_set.all()]
cms_plugin_list = []
placeholders = {}
plugin_list = []
titles = []
others = []
page = obj
for rev in revs:
obj = rev.object
if obj.__class__ == Placeholder:
placeholders[obj.pk] = obj
if obj.__class__ == CMSPlugin:
cms_plugin_list.append(obj)
elif hasattr(obj, 'cmsplugin_ptr_id'):
plugin_list.append(obj)
elif obj.__class__ == Page:
pass
elif obj.__class__ == Title:
titles.append(obj)
else:
others.append(rev)
if not page.has_change_permission(request):
raise Http404
current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page))
for pk, placeholder in placeholders.items():
# the admin has already created the placeholders; get them instead
try:
placeholders[pk] = page.placeholders.get(slot=placeholder.slot)
except Placeholder.DoesNotExist:
placeholders[pk].save()
page.placeholders.add(placeholders[pk])
for plugin in cms_plugin_list:
# connect plugins to the correct placeholder
plugin.placeholder = placeholders[plugin.placeholder_id]
plugin.save(no_signals=True)
for plugin in cms_plugin_list:
plugin.save()
for p in plugin_list:
if int(p.cmsplugin_ptr_id) == int(plugin.pk):
plugin.set_base_attr(p)
p.save()
for old in current_plugins:
if old.pk == plugin.pk:
plugin.save()
current_plugins.remove(old)
for title in titles:
title.page = page
try:
title.save()
except:
title.pk = Title.objects.get(page=page, language=title.language).pk
title.save()
for other in others:
other.object.save()
for plugin in current_plugins:
plugin.delete()
| bsd-3-clause |
sokil/DistributiveManager | routes/distributive.py | 1 | 4993 | import os
from flask import Blueprint, jsonify, render_template, request, current_app, redirect, url_for, flash, abort, Response
from flask_login import login_required
from bson.objectid import ObjectId
distributive = Blueprint('distributive', __name__)
@distributive.route('/distributive/list/<environment_name>')
@login_required
def distributive_list(environment_name):
# environment
environment = current_app.connection.Environment.find_one({'name': environment_name})
if environment is None:
abort(404)
# distributive list
distributives = current_app.connection.Distributive.find({
'environment': environment['_id']
}).sort('version.number', -1)
if distributives is None:
distributives = []
# render
return render_template("distributive_list.html",
environment=environment,
distributives=distributives
)
@distributive.route('/distributive/new/<environment_id>')
@login_required
def distributive_new(environment_id):
# get environment_id
environment_instance = current_app.connection.Environment.find_one({'_id': ObjectId(environment_id)})
if environment_instance is None:
raise Exception('Environment not found')
# init distributive
distributive_instance = current_app.connection.Distributive()
distributive_instance.set_environment(environment_instance)
return render_template('distributive_edit.html', distributive=distributive_instance)
@distributive.route('/distributive/edit/<distributive_id>')
@login_required
def distributive_edit(distributive_id):
# get distributive
distributive_instance = current_app.connection.Distributive.find_one({'_id': ObjectId(distributive_id)})
if distributive_instance is None:
raise Exception('Distributive not found')
return render_template('distributive_edit.html', distributive=distributive_instance)
@distributive.route("/distributive/save", methods=['POST'])
@login_required
def distributive_save():
distributive_id = request.form['id']
if distributive_id:
# get distributive
distributive_instance = current_app.connection.Distributive.find_one({'_id': ObjectId(distributive_id)})
# get environment
environment_instance = current_app.connection.Environment.find_one({'_id': distributive_instance['environment']})
else:
# get environment
environment_id = request.form['environment']
environment_instance = current_app.connection.Environment.find_one({'_id': ObjectId(environment_id)})
if environment_instance is None:
raise Exception('Environment not found')
# create distributive
distributive_instance = current_app.connection.Distributive()
distributive_instance.set_environment(environment_instance)
# version
distributive_instance.set_version(request.form['version'])
# upload file
if request.files['file'].filename:
distributive_instance.set_file(request.files['file'])
distributive_instance.save()
flash('Successfully saved')
return redirect(url_for('.distributive_list', environment_name=environment_instance['name']))
@distributive.route('/distributive/delete/<distributive_id>')
@login_required
def distributive_delete(distributive_id):
# get distributive
distributive_instance = current_app.connection.Distributive.find_one({'_id': ObjectId(distributive_id)})
if distributive_instance is None:
raise Exception('Distributive not found')
# get related environment
environment_instance = current_app.connection.Environment.find_one({'_id': distributive_instance['environment']})
# delete distributive
distributive_instance.delete()
flash('Successfully deleted')
return redirect(url_for('.distributive_list', environment_name=environment_instance['name']))
@distributive.route('/dl/<environment_name>')
@distributive.route('/dl/<environment_name>/<version_caption>')
def distributive_download(environment_name, version_caption='latest'):
# get distributive instance
environment_instance = current_app.connection.Environment.find_one({'name': environment_name})
if environment_instance is None:
abort(404)
if version_caption == 'latest':
distributive_instance = environment_instance.get_latest_distributive()
else:
distributive_instance = current_app.connection.Distributive.find_one({
'environment': environment_instance['_id'],
'version.caption': version_caption
})
if distributive_instance is None:
abort(404)
# increment download counter in stat
distributive_instance.hit()
# redirect to download
response = Response()
response.headers.add('Content-Disposition', 'attachment; filename="' + distributive_instance['file'] + '"')
response.headers.add('X-Accel-Redirect', distributive_instance.get_accel_redirect_url())
return response
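# Note: X-Accel-Redirect hands the byte transfer off to nginx (the target
# must be declared as an "internal" location there); this response only
# supplies the headers.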
| mit |
bjwelker/Raspi-Rollo | webapp/app.py | 1 | 1182 | #!/usr/bin/python
from flask import Flask, render_template, send_from_directory, request, jsonify
import os
app = Flask(__name__)
''' Channel config 1-5. Chan 0 addresses all 5 channels at once.
Mode 1 up - Mode 2 down - Mode 3 stop '''
config = {'chan0mode1': '', 'chan0mode2': '', 'chan0mode3': '', 'chan1mode1': '', 'chan1mode2': '', 'chan1mode3': '', 'chan2mode1': '', 'chan2mode2': '', 'chan2mode3': '', 'chan3mode1': '', 'chan3mode2': '', 'chan3mode3': '', 'chan4mode1': '', 'chan4mode2': '', 'chan4mode3': '', 'chan5mode1': '', 'chan5mode2': '', 'chan5mode3': ''}
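# Hypothetical example of filling in the table above and driving it -- the
# actual codes depend on the 433 MHz remote paired with your blinds:
#
#     config['chan1mode1'] = '1234501'   # channel 1, up
#     config['chan1mode2'] = '1234502'   # channel 1, down
#
# A client would then POST JSON such as {"chan": 1, "mode": 2} to /api.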
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api', methods=['POST'])
def rollo_switch():
payload = request.json
if 0 <= int(payload['chan']) <= 5:
todo = "chan" + str(payload['chan']) + "mode" + str(payload['mode'])
if config[todo] != '':
cmdsend = "sudo bin/sendv2 " + str(config[todo])
os.system(cmdsend);
return jsonify({'ok': 'Done'})
else:
return jsonify({'error': 'Device code not set'})
else:
return jsonify({'error': 'Wrong Channel'})
if __name__ == '__main__':
#app.debug = True
app.run(host='0.0.0.0')
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/pygments/lexers/perl.py | 35 | 31601 | # -*- coding: utf-8 -*-
"""
pygments.lexers.perl
~~~~~~~~~~~~~~~~~~~~
Lexers for Perl and related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
using, this, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.util import shebang_matches
__all__ = ['PerlLexer', 'Perl6Lexer']
class PerlLexer(RegexLexer):
"""
For `Perl <http://www.perl.org>`_ source code.
"""
name = 'Perl'
aliases = ['perl', 'pl']
filenames = ['*.pl', '*.pm', '*.t']
mimetypes = ['text/x-perl', 'application/x-perl']
flags = re.DOTALL | re.MULTILINE
# TODO: give this to a perl guy who knows how to parse perl...
tokens = {
'balanced-regex': [
(r'/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\[^\\]|[^\\!])*![egimosx]*', String.Regex, '#pop'),
(r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
(r'\{(\\\\|\\[^\\]|[^\\}])*\}[egimosx]*', String.Regex, '#pop'),
(r'<(\\\\|\\[^\\]|[^\\>])*>[egimosx]*', String.Regex, '#pop'),
(r'\[(\\\\|\\[^\\]|[^\\\]])*\][egimosx]*', String.Regex, '#pop'),
(r'\((\\\\|\\[^\\]|[^\\)])*\)[egimosx]*', String.Regex, '#pop'),
(r'@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\[^\\]|[^\\$])*\$[egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\A\#!.+?$', Comment.Hashbang),
(r'\#.*?$', Comment.Single),
(r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline),
(words((
'case', 'continue', 'do', 'else', 'elsif', 'for', 'foreach',
'if', 'last', 'my', 'next', 'our', 'redo', 'reset', 'then',
'unless', 'until', 'while', 'use', 'print', 'new', 'BEGIN',
'CHECK', 'INIT', 'END', 'return'), suffix=r'\b'),
Keyword),
(r'(format)(\s+)(\w+)(\s*)(=)(\s*\n)',
bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'),
(r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word),
# common delimiters
(r's/(\\\\|\\[^\\]|[^\\/])*/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*',
String.Regex),
(r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex),
(r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex),
(r's@(\\\\|\\[^\\]|[^\\@])*@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*',
String.Regex),
(r's%(\\\\|\\[^\\]|[^\\%])*%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*',
String.Regex),
# balanced delimiters
(r's\{(\\\\|\\[^\\]|[^\\}])*\}\s*', String.Regex, 'balanced-regex'),
(r's<(\\\\|\\[^\\]|[^\\>])*>\s*', String.Regex, 'balanced-regex'),
(r's\[(\\\\|\\[^\\]|[^\\\]])*\]\s*', String.Regex,
'balanced-regex'),
(r's\((\\\\|\\[^\\]|[^\\)])*\)\s*', String.Regex,
'balanced-regex'),
(r'm?/(\\\\|\\[^\\]|[^\\/\n])*/[gcimosx]*', String.Regex),
(r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'),
(r'((?<==~)|(?<=\())\s*/(\\\\|\\[^\\]|[^\\/])*/[gcimosx]*',
String.Regex),
(r'\s+', Text),
(words((
'abs', 'accept', 'alarm', 'atan2', 'bind', 'binmode', 'bless', 'caller', 'chdir',
'chmod', 'chomp', 'chop', 'chown', 'chr', 'chroot', 'close', 'closedir', 'connect',
'continue', 'cos', 'crypt', 'dbmclose', 'dbmopen', 'defined', 'delete', 'die',
'dump', 'each', 'endgrent', 'endhostent', 'endnetent', 'endprotoent',
'endpwent', 'endservent', 'eof', 'eval', 'exec', 'exists', 'exit', 'exp', 'fcntl',
'fileno', 'flock', 'fork', 'format', 'formline', 'getc', 'getgrent', 'getgrgid',
'getgrnam', 'gethostbyaddr', 'gethostbyname', 'gethostent', 'getlogin',
'getnetbyaddr', 'getnetbyname', 'getnetent', 'getpeername', 'getpgrp',
'getppid', 'getpriority', 'getprotobyname', 'getprotobynumber',
'getprotoent', 'getpwent', 'getpwnam', 'getpwuid', 'getservbyname',
'getservbyport', 'getservent', 'getsockname', 'getsockopt', 'glob', 'gmtime',
'goto', 'grep', 'hex', 'import', 'index', 'int', 'ioctl', 'join', 'keys', 'kill', 'last',
'lc', 'lcfirst', 'length', 'link', 'listen', 'local', 'localtime', 'log', 'lstat',
'map', 'mkdir', 'msgctl', 'msgget', 'msgrcv', 'msgsnd', 'my', 'next', 'no', 'oct', 'open',
'opendir', 'ord', 'our', 'pack', 'package', 'pipe', 'pop', 'pos', 'printf',
'prototype', 'push', 'quotemeta', 'rand', 'read', 'readdir',
'readline', 'readlink', 'readpipe', 'recv', 'redo', 'ref', 'rename', 'require',
'reverse', 'rewinddir', 'rindex', 'rmdir', 'scalar', 'seek', 'seekdir',
'select', 'semctl', 'semget', 'semop', 'send', 'setgrent', 'sethostent', 'setnetent',
'setpgrp', 'setpriority', 'setprotoent', 'setpwent', 'setservent',
'setsockopt', 'shift', 'shmctl', 'shmget', 'shmread', 'shmwrite', 'shutdown',
'sin', 'sleep', 'socket', 'socketpair', 'sort', 'splice', 'split', 'sprintf', 'sqrt',
'srand', 'stat', 'study', 'substr', 'symlink', 'syscall', 'sysopen', 'sysread',
'sysseek', 'system', 'syswrite', 'tell', 'telldir', 'tie', 'tied', 'time', 'times', 'tr',
'truncate', 'uc', 'ucfirst', 'umask', 'undef', 'unlink', 'unpack', 'unshift', 'untie',
'utime', 'values', 'vec', 'wait', 'waitpid', 'wantarray', 'warn', 'write'), suffix=r'\b'),
Name.Builtin),
(r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo),
(r'<<([\'"]?)([a-zA-Z_]\w*)\1;?\n.*?\n\2\n', String),
(r'__END__', Comment.Preproc, 'end-part'),
(r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global),
(r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global),
(r'[$@%#]+', Name.Variable, 'varname'),
(r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
(r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
(r'0b[01]+(_[01]+)*', Number.Bin),
(r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?',
Number.Float),
(r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float),
(r'\d+(_\d+)*', Number.Integer),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r'`(\\\\|\\[^\\]|[^`\\])*`', String.Backtick),
(r'<([^\s>]+)>', String.Regex),
(r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'),
(r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'),
(r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'),
(r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'),
(r'(q|qq|qw|qr|qx)([\W_])(.|\n)*?\2', String.Other),
(r'package\s+', Keyword, 'modulename'),
(r'sub\s+', Keyword, 'funcname'),
(r'(\[\]|\*\*|::|<<|>>|>=|<=>|<=|={3}|!=|=~|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&^|!\\~]=?', Operator),
(r'[()\[\]:;,<>/?{}]', Punctuation), # yes, there's no shortage
# of punctuation in Perl!
(r'(?=\w)', Name, 'name'),
],
'format': [
(r'\.\n', String.Interpol, '#pop'),
(r'[^\n]*\n', String.Interpol),
],
'varname': [
(r'\s+', Text),
(r'\{', Punctuation, '#pop'), # hash syntax?
(r'\)|,', Punctuation, '#pop'), # argument specifier
(r'\w+::', Name.Namespace),
(r'[\w:]+', Name.Variable, '#pop'),
],
'name': [
(r'\w+::', Name.Namespace),
(r'[\w:]+', Name, '#pop'),
(r'[A-Z_]+(?=\W)', Name.Constant, '#pop'),
(r'(?=\W)', Text, '#pop'),
],
'modulename': [
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop')
],
'funcname': [
(r'[a-zA-Z_]\w*[!?]?', Name.Function),
(r'\s+', Text),
# argument declaration
(r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)),
(r';', Punctuation, '#pop'),
(r'.*?\{', Punctuation, '#pop'),
],
'cb-string': [
(r'\\[{}\\]', String.Other),
(r'\\', String.Other),
(r'\{', String.Other, 'cb-string'),
(r'\}', String.Other, '#pop'),
(r'[^{}\\]+', String.Other)
],
'rb-string': [
(r'\\[()\\]', String.Other),
(r'\\', String.Other),
(r'\(', String.Other, 'rb-string'),
(r'\)', String.Other, '#pop'),
(r'[^()]+', String.Other)
],
'sb-string': [
(r'\\[\[\]\\]', String.Other),
(r'\\', String.Other),
(r'\[', String.Other, 'sb-string'),
(r'\]', String.Other, '#pop'),
(r'[^\[\]]+', String.Other)
],
'lt-string': [
(r'\\[<>\\]', String.Other),
(r'\\', String.Other),
(r'\<', String.Other, 'lt-string'),
(r'\>', String.Other, '#pop'),
(r'[^<>]+', String.Other)
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
def analyse_text(text):
if shebang_matches(text, r'perl'):
return True
if re.search('(?:my|our)\s+[$@%(]', text):
return 0.9
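# Minimal usage sketch for the lexer above (standard Pygments API, shown
# here for illustration only):
#
#     from pygments import highlight
#     from pygments.formatters import HtmlFormatter
#     print(highlight('my $x = 42;', PerlLexer(), HtmlFormatter()))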
class Perl6Lexer(ExtendedRegexLexer):
"""
For `Perl 6 <http://www.perl6.org>`_ source code.
.. versionadded:: 2.0
"""
name = 'Perl6'
aliases = ['perl6', 'pl6']
filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6',
'*.6pm', '*.p6m', '*.pm6', '*.t']
mimetypes = ['text/x-perl6', 'application/x-perl6']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
PERL6_IDENTIFIER_RANGE = "['\w:-]"
PERL6_KEYWORDS = (
'BEGIN', 'CATCH', 'CHECK', 'CONTROL', 'END', 'ENTER', 'FIRST', 'INIT',
'KEEP', 'LAST', 'LEAVE', 'NEXT', 'POST', 'PRE', 'START', 'TEMP',
'UNDO', 'as', 'assoc', 'async', 'augment', 'binary', 'break', 'but',
'cached', 'category', 'class', 'constant', 'contend', 'continue',
'copy', 'deep', 'default', 'defequiv', 'defer', 'die', 'do', 'else',
'elsif', 'enum', 'equiv', 'exit', 'export', 'fail', 'fatal', 'for',
'gather', 'given', 'goto', 'grammar', 'handles', 'has', 'if', 'inline',
'irs', 'is', 'last', 'leave', 'let', 'lift', 'loop', 'looser', 'macro',
'make', 'maybe', 'method', 'module', 'multi', 'my', 'next', 'of',
'ofs', 'only', 'oo', 'ors', 'our', 'package', 'parsed', 'prec',
'proto', 'readonly', 'redo', 'ref', 'regex', 'reparsed', 'repeat',
'require', 'required', 'return', 'returns', 'role', 'rule', 'rw',
'self', 'slang', 'state', 'sub', 'submethod', 'subset', 'supersede',
'take', 'temp', 'tighter', 'token', 'trusts', 'try', 'unary',
'unless', 'until', 'use', 'warn', 'when', 'where', 'while', 'will',
)
PERL6_BUILTINS = (
'ACCEPTS', 'HOW', 'REJECTS', 'VAR', 'WHAT', 'WHENCE', 'WHERE', 'WHICH',
'WHO', 'abs', 'acos', 'acosec', 'acosech', 'acosh', 'acotan', 'acotanh',
'all', 'any', 'approx', 'arity', 'asec', 'asech', 'asin', 'asinh',
'assuming', 'atan', 'atan2', 'atanh', 'attr', 'bless', 'body', 'by',
'bytes', 'caller', 'callsame', 'callwith', 'can', 'capitalize', 'cat',
'ceiling', 'chars', 'chmod', 'chomp', 'chop', 'chr', 'chroot',
'circumfix', 'cis', 'classify', 'clone', 'close', 'cmp_ok', 'codes',
'comb', 'connect', 'contains', 'context', 'cos', 'cosec', 'cosech',
'cosh', 'cotan', 'cotanh', 'count', 'defined', 'delete', 'diag',
'dies_ok', 'does', 'e', 'each', 'eager', 'elems', 'end', 'eof', 'eval',
'eval_dies_ok', 'eval_elsewhere', 'eval_lives_ok', 'evalfile', 'exists',
'exp', 'first', 'flip', 'floor', 'flunk', 'flush', 'fmt', 'force_todo',
'fork', 'from', 'getc', 'gethost', 'getlogin', 'getpeername', 'getpw',
'gmtime', 'graphs', 'grep', 'hints', 'hyper', 'im', 'index', 'infix',
'invert', 'is_approx', 'is_deeply', 'isa', 'isa_ok', 'isnt', 'iterator',
'join', 'key', 'keys', 'kill', 'kv', 'lastcall', 'lazy', 'lc', 'lcfirst',
'like', 'lines', 'link', 'lives_ok', 'localtime', 'log', 'log10', 'map',
'max', 'min', 'minmax', 'name', 'new', 'nextsame', 'nextwith', 'nfc',
'nfd', 'nfkc', 'nfkd', 'nok_error', 'nonce', 'none', 'normalize', 'not',
'nothing', 'ok', 'once', 'one', 'open', 'opendir', 'operator', 'ord',
'p5chomp', 'p5chop', 'pack', 'pair', 'pairs', 'pass', 'perl', 'pi',
'pick', 'plan', 'plan_ok', 'polar', 'pop', 'pos', 'postcircumfix',
'postfix', 'pred', 'prefix', 'print', 'printf', 'push', 'quasi',
'quotemeta', 'rand', 're', 'read', 'readdir', 'readline', 'reduce',
'reverse', 'rewind', 'rewinddir', 'rindex', 'roots', 'round',
'roundrobin', 'run', 'runinstead', 'sameaccent', 'samecase', 'say',
'sec', 'sech', 'sech', 'seek', 'shape', 'shift', 'sign', 'signature',
'sin', 'sinh', 'skip', 'skip_rest', 'sleep', 'slurp', 'sort', 'splice',
'split', 'sprintf', 'sqrt', 'srand', 'strand', 'subst', 'substr', 'succ',
'sum', 'symlink', 'tan', 'tanh', 'throws_ok', 'time', 'times', 'to',
'todo', 'trim', 'trim_end', 'trim_start', 'true', 'truncate', 'uc',
'ucfirst', 'undef', 'undefine', 'uniq', 'unlike', 'unlink', 'unpack',
'unpolar', 'unshift', 'unwrap', 'use_ok', 'value', 'values', 'vec',
'version_lt', 'void', 'wait', 'want', 'wrap', 'write', 'zip',
)
PERL6_BUILTIN_CLASSES = (
'Abstraction', 'Any', 'AnyChar', 'Array', 'Associative', 'Bag', 'Bit',
'Blob', 'Block', 'Bool', 'Buf', 'Byte', 'Callable', 'Capture', 'Char', 'Class',
'Code', 'Codepoint', 'Comparator', 'Complex', 'Decreasing', 'Exception',
'Failure', 'False', 'Grammar', 'Grapheme', 'Hash', 'IO', 'Increasing',
'Int', 'Junction', 'KeyBag', 'KeyExtractor', 'KeyHash', 'KeySet',
'KitchenSink', 'List', 'Macro', 'Mapping', 'Match', 'Matcher', 'Method',
'Module', 'Num', 'Object', 'Ordered', 'Ordering', 'OrderingPair',
'Package', 'Pair', 'Positional', 'Proxy', 'Range', 'Rat', 'Regex',
'Role', 'Routine', 'Scalar', 'Seq', 'Set', 'Signature', 'Str', 'StrLen',
'StrPos', 'Sub', 'Submethod', 'True', 'UInt', 'Undef', 'Version', 'Void',
'Whatever', 'bit', 'bool', 'buf', 'buf1', 'buf16', 'buf2', 'buf32',
'buf4', 'buf64', 'buf8', 'complex', 'int', 'int1', 'int16', 'int2',
'int32', 'int4', 'int64', 'int8', 'num', 'rat', 'rat1', 'rat16', 'rat2',
'rat32', 'rat4', 'rat64', 'rat8', 'uint', 'uint1', 'uint16', 'uint2',
'uint32', 'uint4', 'uint64', 'uint8', 'utf16', 'utf32', 'utf8',
)
PERL6_OPERATORS = (
'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div',
'eq', 'eqv', 'extra', 'ff', 'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm',
'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx',
'++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^',
'^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&',
'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^',
'~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^',
'!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv',
'&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so',
'not', '<==', '==>', '<<==', '==>>',
)
# Perl 6 has a *lot* of possible bracketing characters
# this list was lifted from STD.pm6 (https://github.com/perl6/std)
PERL6_BRACKETS = {
u'\u0028': u'\u0029', u'\u003c': u'\u003e', u'\u005b': u'\u005d',
u'\u007b': u'\u007d', u'\u00ab': u'\u00bb', u'\u0f3a': u'\u0f3b',
u'\u0f3c': u'\u0f3d', u'\u169b': u'\u169c', u'\u2018': u'\u2019',
u'\u201a': u'\u2019', u'\u201b': u'\u2019', u'\u201c': u'\u201d',
u'\u201e': u'\u201d', u'\u201f': u'\u201d', u'\u2039': u'\u203a',
u'\u2045': u'\u2046', u'\u207d': u'\u207e', u'\u208d': u'\u208e',
u'\u2208': u'\u220b', u'\u2209': u'\u220c', u'\u220a': u'\u220d',
u'\u2215': u'\u29f5', u'\u223c': u'\u223d', u'\u2243': u'\u22cd',
u'\u2252': u'\u2253', u'\u2254': u'\u2255', u'\u2264': u'\u2265',
u'\u2266': u'\u2267', u'\u2268': u'\u2269', u'\u226a': u'\u226b',
u'\u226e': u'\u226f', u'\u2270': u'\u2271', u'\u2272': u'\u2273',
u'\u2274': u'\u2275', u'\u2276': u'\u2277', u'\u2278': u'\u2279',
u'\u227a': u'\u227b', u'\u227c': u'\u227d', u'\u227e': u'\u227f',
u'\u2280': u'\u2281', u'\u2282': u'\u2283', u'\u2284': u'\u2285',
u'\u2286': u'\u2287', u'\u2288': u'\u2289', u'\u228a': u'\u228b',
u'\u228f': u'\u2290', u'\u2291': u'\u2292', u'\u2298': u'\u29b8',
u'\u22a2': u'\u22a3', u'\u22a6': u'\u2ade', u'\u22a8': u'\u2ae4',
u'\u22a9': u'\u2ae3', u'\u22ab': u'\u2ae5', u'\u22b0': u'\u22b1',
u'\u22b2': u'\u22b3', u'\u22b4': u'\u22b5', u'\u22b6': u'\u22b7',
u'\u22c9': u'\u22ca', u'\u22cb': u'\u22cc', u'\u22d0': u'\u22d1',
u'\u22d6': u'\u22d7', u'\u22d8': u'\u22d9', u'\u22da': u'\u22db',
u'\u22dc': u'\u22dd', u'\u22de': u'\u22df', u'\u22e0': u'\u22e1',
u'\u22e2': u'\u22e3', u'\u22e4': u'\u22e5', u'\u22e6': u'\u22e7',
u'\u22e8': u'\u22e9', u'\u22ea': u'\u22eb', u'\u22ec': u'\u22ed',
u'\u22f0': u'\u22f1', u'\u22f2': u'\u22fa', u'\u22f3': u'\u22fb',
u'\u22f4': u'\u22fc', u'\u22f6': u'\u22fd', u'\u22f7': u'\u22fe',
u'\u2308': u'\u2309', u'\u230a': u'\u230b', u'\u2329': u'\u232a',
u'\u23b4': u'\u23b5', u'\u2768': u'\u2769', u'\u276a': u'\u276b',
u'\u276c': u'\u276d', u'\u276e': u'\u276f', u'\u2770': u'\u2771',
u'\u2772': u'\u2773', u'\u2774': u'\u2775', u'\u27c3': u'\u27c4',
u'\u27c5': u'\u27c6', u'\u27d5': u'\u27d6', u'\u27dd': u'\u27de',
u'\u27e2': u'\u27e3', u'\u27e4': u'\u27e5', u'\u27e6': u'\u27e7',
u'\u27e8': u'\u27e9', u'\u27ea': u'\u27eb', u'\u2983': u'\u2984',
u'\u2985': u'\u2986', u'\u2987': u'\u2988', u'\u2989': u'\u298a',
u'\u298b': u'\u298c', u'\u298d': u'\u298e', u'\u298f': u'\u2990',
u'\u2991': u'\u2992', u'\u2993': u'\u2994', u'\u2995': u'\u2996',
u'\u2997': u'\u2998', u'\u29c0': u'\u29c1', u'\u29c4': u'\u29c5',
u'\u29cf': u'\u29d0', u'\u29d1': u'\u29d2', u'\u29d4': u'\u29d5',
u'\u29d8': u'\u29d9', u'\u29da': u'\u29db', u'\u29f8': u'\u29f9',
u'\u29fc': u'\u29fd', u'\u2a2b': u'\u2a2c', u'\u2a2d': u'\u2a2e',
u'\u2a34': u'\u2a35', u'\u2a3c': u'\u2a3d', u'\u2a64': u'\u2a65',
u'\u2a79': u'\u2a7a', u'\u2a7d': u'\u2a7e', u'\u2a7f': u'\u2a80',
u'\u2a81': u'\u2a82', u'\u2a83': u'\u2a84', u'\u2a8b': u'\u2a8c',
u'\u2a91': u'\u2a92', u'\u2a93': u'\u2a94', u'\u2a95': u'\u2a96',
u'\u2a97': u'\u2a98', u'\u2a99': u'\u2a9a', u'\u2a9b': u'\u2a9c',
u'\u2aa1': u'\u2aa2', u'\u2aa6': u'\u2aa7', u'\u2aa8': u'\u2aa9',
u'\u2aaa': u'\u2aab', u'\u2aac': u'\u2aad', u'\u2aaf': u'\u2ab0',
u'\u2ab3': u'\u2ab4', u'\u2abb': u'\u2abc', u'\u2abd': u'\u2abe',
u'\u2abf': u'\u2ac0', u'\u2ac1': u'\u2ac2', u'\u2ac3': u'\u2ac4',
u'\u2ac5': u'\u2ac6', u'\u2acd': u'\u2ace', u'\u2acf': u'\u2ad0',
u'\u2ad1': u'\u2ad2', u'\u2ad3': u'\u2ad4', u'\u2ad5': u'\u2ad6',
u'\u2aec': u'\u2aed', u'\u2af7': u'\u2af8', u'\u2af9': u'\u2afa',
u'\u2e02': u'\u2e03', u'\u2e04': u'\u2e05', u'\u2e09': u'\u2e0a',
u'\u2e0c': u'\u2e0d', u'\u2e1c': u'\u2e1d', u'\u2e20': u'\u2e21',
u'\u3008': u'\u3009', u'\u300a': u'\u300b', u'\u300c': u'\u300d',
u'\u300e': u'\u300f', u'\u3010': u'\u3011', u'\u3014': u'\u3015',
u'\u3016': u'\u3017', u'\u3018': u'\u3019', u'\u301a': u'\u301b',
u'\u301d': u'\u301e', u'\ufd3e': u'\ufd3f', u'\ufe17': u'\ufe18',
u'\ufe35': u'\ufe36', u'\ufe37': u'\ufe38', u'\ufe39': u'\ufe3a',
u'\ufe3b': u'\ufe3c', u'\ufe3d': u'\ufe3e', u'\ufe3f': u'\ufe40',
u'\ufe41': u'\ufe42', u'\ufe43': u'\ufe44', u'\ufe47': u'\ufe48',
u'\ufe59': u'\ufe5a', u'\ufe5b': u'\ufe5c', u'\ufe5d': u'\ufe5e',
u'\uff08': u'\uff09', u'\uff1c': u'\uff1e', u'\uff3b': u'\uff3d',
u'\uff5b': u'\uff5d', u'\uff5f': u'\uff60', u'\uff62': u'\uff63',
}
def _build_word_match(words, boundary_regex_fragment=None, prefix='', suffix=''):
if boundary_regex_fragment is None:
return r'\b(' + prefix + r'|'.join(re.escape(x) for x in words) + \
suffix + r')\b'
else:
return r'(?<!' + boundary_regex_fragment + r')' + prefix + r'(' + \
r'|'.join(re.escape(x) for x in words) + r')' + suffix + r'(?!' + \
boundary_regex_fragment + r')'
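# Illustrative results (hedged; not exercised anywhere in this module):
#
#     _build_word_match(('if', 'else'))
#         -> r'\b(if|else)\b'
#     _build_word_match(('my',), PERL6_IDENTIFIER_RANGE)
#         -> a lookbehind/lookahead form, so 'my' cannot match inside a
#            longer identifier such as 'dummy'.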
def brackets_callback(token_class):
def callback(lexer, match, context):
groups = match.groupdict()
opening_chars = groups['delimiter']
n_chars = len(opening_chars)
adverbs = groups.get('adverbs')
closer = Perl6Lexer.PERL6_BRACKETS.get(opening_chars[0])
text = context.text
if closer is None: # it's not a mirrored character, which means we
# just need to look for the next occurrence
end_pos = text.find(opening_chars, match.start('delimiter') + n_chars)
else: # we need to look for the corresponding closing character,
# keep nesting in mind
closing_chars = closer * n_chars
nesting_level = 1
search_pos = match.start('delimiter')
while nesting_level > 0:
next_open_pos = text.find(opening_chars, search_pos + n_chars)
next_close_pos = text.find(closing_chars, search_pos + n_chars)
if next_close_pos == -1:
next_close_pos = len(text)
nesting_level = 0
elif next_open_pos != -1 and next_open_pos < next_close_pos:
nesting_level += 1
search_pos = next_open_pos
else: # next_close_pos < next_open_pos
nesting_level -= 1
search_pos = next_close_pos
end_pos = next_close_pos
if end_pos < 0: # if we didn't find a closer, just highlight the
# rest of the text in this class
end_pos = len(text)
if adverbs is not None and re.search(r':to\b', adverbs):
heredoc_terminator = text[match.start('delimiter') + n_chars:end_pos]
end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) +
r'\s*$', text[end_pos:], re.MULTILINE)
if end_heredoc:
end_pos += end_heredoc.end()
else:
end_pos = len(text)
yield match.start(), token_class, text[match.start():end_pos + n_chars]
context.pos = end_pos + n_chars
return callback
def opening_brace_callback(lexer, match, context):
stack = context.stack
yield match.start(), Text, context.text[match.start():match.end()]
context.pos = match.end()
# if we encounter an opening brace and we're one level
# below a token state, it means we need to increment
# the nesting level for braces so we know later when
# we should return to the token rules.
if len(stack) > 2 and stack[-2] == 'token':
context.perl6_token_nesting_level += 1
def closing_brace_callback(lexer, match, context):
stack = context.stack
yield match.start(), Text, context.text[match.start():match.end()]
context.pos = match.end()
# if we encounter a free closing brace and we're one level
# below a token state, it means we need to check the nesting
# level to see if we need to return to the token state.
if len(stack) > 2 and stack[-2] == 'token':
context.perl6_token_nesting_level -= 1
if context.perl6_token_nesting_level == 0:
stack.pop()
def embedded_perl6_callback(lexer, match, context):
context.perl6_token_nesting_level = 1
yield match.start(), Text, context.text[match.start():match.end()]
context.pos = match.end()
context.stack.append('root')
# If you're modifying these rules, be careful if you need to process '{' or '}'
# characters. We have special logic for processing these characters (due to the fact
# that you can nest Perl 6 code in regex blocks), so if you need to process one of
# them, make sure you also process the corresponding one!
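# For example, in Perl 6 source such as
#
#     token word { \w+ { say "matched" } }
#
# the inner braces delimit real Perl 6 code embedded in regex syntax; the
# nesting-level bookkeeping in the callbacks above exists for exactly this.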
tokens = {
'common': [
(r'#[`|=](?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)',
brackets_callback(Comment.Multiline)),
(r'#[^\n]*$', Comment.Singleline),
(r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline),
(r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline),
(r'^=.*?\n\s*?\n', Comment.Multiline),
(r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)',
bygroups(Keyword, Name), 'token-sym-brackets'),
(r'(regex|token|rule)(?!' + PERL6_IDENTIFIER_RANGE + ')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?',
bygroups(Keyword, Name), 'pre-token'),
# deal with a special case in the Perl 6 grammar (role q { ... })
(r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Text, Name, Text)),
(_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword),
(_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix='(?::[UD])?'),
Name.Builtin),
(_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin),
# copied from PerlLexer
(r'[$@%&][.^:?=!~]?' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*',
Name.Variable),
(r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global),
(r'::\?\w+', Name.Variable.Global),
(r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*',
Name.Variable.Global),
(r'\$(?:<.*?>)+', Name.Variable),
(r'(?:q|qq|Q)[a-zA-Z]?\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^0-9a-zA-Z:\s])'
r'(?P=first_char)*)', brackets_callback(String)),
# copied from PerlLexer
(r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
(r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
(r'0b[01]+(_[01]+)*', Number.Bin),
(r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?',
Number.Float),
(r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float),
(r'\d+(_\d+)*', Number.Integer),
(r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex),
(r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex),
(r'm\w+(?=\()', Name),
(r'(?:m|ms|rx)\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^\w:\s])'
r'(?P=first_char)*)', brackets_callback(String.Regex)),
(r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/',
String.Regex),
(r'<[^\s=].*?\S>', String),
(_build_word_match(PERL6_OPERATORS), Operator),
(r'\w' + PERL6_IDENTIFIER_RANGE + '*', Name),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
],
'root': [
include('common'),
(r'\{', opening_brace_callback),
(r'\}', closing_brace_callback),
(r'.+?', Text),
],
'pre-token': [
include('common'),
(r'\{', Text, ('#pop', 'token')),
(r'.+?', Text),
],
'token-sym-brackets': [
(r'(?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)',
brackets_callback(Name), ('#pop', 'pre-token')),
default(('#pop', 'pre-token')),
],
'token': [
(r'\}', Text, '#pop'),
(r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)),
# make sure that quotes in character classes aren't treated as strings
(r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex),
# make sure that '#' characters in quotes aren't treated as comments
(r"(?<!\\)'(\\\\|\\[^\\]|[^'\\])*'", String.Regex),
(r'(?<!\\)"(\\\\|\\[^\\]|[^"\\])*"', String.Regex),
(r'#.*?$', Comment.Singleline),
(r'\{', embedded_perl6_callback),
('.+?', String.Regex),
],
}
def analyse_text(text):
def strip_pod(lines):
in_pod = False
stripped_lines = []
for line in lines:
if re.match(r'^=(?:end|cut)', line):
in_pod = False
elif re.match(r'^=\w+', line):
in_pod = True
elif not in_pod:
stripped_lines.append(line)
return stripped_lines
# XXX handle block comments
lines = text.splitlines()
lines = strip_pod(lines)
text = '\n'.join(lines)
if shebang_matches(text, r'perl6|rakudo|niecza|pugs'):
return True
saw_perl_decl = False
rating = False
# check for my/our/has declarations
if re.search("(?:my|our|has)\s+(?:" + Perl6Lexer.PERL6_IDENTIFIER_RANGE +
"+\s+)?[$@%&(]", text):
rating = 0.8
saw_perl_decl = True
for line in lines:
line = re.sub('#.*', '', line)
if re.match('^\s*$', line):
continue
# match v6; use v6; use v6.0; use v6.0.0;
if re.match('^\s*(?:use\s+)?v6(?:\.\d(?:\.\d)?)?;', line):
return True
# match class, module, role, enum, grammar declarations
class_decl = re.match('^\s*(?:(?P<scope>my|our)\s+)?(?:module|class|role|enum|grammar)', line)
if class_decl:
if saw_perl_decl or class_decl.group('scope') is not None:
return True
rating = 0.05
continue
break
return rating
def __init__(self, **options):
super(Perl6Lexer, self).__init__(**options)
self.encoding = options.get('encoding', 'utf-8')
| mit |
agreco/git | git_remote_helpers/git/importer.py | 19 | 1900 | import os
import subprocess
from git_remote_helpers.util import check_call, check_output
class GitImporter(object):
"""An importer for testgit repositories.
This importer simply delegates to git fast-import.
"""
def __init__(self, repo):
"""Creates a new importer for the specified repo.
"""
self.repo = repo
def get_refs(self, gitdir):
"""Returns a dictionary with refs.
Note that the keys in the returned dictionary are byte strings as
read from git.
"""
args = ["git", "--git-dir=" + gitdir, "for-each-ref", "refs/heads"]
lines = check_output(args).strip().split('\n'.encode('ascii'))
refs = {}
for line in lines:
value, name = line.split(' '.encode('ascii'))
# 'name' is now b'commit\trefs/heads/<branch>'; take the refname after
# the tab.  strip('commit\t') would also eat trailing 'c', 'o', 'm',
# 'i' and 't' characters from the branch name itself.
name = name.split('\t'.encode('ascii'))[1]
refs[name] = value
return refs
def do_import(self, base):
"""Imports a fast-import stream to the given directory.
Simply delegates to git fast-import.
"""
dirname = self.repo.get_base_path(base)
if self.repo.local:
gitdir = self.repo.gitpath
else:
gitdir = os.path.abspath(os.path.join(dirname, '.git'))
path = os.path.abspath(os.path.join(dirname, 'testgit.marks'))
if not os.path.exists(dirname):
os.makedirs(dirname)
refs_before = self.get_refs(gitdir)
args = ["git", "--git-dir=" + gitdir, "fast-import", "--quiet", "--export-marks=" + path]
if os.path.exists(path):
args.append("--import-marks=" + path)
check_call(args)
refs_after = self.get_refs(gitdir)
changed = {}
for name, value in refs_after.iteritems():
if refs_before.get(name) == value:
continue
changed[name] = value
return changed
| gpl-2.0 |
umkay/zulip | zerver/lib/test_helpers.py | 4 | 27491 | from __future__ import absolute_import
from __future__ import print_function
from contextlib import contextmanager
from typing import (cast, Any, Callable, Dict, Generator, Iterable, List, Mapping, Optional,
Set, Sized, Tuple, Union)
from django.conf import settings
from django.test import TestCase
from django.test.client import (
BOUNDARY, MULTIPART_CONTENT, encode_multipart,
)
from django.template import loader
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib.handlers import allocate_handler_id
from zerver.lib.str_utils import force_text
from zerver.lib import cache
from zerver.lib import event_queue
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, do_add_subscription,
get_display_recipient,
)
from zerver.models import (
get_realm,
get_stream,
get_user_profile_by_email,
resolve_email_to_domain,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
)
from zerver.lib.request import JsonableError
import base64
import mock
import os
import re
import time
import ujson
import unittest
from six.moves import urllib
from six import text_type, binary_type
from zerver.lib.str_utils import NonBinaryStr
import six
API_KEYS = {} # type: Dict[text_type, text_type]
skip_py3 = unittest.skipIf(six.PY3, "Expected failure on Python 3")
@contextmanager
def simulated_queue_client(client):
# type: (type) -> Generator[None, None, None]
real_SimpleQueueClient = queue_processors.SimpleQueueClient
queue_processors.SimpleQueueClient = client # type: ignore # https://github.com/JukkaL/mypy/issues/1152
yield
queue_processors.SimpleQueueClient = real_SimpleQueueClient # type: ignore # https://github.com/JukkaL/mypy/issues/1152
@contextmanager
def tornado_redirected_to_list(lst):
# type: (List[Mapping[str, Any]]) -> Generator[None, None, None]
real_event_queue_process_notification = event_queue.process_notification
event_queue.process_notification = lst.append
yield
event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache():
# type: () -> Generator[List[Tuple[str, Union[text_type, List[text_type]], text_type]], None, None]
cache_queries = [] # type: List[Tuple[str, Union[text_type, List[text_type]], text_type]]
def my_cache_get(key, cache_name=None):
# type: (text_type, Optional[str]) -> Any
cache_queries.append(('get', key, cache_name))
return None
def my_cache_get_many(keys, cache_name=None):
# type: (List[text_type], Optional[str]) -> Dict[text_type, Any]
cache_queries.append(('getmany', keys, cache_name))
return {}  # simulate a universal miss; matches the declared Dict return type
old_get = cache.cache_get
old_get_many = cache.cache_get_many
cache.cache_get = my_cache_get
cache.cache_get_many = my_cache_get_many
yield cache_queries
cache.cache_get = old_get
cache.cache_get_many = old_get_many
@contextmanager
def queries_captured():
# type: () -> Generator[List[Dict[str, Union[str, binary_type]]], None, None]
'''
Allow a user to capture just the queries executed during
the with statement.
'''
queries = [] # type: List[Dict[str, Union[str, binary_type]]]
def wrapper_execute(self, action, sql, params=()):
# type: (TimeTrackingCursor, Callable, NonBinaryStr, Iterable[Any]) -> None
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
queries.append({
'sql': self.mogrify(sql, params).decode('utf-8'),
'time': "%.3f" % duration,
})
old_execute = TimeTrackingCursor.execute
old_executemany = TimeTrackingCursor.executemany
def cursor_execute(self, sql, params=()):
# type: (TimeTrackingCursor, NonBinaryStr, Iterable[Any]) -> None
return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params) # type: ignore # https://github.com/JukkaL/mypy/issues/1167
TimeTrackingCursor.execute = cursor_execute # type: ignore # https://github.com/JukkaL/mypy/issues/1167
def cursor_executemany(self, sql, params=()):
# type: (TimeTrackingCursor, NonBinaryStr, Iterable[Any]) -> None
return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params) # type: ignore # https://github.com/JukkaL/mypy/issues/1167
TimeTrackingCursor.executemany = cursor_executemany # type: ignore # https://github.com/JukkaL/mypy/issues/1167
yield queries
TimeTrackingCursor.execute = old_execute # type: ignore # https://github.com/JukkaL/mypy/issues/1167
TimeTrackingCursor.executemany = old_executemany # type: ignore # https://github.com/JukkaL/mypy/issues/1167
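# Typical use inside a test (illustrative sketch; real callers live in
# zerver/tests/):
#
#     with queries_captured() as queries:
#         do_something_that_hits_the_database()
#     self.assert_length(queries, 3)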
def make_client(name):
# type: (str) -> Client
client, _ = Client.objects.get_or_create(name=name)
return client
def find_key_by_email(address):
# type: (text_type) -> text_type
from django.core.mail import outbox
key_regex = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
for message in reversed(outbox):
if address in message.to:
return key_regex.search(message.body).groups()[0]
def message_ids(result):
# type: (Dict[str, Any]) -> Set[int]
return set(message['id'] for message in result['messages'])
def message_stream_count(user_profile):
# type: (UserProfile) -> int
return UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
count()
def most_recent_usermessage(user_profile):
# type: (UserProfile) -> UserMessage
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('-message')
return query[0] # Django does LIMIT here
def most_recent_message(user_profile):
# type: (UserProfile) -> Message
usermessage = most_recent_usermessage(user_profile)
return usermessage.message
def get_user_messages(user_profile):
# type: (UserProfile) -> List[Message]
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('message')
return [um.message for um in query]
class DummyHandler(object):
def __init__(self):
# type: () -> None
allocate_handler_id(self)
class POSTRequestMock(object):
method = "POST"
def __init__(self, post_data, user_profile):
# type: (Dict[str, Any], UserProfile) -> None
self.REQUEST = self.POST = post_data
self.user = user_profile
self._tornado_handler = DummyHandler()
self._log_data = {} # type: Dict[str, Any]
self.META = {'PATH_INFO': 'test'}
class HostRequestMock(object):
"""A mock request object where get_host() works. Useful for testing
routes that use Zulip's subdomains feature"""
def __init__(self, host=settings.EXTERNAL_HOST):
# type: (text_type) -> None
self.host = host
def get_host(self):
# type: () -> text_type
return self.host
INSTRUMENTING = os.environ.get('TEST_INSTRUMENT_URL_COVERAGE', '') == 'TRUE'
INSTRUMENTED_CALLS = [] # type: List[Dict[str, Any]]
UrlFuncT = Callable[..., HttpResponse] # TODO: make more specific
def instrument_url(f):
# type: (UrlFuncT) -> UrlFuncT
if not INSTRUMENTING:
return f
else:
def wrapper(self, url, info={}, **kwargs):
# type: (Any, text_type, Dict[str, Any], **Any) -> HttpResponse
start = time.time()
result = f(self, url, info, **kwargs)
delay = time.time() - start
test_name = self.id()
if '?' in url:
url, extra_info = url.split('?', 1)
else:
extra_info = ''
INSTRUMENTED_CALLS.append(dict(
url=url,
status_code=result.status_code,
method=f.__name__,
delay=delay,
extra_info=extra_info,
info=info,
test_name=test_name,
kwargs=kwargs))
return result
return wrapper
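# Instrumentation is opt-in: run the suite with
# TEST_INSTRUMENT_URL_COVERAGE=TRUE and every wrapped request is recorded
# here, then dumped by write_instrumentation_reports() below.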
def write_instrumentation_reports():
# type: () -> None
if INSTRUMENTING:
calls = INSTRUMENTED_CALLS
var_dir = 'var' # TODO make sure path is robust here
fn = os.path.join(var_dir, 'url_coverage.txt')
with open(fn, 'w') as f:
for call in calls:
try:
line = ujson.dumps(call)
f.write(line + '\n')
except OverflowError:
print('''
A JSON overflow error was encountered while
producing the URL coverage report. Sometimes
this indicates that a test is passing objects
into methods like client_post(), which is
unnecessary and leads to false positives.
''')
print(call)
print('URL coverage report is in %s' % (fn,))
print('Try running: ./tools/analyze-url-coverage')
# Find our untested urls.
from zproject.urls import urlpatterns
untested_patterns = []
for pattern in urlpatterns:
for call in calls:
url = call['url']
if url.startswith('/'):
url = url[1:]
if pattern.regex.match(url):
break
else:
untested_patterns.append(pattern.regex.pattern)
fn = os.path.join(var_dir, 'untested_url_report.txt')
with open(fn, 'w') as f:
f.write('untested urls\n')
for untested_pattern in sorted(untested_patterns):
f.write(' %s\n' % (untested_pattern,))
print('Untested-url report is in %s' % (fn,))
class ZulipTestCase(TestCase):
'''
WRAPPER_COMMENT:
We wrap calls to self.client.{patch,put,get,post,delete} for various
reasons. Some of this has to do with fixing encodings before calling
into the Django code. Some of this has to do with providing a future
path for instrumentation. Some of it is just consistency.
The linter will prevent direct calls to self.client.foo, so the wrapper
functions have to fake out the linter by using a local variable called
django_client to fool the regex.
'''
@instrument_url
def client_patch(self, url, info={}, **kwargs):
# type: (text_type, Dict[str, Any], **Any) -> HttpResponse
"""
We need to urlencode, since Django's function won't do it for us.
"""
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.patch(url, encoded, **kwargs)
@instrument_url
def client_patch_multipart(self, url, info={}, **kwargs):
# type: (text_type, Dict[str, Any], **Any) -> HttpResponse
"""
Use this for patch requests that have file uploads or
that need some sort of multi-part content. In the future
Django's test client may become a bit more flexible,
so we can hopefully eliminate this. (When you post
with the Django test client, it deals with MULTIPART_CONTENT
automatically, but not patch.)
"""
encoded = encode_multipart(BOUNDARY, info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.patch(
url,
encoded,
content_type=MULTIPART_CONTENT,
**kwargs)
@instrument_url
def client_put(self, url, info={}, **kwargs):
# type: (text_type, Dict[str, Any], **Any) -> HttpResponse
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.put(url, encoded, **kwargs)
@instrument_url
def client_delete(self, url, info={}, **kwargs):
# type: (text_type, Dict[str, Any], **Any) -> HttpResponse
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
return django_client.delete(url, encoded, **kwargs)
@instrument_url
def client_post(self, url, info={}, **kwargs):
# type: (text_type, Dict[str, Any], **Any) -> HttpResponse
django_client = self.client # see WRAPPER_COMMENT
return django_client.post(url, info, **kwargs)
@instrument_url
def client_get(self, url, info={}, **kwargs):
# type: (text_type, Dict[str, Any], **Any) -> HttpResponse
django_client = self.client # see WRAPPER_COMMENT
return django_client.get(url, info, **kwargs)
def login_with_return(self, email, password=None):
# type: (text_type, Optional[text_type]) -> HttpResponse
if password is None:
password = initial_password(email)
return self.client_post('/accounts/login/',
{'username': email, 'password': password})
def login(self, email, password=None, fails=False):
# type: (text_type, Optional[text_type], bool) -> HttpResponse
if password is None:
password = initial_password(email)
if not fails:
self.assertTrue(self.client.login(username=email, password=password))
else:
self.assertFalse(self.client.login(username=email, password=password))
def register(self, username, password, domain="zulip.com"):
# type: (text_type, text_type, text_type) -> HttpResponse
self.client_post('/accounts/home/',
{'email': username + "@" + domain})
return self.submit_reg_form_for_user(username, password, domain=domain)
def submit_reg_form_for_user(self, username, password, domain="zulip.com",
realm_name=None, realm_subdomain=None,
realm_org_type=Realm.COMMUNITY, **kwargs):
# type: (text_type, text_type, text_type, Optional[text_type], Optional[text_type], int, **Any) -> HttpResponse
"""
Stage two of the two-step registration process.
If things are working correctly the account should be fully
registered after this call.
You can pass the HTTP_HOST variable for subdomains via kwargs.
"""
return self.client_post('/accounts/register/',
{'full_name': username, 'password': password,
'realm_name': realm_name,
'realm_subdomain': realm_subdomain,
'key': find_key_by_email(username + '@' + domain),
'realm_org_type': realm_org_type,
'terms': True},
**kwargs)
def get_confirmation_url_from_outbox(self, email_address, path_pattern="(\S+)>"):
# type: (text_type, text_type) -> text_type
from django.core.mail import outbox
for message in reversed(outbox):
if email_address in message.to:
return re.search(settings.EXTERNAL_HOST + path_pattern,
message.body).groups()[0]
else:
raise ValueError("Couldn't find a confirmation email.")
def get_api_key(self, email):
# type: (text_type) -> text_type
if email not in API_KEYS:
API_KEYS[email] = get_user_profile_by_email(email).api_key
return API_KEYS[email]
def api_auth(self, email):
# type: (text_type) -> Dict[str, text_type]
credentials = u"%s:%s" % (email, self.get_api_key(email))
return {
'HTTP_AUTHORIZATION': u'Basic ' + base64.b64encode(credentials.encode('utf-8')).decode('utf-8')
}
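# Illustrative call pattern (mirrors common_subscribe_to_streams below):
#
#     result = self.client_get('/api/v1/users/me/subscriptions',
#                              **self.api_auth(email))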
def get_streams(self, email):
# type: (text_type) -> List[text_type]
"""
Helper function to get the stream names for a user
"""
user_profile = get_user_profile_by_email(email)
subs = Subscription.objects.filter(
user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM)
return [cast(text_type, get_display_recipient(sub.recipient)) for sub in subs]
def send_message(self, sender_name, raw_recipients, message_type,
content=u"test content", subject=u"test", **kwargs):
# type: (text_type, Union[text_type, List[text_type]], int, text_type, text_type, **Any) -> int
sender = get_user_profile_by_email(sender_name)
if message_type == Recipient.PERSONAL:
message_type_name = "private"
else:
message_type_name = "stream"
if isinstance(raw_recipients, six.string_types):
recipient_list = [raw_recipients]
else:
recipient_list = raw_recipients
(sending_client, _) = Client.objects.get_or_create(name="test suite")
return check_send_message(
sender, sending_client, message_type_name, recipient_list, subject,
content, forged=False, forged_timestamp=None,
forwarder_user_profile=sender, realm=sender.realm, **kwargs)
def get_old_messages(self, anchor=1, num_before=100, num_after=100):
# type: (int, int, int) -> List[Dict[str, Any]]
post_params = {"anchor": anchor, "num_before": num_before,
"num_after": num_after}
result = self.client_get("/json/messages", dict(post_params))
data = ujson.loads(result.content)
return data['messages']
def users_subscribed_to_stream(self, stream_name, realm_domain):
# type: (text_type, text_type) -> List[UserProfile]
realm = get_realm(realm_domain)
stream = Stream.objects.get(name=stream_name, realm=realm)
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
subscriptions = Subscription.objects.filter(recipient=recipient, active=True)
return [subscription.user_profile for subscription in subscriptions]
def assert_json_success(self, result):
# type: (HttpResponse) -> Dict[str, Any]
"""
Successful POSTs return a 200 and JSON of the form {"result": "success",
"msg": ""}.
"""
self.assertEqual(result.status_code, 200, result)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "success")
# We have a msg key for consistency with errors, but it typically has an
# empty value.
self.assertIn("msg", json)
return json
def get_json_error(self, result, status_code=400):
# type: (HttpResponse, int) -> Dict[str, Any]
self.assertEqual(result.status_code, status_code)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "error")
return json['msg']
def assert_json_error(self, result, msg, status_code=400):
# type: (HttpResponse, text_type, int) -> None
"""
Invalid POSTs return an error status code and JSON of the form
{"result": "error", "msg": "reason"}.
"""
self.assertEqual(self.get_json_error(result, status_code=status_code), msg)
def assert_length(self, queries, count):
# type: (Sized, int) -> None
actual_count = len(queries)
return self.assertTrue(actual_count == count,
"len(%s) == %s, != %s" % (queries, actual_count, count))
def assert_max_length(self, queries, count):
# type: (Sized, int) -> None
actual_count = len(queries)
return self.assertTrue(actual_count <= count,
"len(%s) == %s, > %s" % (queries, actual_count, count))
def assert_json_error_contains(self, result, msg_substring, status_code=400):
# type: (HttpResponse, text_type, int) -> None
self.assertIn(msg_substring, self.get_json_error(result, status_code=status_code))
def assert_equals_response(self, string, response):
# type: (text_type, HttpResponse) -> None
self.assertEqual(string, response.content.decode('utf-8'))
def assert_in_response(self, substring, response):
# type: (text_type, HttpResponse) -> None
self.assertIn(substring, response.content.decode('utf-8'))
def fixture_data(self, type, action, file_type='json'):
# type: (text_type, text_type, text_type) -> text_type
return force_text(open(os.path.join(os.path.dirname(__file__),
"../fixtures/%s/%s_%s.%s" % (type, type, action, file_type))).read())
# Subscribe to a stream directly
def subscribe_to_stream(self, email, stream_name, realm=None):
# type: (text_type, text_type, Optional[Realm]) -> None
if realm is None:
realm = get_realm(resolve_email_to_domain(email))
stream = get_stream(stream_name, realm)
if stream is None:
stream, _ = create_stream_if_needed(realm, stream_name)
user_profile = get_user_profile_by_email(email)
do_add_subscription(user_profile, stream, no_log=True)
# Subscribe to a stream by making an API request
def common_subscribe_to_streams(self, email, streams, extra_post_data={}, invite_only=False):
# type: (text_type, Iterable[text_type], Dict[str, Any], bool) -> HttpResponse
post_data = {'subscriptions': ujson.dumps([{"name": stream} for stream in streams]),
'invite_only': ujson.dumps(invite_only)}
post_data.update(extra_post_data)
result = self.client_post("/api/v1/users/me/subscriptions", post_data, **self.api_auth(email))
return result
def send_json_payload(self, email, url, payload, stream_name=None, **post_params):
# type: (text_type, text_type, Union[text_type, Dict[str, Any]], Optional[text_type], **Any) -> Message
if stream_name is not None:
self.subscribe_to_stream(email, stream_name)
result = self.client_post(url, payload, **post_params)
self.assert_json_success(result)
# Check the correct message was sent
msg = self.get_last_message()
self.assertEqual(msg.sender.email, email)
if stream_name is not None:
self.assertEqual(get_display_recipient(msg.recipient), stream_name)
# TODO: should also validate recipient for private messages
return msg
def get_last_message(self):
# type: () -> Message
return Message.objects.latest('id')
def get_second_to_last_message(self):
# type: () -> Message
return Message.objects.all().order_by('-id')[1]
@contextmanager
def simulated_markdown_failure(self):
# type: () -> Generator[None, None, None]
'''
This raises a failure inside of the try/except block of
bugdown.__init__.do_convert.
'''
with \
self.settings(ERROR_BOT=None), \
mock.patch('zerver.lib.bugdown.timeout', side_effect=KeyError('foo')), \
mock.patch('zerver.lib.bugdown.log_bugdown_error'):
yield
class WebhookTestCase(ZulipTestCase):
"""
Shared base class for all webhook tests.
Override the class attributes below and call send_and_test_message.
If your URL is built in an unusual way, override build_webhook_url.
If you need to modify the body, or build it without a fixture, override get_body.
"""
STREAM_NAME = None # type: Optional[text_type]
TEST_USER_EMAIL = 'webhook-bot@zulip.com'
URL_TEMPLATE = None # type: Optional[text_type]
FIXTURE_DIR_NAME = None # type: Optional[text_type]
def setUp(self):
# type: () -> None
self.url = self.build_webhook_url()
def send_and_test_stream_message(self, fixture_name, expected_subject=None,
expected_message=None, content_type="application/json", **kwargs):
# type: (text_type, Optional[text_type], Optional[text_type], Optional[text_type], **Any) -> Message
payload = self.get_body(fixture_name)
if content_type is not None:
kwargs['content_type'] = content_type
msg = self.send_json_payload(self.TEST_USER_EMAIL, self.url, payload,
self.STREAM_NAME, **kwargs)
self.do_test_subject(msg, expected_subject)
self.do_test_message(msg, expected_message)
return msg
def send_and_test_private_message(self, fixture_name, expected_subject=None,
expected_message=None, content_type="application/json", **kwargs):
# type: (text_type, text_type, text_type, str, **Any) -> Message
payload = self.get_body(fixture_name)
if content_type is not None:
kwargs['content_type'] = content_type
msg = self.send_json_payload(self.TEST_USER_EMAIL, self.url, payload,
stream_name=None, **kwargs)
self.do_test_message(msg, expected_message)
return msg
def build_webhook_url(self):
# type: () -> text_type
api_key = self.get_api_key(self.TEST_USER_EMAIL)
return self.URL_TEMPLATE.format(stream=self.STREAM_NAME, api_key=api_key)
def get_body(self, fixture_name):
# type: (text_type) -> Union[text_type, Dict[str, text_type]]
"""Can be implemented either as returning a dictionary containing the
post parameters or as string containing the body of the request."""
return ujson.dumps(ujson.loads(self.fixture_data(self.FIXTURE_DIR_NAME, fixture_name)))
def do_test_subject(self, msg, expected_subject):
# type: (Message, Optional[text_type]) -> None
if expected_subject is not None:
self.assertEqual(msg.topic_name(), expected_subject)
def do_test_message(self, msg, expected_message):
# type: (Message, Optional[text_type]) -> None
if expected_message is not None:
self.assertEqual(msg.content, expected_message)
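# A concrete webhook test would look roughly like this (hypothetical hook
# name and URL template, following the attribute contract documented in
# the class docstring):
#
#     class ExampleHookTests(WebhookTestCase):
#         STREAM_NAME = u'example'
#         URL_TEMPLATE = u'/api/v1/external/example?api_key={api_key}&stream={stream}'
#         FIXTURE_DIR_NAME = 'example'
#
#         def test_ping(self):
#             self.send_and_test_stream_message('ping', u'expected subject',
#                                               u'expected message')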
def get_all_templates():
# type: () -> List[str]
templates = []
relpath = os.path.relpath
isfile = os.path.isfile
path_exists = os.path.exists
def is_valid_template(p, n):
# type: (text_type, text_type) -> bool
return not n.startswith('.') and not n.startswith('__init__') and isfile(p)
def process(template_dir, dirname, fnames):
# type: (str, str, Iterable[str]) -> None
for name in fnames:
path = os.path.join(dirname, name)
if is_valid_template(path, name):
templates.append(relpath(path, template_dir))
for engine in loader.engines.all():
template_dirs = [d for d in engine.template_dirs if path_exists(d)]
for template_dir in template_dirs:
template_dir = os.path.normpath(template_dir)
for dirpath, dirnames, fnames in os.walk(template_dir):
process(template_dir, dirpath, fnames)
return templates
| apache-2.0 |
jeffchao/xen-3.3-tcg | tools/python/build/lib.linux-i686-2.6/xen/xend/XendClient.py | 49 | 1474 | #!/usr/bin/env python
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2006 Anthony Liguori <aliguori@us.ibm.com>
#============================================================================
from xen.util.xmlrpcclient import ServerProxy
import os
import sys
XML_RPC_SOCKET = "/var/run/xend/xmlrpc.sock"
XEN_API_SOCKET = "/var/run/xend/xen-api.sock"
ERROR_INTERNAL = 1
ERROR_GENERIC = 2
ERROR_INVALID_DOMAIN = 3
uri = 'httpu:///var/run/xend/xmlrpc.sock'
if os.environ.has_key('XM_SERVER'):
uri = os.environ['XM_SERVER']
try:
server = ServerProxy(uri)
except ValueError, exn:
print >>sys.stderr, exn
sys.exit(1)
| gpl-2.0 |