repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
# Lookup table mapping tokens (personal names, brands, media handles) to a
# gender label. Every value is one of exactly three strings: 'male',
# 'female', or 'undefined' (used for brands, places, outlets and other
# non-person tokens). Fixed: 'free' was mapped to the misspelled value
# 'unidefined', which silently broke any comparison against 'undefined'.
gender = {
    'fakhir': 'male', 'Amina': 'female', 'telenor': 'undefined', 'shahid': 'male',
    'talkshalk': 'undefined', 'ikram': 'male',
    'pakistan': 'undefined', 'pakistani': 'undefined', 'raja': 'male', 'pome': 'male',
    'zikriya': 'male', 'djuice': 'undefined',
    'easypesa': 'undefined', 'qaswar': 'male', 'shahjehan': 'male', 'shoaib': 'male',
    'rana': 'male', 'abrar': 'male', 'allahdad': 'male',
    'mushtaq': 'male', 'shehzada': 'male', 'dawoodshah': 'male', 'zarnain': 'male',
    'afnan': 'male', 'easypaisa': 'undefined', 'zia': 'male',
    'shaam': 'male', 'bigra': 'male', 'arsam': 'male', 'engr': 'male',
    'inzamam': 'male', 'AR': 'male', 'panjatan': 'undefined',
    'panjtan': 'undefined', 'kami': 'male', 'jinsar': 'male', 'ghamgin': 'male',
    'ghamgeen': 'male', 'joni': 'male', 'janzeb': 'male',
    'sheikhzada': 'male', 'star': 'male', 'ipl': 'undefined', 'ary': 'undefined',
    'tt': 'undefined', 'f': 'undefined', 'nge': 'undefined',
    'jutt': 'male', 'suhana': 'male', 'snl': 'undefined', 'haroonleo': 'male',
    'zulfqar': 'male', 'telecom': 'undefined',
    'marketing': 'undefined', 'pinzoo': 'undefined',
    'free': 'undefined',  # fixed typo: was 'unidefined'
    'newspaper': 'undefined', 'fata': 'undefined',
    'innenriks': 'undefined', 'telenorinnews': 'undefined', 'sks': 'undefined',
    'king': 'male', 'kingkashif': 'male', 'online': 'undefined',
    'national': 'undefined', 'city': 'undefined', 'hamaraqueta': 'undefined',
    'pyara': 'undefined', 'news': 'undefined',
    'newsbuzz': 'undefined', 'malumati': 'undefined', 'lala': 'male',
    'paknetwork': 'undefined', 'fan': 'undefined', 'baadshaah': 'male',
    'byskog': 'undefined', 'dhamaka': 'undefined', 'dhamakaweek': 'undefined',
    'dailypeshawar': 'undefined', 'student': 'undefined',
    'suchkhabar': 'undefined', 'medianama.com': 'undefined', 'liveurdu': 'undefined',
    'singh': 'undefined', 'exchange': 'undefined',
    'cricket': 'undefined', 'pak': 'undefined', 'live': 'undefined',
    'usefulsms': 'undefined', 'shabqadar': 'undefined', 'd': 'undefined',
    'poland': 'undefined', 'mwk': 'undefined', 'daily': 'undefined',
    'bookcomplaint': 'undefined', 'bookcomplaints': 'undefined',
    'karandaaz': 'undefined', 'cheezmall': 'undefined', 'samaa': 'undefined',
    'qari': 'undefined', 'rahuul': 'undefined',
    'amirzada': 'undefined', 'lovequaideazam': 'undefined', 'asianet': 'undefined',
    'pkjobvacancy': 'undefined',
    'business': 'undefined',
}
| bsd-2-clause |
arenadata/ambari | ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/mahout.py | 1 | 2241 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import os
from resource_management.core.resources import Directory
from resource_management.core.resources import File
from resource_management.libraries.functions import format
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions import lzo_utils
from resource_management.libraries.resources import XmlConfig
def mahout():
# Set up Mahout on an Ambari-managed node: ensure matching LZO libraries,
# create the conf directory, write yarn-site.xml, and install
# log4j.properties when configured. All values come from the Ambari
# `params` module. NOTE: `mode=0644` literals are Python 2 octal syntax.
import params
# ensure that matching LZO libraries are installed for Mahout
lzo_utils.install_lzo_if_needed()
# Conf directory owned by the Mahout service user.
Directory( params.mahout_conf_dir,
create_parents = True,
owner = params.mahout_user,
group = params.user_group
)
# Materialize yarn-site.xml from the cluster-wide configuration blob.
XmlConfig("yarn-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['yarn-site'],
configuration_attributes=params.config['configuration_attributes']['yarn-site'],
owner=params.yarn_user,
group=params.user_group,
mode=0644
)
# Write log4j.properties with managed content when provided; otherwise only
# fix ownership/permissions of a pre-existing file (content left untouched).
if not is_empty(params.log4j_props):
File(format("{params.mahout_conf_dir}/log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.mahout_user,
content=params.log4j_props
)
elif (os.path.exists(format("{params.mahout_conf_dir}/log4j.properties"))):
File(format("{params.mahout_conf_dir}/log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.mahout_user
)
| apache-2.0 |
yoer/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/messages/storage/cookie.py | 168 | 6425 | import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import SimpleCookie
from django.utils.crypto import salted_hmac, constant_time_compare
from django.utils.safestring import SafeData, mark_safe
from django.utils import six
class MessageEncoder(json.JSONEncoder):
    """JSON encoder that serializes ``Message`` instances in a compact form."""

    # Marker placed at index 0 so the decoder can recognize encoded messages.
    message_key = '__json_message'

    def default(self, obj):
        if not isinstance(obj, Message):
            # Fall through to the stock encoder (which raises TypeError).
            return super(MessageEncoder, self).default(obj)
        # Using 0/1 here instead of False/True to produce more compact json
        safe_flag = 1 if isinstance(obj.message, SafeData) else 0
        encoded = [self.message_key, safe_flag, obj.level, obj.message]
        if obj.extra_tags:
            encoded.append(obj.extra_tags)
        return encoded
class MessageDecoder(json.JSONDecoder):
    """JSON decoder that revives ``Message`` instances serialized by
    ``MessageEncoder``."""

    def process_messages(self, obj):
        # Lists may be (or may contain) encoded messages; walk them recursively.
        if isinstance(obj, list) and obj:
            if obj[0] == MessageEncoder.message_key:
                if len(obj) == 3:
                    # Compatibility with previously-encoded messages
                    return Message(*obj[1:])
                if obj[1]:
                    # is_safedata flag set: restore SafeData marking.
                    obj[3] = mark_safe(obj[3])
                return Message(*obj[2:])
            return [self.process_messages(elem) for elem in obj]
        if isinstance(obj, dict):
            return dict((key, self.process_messages(val))
                        for key, val in six.iteritems(obj))
        return obj

    def decode(self, s, **kwargs):
        return self.process_messages(
            super(MessageDecoder, self).decode(s, **kwargs))
class CookieStorage(BaseStorage):
"""
Stores messages in a cookie.
"""
cookie_name = 'messages'
# uwsgi's default configuration enforces a maximum size of 4kb for all the
# HTTP headers. In order to leave some room for other cookies and headers,
# restrict the session cookie to 1/2 of 4kb. See #18781.
max_cookie_size = 2048
# Sentinel appended to the stored list when not every message fit in the cookie.
not_finished = '__messagesnotfinished__'
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
"""
data = self.request.COOKIES.get(self.cookie_name)
messages = self._decode(data)
all_retrieved = not (messages and messages[-1] == self.not_finished)
if messages and not all_retrieved:
# remove the sentinel value
messages.pop()
return messages, all_retrieved
def _update_cookie(self, encoded_data, response):
"""
Either sets the cookie with the encoded data if there is any data to
store, or deletes the cookie.
"""
if encoded_data:
response.set_cookie(self.cookie_name, encoded_data,
domain=settings.SESSION_COOKIE_DOMAIN)
else:
response.delete_cookie(self.cookie_name,
domain=settings.SESSION_COOKIE_DOMAIN)
def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
"""
Stores the messages to a cookie, returning a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, removes
messages until the data fits (these are the messages which are
returned), and add the not_finished sentinel value to indicate as much.
"""
unstored_messages = []
encoded_data = self._encode(messages)
if self.max_cookie_size:
# data is going to be stored eventually by SimpleCookie, which
# adds it's own overhead, which we must account for.
cookie = SimpleCookie() # create outside the loop
def stored_length(val):
return len(cookie.value_encode(val)[1])
# Drop messages one at a time (oldest or newest end, per remove_oldest)
# until the re-encoded payload fits the size budget.
while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
if remove_oldest:
unstored_messages.append(messages.pop(0))
else:
unstored_messages.insert(0, messages.pop())
encoded_data = self._encode(messages + [self.not_finished],
encode_empty=unstored_messages)
self._update_cookie(encoded_data, response)
return unstored_messages
def _hash(self, value):
"""
Creates an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
"""
key_salt = 'django.contrib.messages'
return salted_hmac(key_salt, value).hexdigest()
def _encode(self, messages, encode_empty=False):
"""
Returns an encoded version of the messages list which can be stored as
plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
"""
# Stored format is "<hmac-hex>$<json>"; returns None implicitly when
# there is nothing to encode and encode_empty is falsy.
if messages or encode_empty:
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
return '%s$%s' % (self._hash(value), value)
def _decode(self, data):
"""
Safely decodes a encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, ``None`` is returned.
"""
if not data:
return None
bits = data.split('$', 1)
if len(bits) == 2:
hash, value = bits
# constant_time_compare avoids leaking hash information via timing.
if constant_time_compare(hash, self._hash(value)):
try:
# If we get here (and the JSON decode works), everything is
# good. In any other case, drop back and return None.
return json.loads(value, cls=MessageDecoder)
except ValueError:
pass
# Mark the data as used (so it gets removed) since something was wrong
# with the data.
self.used = True
return None
| apache-2.0 |
wangyum/mxnet | python/mxnet/gluon/contrib/rnn/__init__.py | 43 | 1029 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Contrib recurrent neural network module."""
from . import conv_rnn_cell, rnn_cell
from .conv_rnn_cell import *
from .rnn_cell import *
# Public API of this package: the union of both cell submodules' exports.
__all__ = conv_rnn_cell.__all__ + rnn_cell.__all__
| apache-2.0 |
jneves/python-social-auth | social/apps/cherrypy_app/utils.py | 77 | 1721 | import warnings
from functools import wraps
import cherrypy
from social.utils import setting_name, module_member
from social.strategies.utils import get_strategy
from social.backends.utils import get_backend, user_backends_data
# Fallback dotted paths for the strategy and storage implementations, used
# when the CherryPy config does not override the corresponding settings.
DEFAULTS = {
    'STRATEGY': 'social.strategies.cherrypy_strategy.CherryPyStrategy',
    'STORAGE': 'social.apps.cherrypy_app.models.CherryPyStorage',
}
def get_helper(name):
    """Return setting *name* from the CherryPy config, falling back to DEFAULTS."""
    fallback = DEFAULTS.get(name, None)
    return cherrypy.config.get(setting_name(name), fallback)
def load_backend(strategy, name, redirect_uri):
    """Instantiate the auth backend *name* from the configured backend list."""
    configured = get_helper('AUTHENTICATION_BACKENDS')
    backend_cls = get_backend(configured, name)
    return backend_cls(strategy=strategy, redirect_uri=redirect_uri)
def psa(redirect_uri=None):
    """Decorator that attaches ``strategy`` and ``backend`` to the handler
    instance before invoking it. *redirect_uri* may contain a
    ``%(backend)s`` placeholder that is filled with the backend name."""
    def deco(func):
        @wraps(func)
        def inner(self, backend=None, *args, **kwargs):
            uri = redirect_uri
            if uri and backend and '%(backend)s' in uri:
                uri = uri % {'backend': backend}
            # Strategy must exist before the backend can be loaded.
            self.strategy = get_strategy(get_helper('STRATEGY'),
                                         get_helper('STORAGE'))
            self.backend = load_backend(self.strategy, backend, uri)
            return func(self, backend, *args, **kwargs)
        return inner
    return deco
def backends(user):
    """Load Social Auth current user data to context under the key 'backends'.
    Will return the output of social.backends.utils.user_backends_data."""
    backend_list = get_helper('AUTHENTICATION_BACKENDS')
    storage = module_member(get_helper('STORAGE'))
    return user_backends_data(user, backend_list, storage)
def strategy(*args, **kwargs):
    """Deprecated alias kept for backward compatibility; use :func:`psa`."""
    warnings.warn('@strategy decorator is deprecated, use @psa instead')
    return psa(*args, **kwargs)
| bsd-3-clause |
avsm/xen-unstable | tools/python/xen/web/SrvBase.py | 44 | 3290 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
#============================================================================
import types
from xen.xend import sxp
from xen.xend import PrettyPrint
from xen.xend.Args import ArgError
from xen.xend.XendError import XendError
from xen.xend.XendLogging import log
import resource
import http
import httpserver
def uri_pathlist(p):
    """Split a path into a list of its non-empty components.

    p       path string
    return  list of path elements (empty segments from leading, trailing
            or doubled slashes are dropped)
    """
    return [segment for segment in p.split('/') if segment != '']
class SrvBase(resource.Resource):
"""Base class for services.
"""
# NOTE: this is Python 2 code (see the `except Exception, exn` syntax below).
def use_sxp(self, req):
# Whether the request asked for an s-expression response.
return req.useSxp()
def get_op_method(self, op):
"""Get the method for an operation.
For operation 'foo' looks for 'op_foo'.
op operation name
returns method or None
"""
op_method_name = 'op_' + op
return getattr(self, op_method_name, None)
def perform(self, req):
"""General operation handler for posted operations.
For operation 'foo' looks for a method op_foo and calls
it with op_foo(op, req). Replies with code 500 if op_foo
is not found.
The method must return a list when req.use_sxp is true
and an HTML string otherwise (or list).
Methods may also return a ThreadRequest (for incomplete processing).
req request
"""
# 'op' arrives as a list of values; exactly one is required.
op = req.args.get('op')
if op is None or len(op) != 1:
req.setResponseCode(http.NOT_ACCEPTABLE, "Invalid request")
return ''
op = op[0]
op_method = self.get_op_method(op)
if op_method is None:
req.setResponseCode(http.NOT_IMPLEMENTED, "Operation not implemented: " + op)
req.setHeader("Content-Type", "text/plain")
req.write("Operation not implemented: " + op)
return ''
else:
try:
return op_method(op, req)
except Exception, exn:
# Report failures as 500 and render the error in the format the
# client asked for (sxp list or HTML paragraph).
req.setResponseCode(http.INTERNAL_SERVER_ERROR, "Request failed: " + op)
log.exception("Request %s failed.", op)
if req.useSxp():
return ['xend.err', str(exn)]
else:
return "<p>%s</p>" % str(exn)
def print_path(self, req):
"""Print the path with hyperlinks.
"""
req.printPath()
| gpl-2.0 |
ojengwa/grr | gui/api_call_renderers_test.py | 2 | 6040 | #!/usr/bin/env python
"""Tests for API call renderers."""
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
import json
from grr.gui import api_aff4_object_renderers
from grr.gui import api_call_renderers
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import test_lib
from grr.lib import utils
from grr.proto import tests_pb2
class SampleGetRendererArgs(rdfvalue.RDFProtoStruct):
# Protobuf-backed argument container for SampleGetRenderer (tests.proto).
protobuf = tests_pb2.SampleGetRendererArgs
class SampleGetRenderer(api_call_renderers.ApiCallRenderer):
    """Minimal GET renderer used by the HTTP-routing tests: echoes its args."""

    args_type = SampleGetRendererArgs

    def Render(self, args, token=None):
        return {"method": "GET", "path": args.path, "foo": args.foo}
class SampleGetRendererWithAdditionalArgsArgs(rdfvalue.RDFProtoStruct):
# Protobuf-backed argument container for SampleGetRendererWithAdditionalArgs.
protobuf = tests_pb2.SampleGetRendererWithAdditionalArgsArgs
class SampleGetRendererWithAdditionalArgs(api_call_renderers.ApiCallRenderer):
    """GET renderer that echoes its args plus any parsed additional args."""

    args_type = SampleGetRendererWithAdditionalArgsArgs
    additional_args_types = {
        "AFF4Object": api_aff4_object_renderers.ApiAFF4ObjectRendererArgs,
        "RDFValueCollection": (api_aff4_object_renderers.
                               ApiRDFValueCollectionRendererArgs)
    }

    def Render(self, args, token=None):
        result = {"method": "GET", "path": args.path, "foo": args.foo}
        if args.additional_args:
            # Additional arguments are rendered via their string form.
            result["additional_args"] = [str(extra)
                                         for extra in args.additional_args]
        return result
class TestHttpRoutingInit(registry.InitHook):
    """Init hook registering the sample renderers' HTTP routes for the tests."""

    def RunOnce(self):
        routes = [
            ("/test_sample/<path:path>", SampleGetRenderer),
            ("/test_sample_with_additional_args/<path:path>",
             SampleGetRendererWithAdditionalArgs),
        ]
        for route, renderer in routes:
            api_call_renderers.RegisterHttpRouteHandler("GET", route, renderer)
class RenderHttpResponseTest(test_lib.GRRBaseTest):
"""Test for api_call_renderers.RenderHttpResponse logic."""
def _CreateRequest(self, method, path, query_parameters=None):
# Build a minimal fake request object (utils.DataObject) with just the
# attributes the routing/rendering code reads.
if not query_parameters:
query_parameters = {}
request = utils.DataObject()
request.method = method
request.path = path
request.scheme = "http"
request.environ = {
"SERVER_NAME": "foo.bar",
"SERVER_PORT": 1234
}
request.user = "test"
if method == "GET":
request.GET = query_parameters
request.META = {}
return request
def _RenderResponse(self, request):
# Render and strip the 5-character ")]}'\n" prefix (if present) so the
# body can be parsed as plain JSON in assertions.
response = api_call_renderers.RenderHttpResponse(request)
if response.content.startswith(")]}'\n"):
response.content = response.content[5:]
return response
def testReturnsRendererMatchingUrlAndMethod(self):
renderer, _ = api_call_renderers.GetRendererForHttpRequest(
self._CreateRequest("GET", "/test_sample/some/path"))
self.assertTrue(isinstance(renderer, SampleGetRenderer))
def testPathParamsAreReturnedWithMatchingRenderer(self):
_, path_params = api_call_renderers.GetRendererForHttpRequest(
self._CreateRequest("GET", "/test_sample/some/path"))
self.assertEqual(path_params, {"path": "some/path"})
def testRaisesIfNoRendererMatchesUrl(self):
self.assertRaises(api_call_renderers.ApiCallRendererNotFoundError,
api_call_renderers.GetRendererForHttpRequest,
self._CreateRequest("GET",
"/some/missing/path"))
def testRendersGetRendererCorrectly(self):
response = self._RenderResponse(
self._CreateRequest("GET", "/test_sample/some/path"))
self.assertEqual(
json.loads(response.content),
{"method": "GET",
"path": "some/path",
"foo": ""})
self.assertEqual(response.status_code, 200)
def testQueryParamsArePassedIntoRendererArgs(self):
response = self._RenderResponse(
self._CreateRequest("GET", "/test_sample/some/path",
query_parameters={"foo": "bar"}))
self.assertEqual(
json.loads(response.content),
{"method": "GET",
"path": "some/path",
"foo": "bar"})
def testRouteArgumentTakesPrecedenceOverQueryParams(self):
# "path" comes both from the route and the query string; the route wins.
response = self._RenderResponse(
self._CreateRequest("GET", "/test_sample/some/path",
query_parameters={"path": "foobar"}))
self.assertEqual(
json.loads(response.content),
{"method": "GET",
"path": "some/path",
"foo": ""})
def testAdditionalArgumentsAreParsedCorrectly(self):
additional_args = api_call_renderers.FillAdditionalArgsFromRequest(
{"AFF4Object.limit_lists": "10",
"RDFValueCollection.with_total_count": "1"},
{"AFF4Object": rdfvalue.ApiAFF4ObjectRendererArgs,
"RDFValueCollection": rdfvalue.ApiRDFValueCollectionRendererArgs})
# Sort for a deterministic comparison order.
additional_args = sorted(additional_args, key=lambda x: x.name)
self.assertListEqual(
[x.name for x in additional_args],
["AFF4Object", "RDFValueCollection"])
self.assertListEqual(
[x.type for x in additional_args],
["ApiAFF4ObjectRendererArgs", "ApiRDFValueCollectionRendererArgs"])
self.assertListEqual(
[x.args for x in additional_args],
[rdfvalue.ApiAFF4ObjectRendererArgs(limit_lists=10),
rdfvalue.ApiRDFValueCollectionRendererArgs(with_total_count=True)])
def testAdditionalArgumentsAreFoundAndPassedToTheRenderer(self):
response = self._RenderResponse(
self._CreateRequest("GET",
"/test_sample_with_additional_args/some/path",
query_parameters={"foo": "42"}))
self.assertEqual(
json.loads(response.content),
{"method": "GET",
"path": "some/path",
"foo": "42"})
def main(argv):
# Delegate to the GRR test runner.
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 |
vmindru/ansible | lib/ansible/modules/packaging/os/zypper_repository.py | 81 | 13626 | #!/usr/bin/python
# encoding: utf-8
# (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
# (c) 2014, Justin Lecher <jlec@gentoo.org>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module housekeeping metadata consumed by Ansible's doc/CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zypper_repository
author: "Matthias Vogelgesang (@matze)"
version_added: "1.4"
short_description: Add and remove Zypper repositories
description:
- Add or remove Zypper repositories on SUSE and openSUSE
options:
name:
description:
- A name for the repository. Not required when adding repofiles.
repo:
description:
- URI of the repository or .repo file. Required when state=present.
state:
description:
- A source string state.
choices: [ "absent", "present" ]
default: "present"
description:
description:
- A description of the repository
disable_gpg_check:
description:
- Whether to disable GPG signature checking of
all packages. Has an effect only if state is
I(present).
- Needs zypper version >= 1.6.2.
type: bool
default: 'no'
autorefresh:
description:
- Enable autorefresh of the repository.
type: bool
default: 'yes'
aliases: [ "refresh" ]
priority:
description:
- Set priority of repository. Packages will always be installed
from the repository with the smallest priority number.
- Needs zypper version >= 1.12.25.
version_added: "2.1"
overwrite_multiple:
description:
- Overwrite multiple repository entries, if repositories with both name and
URL already exist.
type: bool
default: 'no'
version_added: "2.1"
auto_import_keys:
description:
- Automatically import the gpg signing key of the new or changed repository.
- Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
- Implies runrefresh.
- Only works with C(.repo) files if `name` is given explicitly.
type: bool
default: 'no'
version_added: "2.2"
runrefresh:
description:
- Refresh the package list of the given repository.
- Can be used with repo=* to refresh all repositories.
type: bool
default: 'no'
version_added: "2.2"
enabled:
description:
- Set repository to enabled (or disabled).
type: bool
default: 'yes'
version_added: "2.2"
requirements:
- "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0"
- python-xml
'''
EXAMPLES = '''
# Add NVIDIA repository for graphics drivers
- zypper_repository:
name: nvidia-repo
repo: 'ftp://download.nvidia.com/opensuse/12.2'
state: present
# Remove NVIDIA repository
- zypper_repository:
name: nvidia-repo
repo: 'ftp://download.nvidia.com/opensuse/12.2'
state: absent
# Add python development repository
- zypper_repository:
repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo'
# Refresh all repos
- zypper_repository:
repo: '*'
runrefresh: yes
# Add a repo and add it's gpg key
- zypper_repository:
repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
auto_import_keys: yes
# Force refresh of a repository
- zypper_repository:
repo: 'http://my_internal_ci_repo/repo'
name: my_ci_repo
state: present
runrefresh: yes
'''
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
# Repository attributes extracted from each <repo> element of `zypper --xmlout repos`.
REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
def _get_cmd(*args):
"""Combines the non-interactive zypper command with arguments/subcommands"""
cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive']
cmd.extend(args)
return cmd
def _parse_repos(module):
    """parses the output of zypper --xmlout repos and return a parse repo dictionary"""
    from xml.dom.minidom import parseString as parseXML

    cmd = _get_cmd('--xmlout', 'repos')
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    # exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined)
    if rc == 6:
        return []
    if rc != 0:
        module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd),
                         rc=rc, stdout=stdout, stderr=stderr)
    repos = []
    for node in parseXML(stdout).getElementsByTagName('repo'):
        # A repo can be uniquely identified by an alias + url
        entry = dict((opt, node.getAttribute(opt)) for opt in REPO_OPTS)
        entry['url'] = node.getElementsByTagName('url')[0].firstChild.data
        repos.append(entry)
    return repos
def _repo_changes(realrepo, repocmp):
"Check whether the 2 given repos have different settings."
for k in repocmp:
if repocmp[k] and k not in realrepo:
return True
for k, v in realrepo.items():
if k in repocmp and repocmp[k]:
valold = str(repocmp[k] or "")
valnew = v or ""
if k == "url":
valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
if valold != valnew:
return True
return False
def repo_exists(module, repodata, overwrite_multiple):
    """Check whether the repository already exists.

    returns (exists, mod, old_repos)
    exists: whether a matching (alias, URL) repo exists
    mod: whether there are changes compared to the existing repo
    old_repos: list of matching repos (None when nothing matched)
    """
    existing_repos = _parse_repos(module)
    # Collect existing repos whose alias or url matches the requested repo.
    matches = []
    for kw in ['alias', 'url']:
        name = repodata[kw]
        for old in existing_repos:
            if repodata[kw] == old[kw] and old not in matches:
                matches.append(old)
    if not matches:
        # Repo does not exist yet.
        return (False, False, None)
    if len(matches) == 1:
        # Found an existing repo; report whether its settings differ.
        return (True, _repo_changes(matches[0], repodata), matches)
    # Two or more matches.
    if overwrite_multiple:
        return (True, True, matches)
    errmsg = 'More than one repo matched "%s": "%s".' % (name, matches)
    errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten'
    module.fail_json(msg=errmsg)
def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
"Adds the repo, removes old repos before, that would conflict."
# Builds the `zypper addrepo` command flag by flag; version-gated flags
# that the installed zypper does not support are skipped with a warning.
repo = repodata['url']
cmd = _get_cmd('addrepo', '--check')
if repodata['name']:
cmd.extend(['--name', repodata['name']])
# priority on addrepo available since 1.12.25
# https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
if repodata['priority']:
if zypper_version >= LooseVersion('1.12.25'):
cmd.extend(['--priority', str(repodata['priority'])])
else:
warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
if repodata['enabled'] == '0':
cmd.append('--disable')
# gpgcheck available since 1.6.2
# https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
# the default changed in the past, so don't assume a default here and show warning for old zypper versions
if zypper_version >= LooseVersion('1.6.2'):
if repodata['gpgcheck'] == '1':
cmd.append('--gpgcheck')
else:
cmd.append('--no-gpgcheck')
else:
warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
if repodata['autorefresh'] == '1':
cmd.append('--refresh')
cmd.append(repo)
# .repo files carry their own alias; plain URLs need the alias argument.
if not repo.endswith('.repo'):
cmd.append(repodata['alias'])
# Remove conflicting pre-existing repos before adding the new definition.
if old_repos is not None:
for oldrepo in old_repos:
remove_repo(module, oldrepo['url'])
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
return rc, stdout, stderr
def remove_repo(module, repo):
    """Removes the repo (identified by alias or URL) via `zypper removerepo`."""
    return module.run_command(_get_cmd('removerepo', repo), check_rc=True)
def get_zypper_version(module):
    """Return the installed zypper version; falls back to 1.0 when the
    version cannot be determined."""
    rc, stdout, _ = module.run_command(['/usr/bin/zypper', '--version'])
    if rc != 0 or not stdout.startswith('zypper '):
        return LooseVersion('1.0')
    return LooseVersion(stdout.split()[1])
def runrefreshrepo(module, auto_import_keys=False, shortname=None):
    """Forces zypper to refresh repo metadata, optionally auto-importing gpg
    keys and/or restricting the refresh to a single repo."""
    if auto_import_keys:
        cmd = _get_cmd('--gpg-auto-import-keys', 'refresh', '--force')
    else:
        cmd = _get_cmd('refresh', '--force')
    if shortname is not None:
        cmd += ['-r', shortname]
    return module.run_command(cmd, check_rc=True)
def main():
# Entry point: validate parameters, normalize them into `repodata`, then
# add/modify or remove the repository and report the result to Ansible.
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False),
repo=dict(required=False),
state=dict(choices=['present', 'absent'], default='present'),
runrefresh=dict(required=False, default='no', type='bool'),
description=dict(required=False),
disable_gpg_check=dict(required=False, default=False, type='bool'),
autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']),
priority=dict(required=False, type='int'),
enabled=dict(required=False, default=True, type='bool'),
overwrite_multiple=dict(required=False, default=False, type='bool'),
auto_import_keys=dict(required=False, default=False, type='bool'),
),
supports_check_mode=False,
required_one_of=[['state', 'runrefresh']],
)
repo = module.params['repo']
alias = module.params['name']
state = module.params['state']
overwrite_multiple = module.params['overwrite_multiple']
auto_import_keys = module.params['auto_import_keys']
runrefresh = module.params['runrefresh']
zypper_version = get_zypper_version(module)
warnings = [] # collect warning messages for final output
repodata = {
'url': repo,
'alias': alias,
'name': module.params['description'],
'priority': module.params['priority'],
}
# rewrite bools in the language that zypper lr -x provides for easier comparison
if module.params['enabled']:
repodata['enabled'] = '1'
else:
repodata['enabled'] = '0'
if module.params['disable_gpg_check']:
repodata['gpgcheck'] = '0'
else:
repodata['gpgcheck'] = '1'
if module.params['autorefresh']:
repodata['autorefresh'] = '1'
else:
repodata['autorefresh'] = '0'
def exit_unchanged():
# Common "nothing to do" exit path.
module.exit_json(changed=False, repodata=repodata, state=state)
# Check run-time module parameters
if repo == '*' or alias == '*':
if runrefresh:
runrefreshrepo(module, auto_import_keys)
module.exit_json(changed=False, runrefresh=True)
else:
module.fail_json(msg='repo=* can only be used with the runrefresh option.')
if state == 'present' and not repo:
module.fail_json(msg='Module option state=present requires repo')
if state == 'absent' and not repo and not alias:
module.fail_json(msg='Alias or repo parameter required when state=absent')
# .repo files must not carry an explicit name; plain URLs require one.
if repo and repo.endswith('.repo'):
if alias:
module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files')
else:
if not alias and state == "present":
module.fail_json(msg='Name required when adding non-repo files.')
exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
# `shortname` identifies the repo for refresh/remove operations.
if repo:
shortname = repo
else:
shortname = alias
if state == 'present':
if exists and not mod:
# Already configured as requested; optionally refresh, then exit.
if runrefresh:
runrefreshrepo(module, auto_import_keys, shortname)
exit_unchanged()
rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
if rc == 0 and (runrefresh or auto_import_keys):
runrefreshrepo(module, auto_import_keys, shortname)
elif state == 'absent':
if not exists:
exit_unchanged()
rc, stdout, stderr = remove_repo(module, shortname)
if rc == 0:
module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
else:
module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
if __name__ == '__main__':
main()
| gpl-3.0 |
grayark/osquery | tools/tests/test_osqueryi.py | 36 | 5050 | #!/usr/bin/env python
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pyexpect.replwrap will not work with unicode_literals
#from __future__ import unicode_literals
import os
import random
import unittest
# osquery-specific testing utils
import test_base
SHELL_TIMEOUT = 10
class OsqueryiTest(unittest.TestCase):
    """Integration tests that drive the osqueryi shell binary directly."""

    def setUp(self):
        self.binary = os.path.join(test_base.ARGS.build, "osquery", "osqueryi")
        self.osqueryi = test_base.OsqueryWrapper(self.binary)
        # Randomize the database path so repeated runs do not collide on a
        # stale database directory.
        self.dbpath = "%s%s" % (
            test_base.CONFIG["options"]["database_path"],
            str(random.randint(1000, 9999)))

    def test_error(self):
        '''Test that we throw an error on bad query'''
        self.osqueryi.run_command(' ')
        self.assertRaises(test_base.OsqueryException,
                          self.osqueryi.run_query, 'foo')

    def test_config_check_success(self):
        '''Test that a 0-config passes'''
        proc = test_base.TimeoutRunner([
            self.binary,
            "--config_check",
            "--database_path=%s" % (self.dbpath),
            "--config_path=%s/test.config" % test_base.SCRIPT_DIR
        ],
            SHELL_TIMEOUT)
        # Print captured output *before* asserting so the output is visible
        # in the test log when an assertion fails.
        print(proc.stdout)
        print(proc.stderr)
        self.assertEqual(proc.stdout, "")
        self.assertEqual(proc.proc.poll(), 0)

    def test_config_check_failure(self):
        '''Test that a missing config fails'''
        proc = test_base.TimeoutRunner([
            self.binary,
            "--config_check",
            "--database_path=%s" % (self.dbpath),
            "--config_path=/this/path/does/not/exist"
        ],
            SHELL_TIMEOUT)
        print(proc.stdout)
        print(proc.stderr)
        self.assertNotEqual(proc.stderr, "")
        self.assertEqual(proc.proc.poll(), 1)

        # Now with a valid path, but invalid content.
        proc = test_base.TimeoutRunner([
            self.binary,
            "--config_check",
            "--database_path=%s" % (self.dbpath),
            "--config_path=%s/test.badconfig" % test_base.SCRIPT_DIR
        ],
            SHELL_TIMEOUT)
        self.assertEqual(proc.proc.poll(), 1)
        self.assertNotEqual(proc.stderr, "")

        # Finally with a missing config plugin.
        proc = test_base.TimeoutRunner([
            self.binary,
            "--config_check",
            "--database_path=%s" % (self.dbpath),
            "--config_plugin=does_not_exist"
        ],
            SHELL_TIMEOUT)
        self.assertNotEqual(proc.stderr, "")
        self.assertNotEqual(proc.proc.poll(), 0)

    def test_meta_commands(self):
        '''Test the supported meta shell/help/info commands'''
        commands = [
            '.help',
            '.all',
            '.all osquery_info',
            '.all this_table_does_not_exist',
            '.echo',
            '.echo on',
            '.echo off',
            '.header',
            '.header off',
            '.header on',
            '.mode',
            '.mode csv',
            '.mode column',
            '.mode line',
            '.mode list',
            '.mode pretty',
            '.mode this_mode_does_not_exists',
            '.nullvalue',
            '.nullvalue ""',
            '.print',
            '.print hello',
            '.schema osquery_info',
            '.schema this_table_does_not_exist',
            '.schema',
            '.separator',
            '.separator ,',
            '.show',
            '.tables osquery',
            '.tables osquery_info',
            '.tables this_table_does_not_exist',
            '.tables',
            '.trace',
            '.width',
            '.width 80',
            '.timer',
            '.timer on',
            '.timer off'
        ]
        for command in commands:
            # Each meta command must be accepted without raising; the output
            # itself is not validated here.
            self.osqueryi.run_command(command)

    def test_time(self):
        '''Demonstrating basic usage of OsqueryWrapper with the time table'''
        self.osqueryi.run_command(' ')  # flush error output
        result = self.osqueryi.run_query(
            'SELECT hour, minutes, seconds FROM time;')
        self.assertEqual(len(result), 1)
        row = result[0]
        self.assertTrue(0 <= int(row['hour']) <= 24)
        self.assertTrue(0 <= int(row['minutes']) <= 60)
        self.assertTrue(0 <= int(row['seconds']) <= 60)

    def test_config_bad_json(self):
        '''Basic queries still succeed when the config path is unusable'''
        self.osqueryi = test_base.OsqueryWrapper(self.binary,
                                                 args={"config_path": "/"})
        result = self.osqueryi.run_query('SELECT * FROM time;')
        self.assertEqual(len(result), 1)
# Allow running this test module directly via the project's Tester harness.
if __name__ == '__main__':
    test_base.Tester().run()
| bsd-3-clause |
Asteroid-Project/android_external_skia | platform_tools/android/gyp_gen/tool_makefile_writer.py | 4 | 3571 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code for generating Android.mk for a tool."""
import android_framework_gyp
import gypd_parser
import makefile_writer
import os
import vars_dict_lib
def write_tool_android_mk(target_dir, var_dict, place_in_local_tmp):
  """Emit an 'Android.mk' describing one Skia tool executable.

  Args:
    target_dir: Directory that receives the makefile. Must not be None.
    var_dict: VarsDict holding the variables to write into the makefile.
    place_in_local_tmp: If True, also emit the lines that sync the resulting
        executable to /data/local/tmp on the device.
  """
  makefile_path = os.path.join(target_dir, 'Android.mk')
  with open(makefile_path, 'w') as makefile:
    makefile.write(makefile_writer.AUTOGEN_WARNING)
    if place_in_local_tmp:
      makefile.write('local_target_dir := $(TARGET_OUT_DATA)/local/tmp\n')
    makefile_writer.write_local_path(makefile)
    makefile_writer.write_clear_vars(makefile)
    makefile_writer.write_local_vars(makefile, var_dict, False, None)
    if place_in_local_tmp:
      makefile.write('LOCAL_MODULE_PATH := $(local_target_dir)\n')
    makefile.write('include $(BUILD_EXECUTABLE)\n')
def generate_tool(gyp_dir, target_file, skia_trunk, dest_dir,
                  skia_lib_var_dict, local_module_name, local_module_tags,
                  place_in_local_tmp=False):
  """Parse a tool's gyp file and write the corresponding Android.mk.

  Args:
    gyp_dir: Directory containing gyp files.
    target_file: gyp file (inside gyp_dir) describing the tool to build.
    skia_trunk: Root of the Skia tree; combined with dest_dir to decide where
        'Android.mk' is written.
    dest_dir: Destination for 'Android.mk' relative to skia_trunk; also used
        when writing relative paths inside the makefile.
    skia_lib_var_dict: VarsDict for libskia itself, consulted so nothing
        already provided by the library is duplicated here.
    local_module_name: Value for LOCAL_MODULE.
    local_module_tags: Iterable of tags for LOCAL_MODULE_TAGS.
    place_in_local_tmp: If True, the executable will be synced to
        /data/local/tmp.
  """
  gypd_path = android_framework_gyp.main(target_dir=gyp_dir,
                                         target_file=target_file,
                                         skia_arch_type='other',
                                         have_neon=False)
  tool_vars = vars_dict_lib.VarsDict()
  # Seed the known targets with everything libskia already covers so those
  # targets are not parsed a second time.
  tool_vars.KNOWN_TARGETS.set(skia_lib_var_dict.KNOWN_TARGETS)
  gypd_parser.parse_gypd(tool_vars, gypd_path, dest_dir)
  android_framework_gyp.clean_gypd_files(gyp_dir)
  tool_vars.LOCAL_MODULE.add(local_module_name)
  for tag in local_module_tags:
    tool_vars.LOCAL_MODULE_TAGS.add(tag)
  # Defines inherited from skia_lib would be redundant in the tool's makefile.
  for define in skia_lib_var_dict.DEFINES:
    try:
      tool_vars.DEFINES.remove(define)
    except ValueError:
      # The tool's own parse simply did not mention this define.
      pass
  full_dest = os.path.join(skia_trunk, dest_dir) if skia_trunk else dest_dir
  # During testing there is no per-tool subdirectory (just a temporary
  # folder), so create the destination when it is missing.
  if not os.path.exists(full_dest):
    os.mkdir(full_dest)
  write_tool_android_mk(target_dir=full_dest, var_dict=tool_vars,
                        place_in_local_tmp=place_in_local_tmp)
| bsd-3-clause |
cauchyturing/kaggle_diabetic_RAM | convert.py | 6 | 4457 | """Resize and crop images to square, save as tiff."""
from __future__ import division, print_function
import os
from multiprocessing.pool import Pool
import click
import numpy as np
from PIL import Image, ImageFilter
import data
N_PROC = 2
def convert(fname, crop_size):
    """Open the image at ``fname``, crop away the dark background (estimated
    from the left/right edges) when the image is wide, and resize the result
    to ``crop_size`` x ``crop_size``. Returns a PIL Image."""
    img = Image.open(fname)
    # Blur before thresholding so isolated bright noise pixels in the border
    # do not inflate the estimated background level.
    blurred = img.filter(ImageFilter.BLUR)
    ba = np.array(blurred)
    h, w, _ = ba.shape
    if w > 1.2 * h:
        # Estimate the per-channel background as the max over thin strips
        # (roughly 1/32 of the width) at the left and right edges.
        left_max = ba[:, : w // 32, :].max(axis=(0, 1)).astype(int)
        right_max = ba[:, - w // 32:, :].max(axis=(0, 1)).astype(int)
        max_bg = np.maximum(left_max, right_max)
        # Foreground = pixels clearly brighter than the background estimate.
        foreground = (ba > max_bg + 10).astype(np.uint8)
        bbox = Image.fromarray(foreground).getbbox()
        if bbox is None:
            print('bbox none for {} (???)'.format(fname))
        else:
            left, upper, right, lower = bbox
            # if we selected less than 80% of the original
            # height, just crop the square
            if right - left < 0.8 * h or lower - upper < 0.8 * h:
                print('bbox too small for {}'.format(fname))
                bbox = None
    else:
        bbox = None
    if bbox is None:
        # Fall back to a centered square crop of the original image.
        bbox = square_bbox(img)
    cropped = img.crop(bbox)
    resized = cropped.resize([crop_size, crop_size])
    return resized
def square_bbox(img):
    """Return a centered, full-height crop box as (left, upper, right, lower).

    For landscape images this is an h-by-h square centered horizontally; for
    portrait or square images the box covers the whole image.
    """
    width, height = img.size
    margin = max((width - height) // 2, 0)
    return (margin, 0, min(width - (width - height) // 2, width), height)
def convert_square(fname, crop_size):
    """Center-crop the image at ``fname`` to a square region and resize it
    to ``crop_size`` x ``crop_size``, returning the resulting PIL Image."""
    image = Image.open(fname)
    return image.crop(square_bbox(image)).resize([crop_size, crop_size])
def get_convert_fname(fname, extension, directory, convert_directory):
    """Map a source image path to its converted destination path.

    The previous implementation used ``str.replace`` on the whole path, which
    corrupted paths whose directory components contained the substring
    'jpeg' (or repeated the source directory name). Build the target path
    structurally instead: keep the portion of ``fname`` relative to
    ``directory``, swap its extension for ``extension``, and re-root it
    under ``convert_directory``.
    """
    relative = os.path.relpath(fname, directory)
    stem, _ = os.path.splitext(relative)
    return os.path.join(convert_directory, stem + '.' + extension)
def process(args):
    """Worker entry point: convert one image unless its output already exists.

    ``args`` is a (conversion function, parameter tuple) pair so the whole
    job can be dispatched through ``Pool.map``.
    """
    converter, params = args
    directory, convert_directory, fname, crop_size, extension = params
    target = get_convert_fname(fname, extension, directory, convert_directory)
    if os.path.exists(target):
        return
    save(converter(fname, crop_size), target)
def save(img, fname):
    """Write a PIL image to ``fname`` (the ``quality`` option is honored by
    lossy encoders such as JPEG)."""
    img.save(fname, quality=97)
@click.command()
@click.option('--directory', default='data/train', show_default=True,
              help="Directory with original images.")
@click.option('--convert_directory', default='data/train_res', show_default=True,
              help="Where to save converted images.")
@click.option('--test', is_flag=True, default=False, show_default=True,
              help="Convert images one by one and examine them on screen.")
@click.option('--crop_size', default=256, show_default=True,
              help="Size of converted images.")
@click.option('--extension', default='tiff', show_default=True,
              help="Filetype of converted images.")
def main(directory, convert_directory, test, crop_size, extension):
    """Crop and resize every jpeg/tiff under ``directory`` into
    ``convert_directory`` using a pool of worker processes."""
    try:
        os.mkdir(convert_directory)
    except OSError:
        # Destination already exists (or cannot be created); continue and
        # let later writes fail loudly if the path is truly unusable.
        pass
    filenames = [os.path.join(dp, f) for dp, dn, fn in os.walk(directory)
                 for f in fn if f.endswith('jpeg') or f.endswith('tiff')]
    filenames = sorted(filenames)
    if test:
        names = data.get_names(filenames)
        y = data.get_labels(names)
        # Interactive preview mode: only images with label level 1 are shown
        # (presumably the interesting class — confirm against data labels).
        for f, level in zip(filenames, y):
            if level == 1:
                try:
                    img = convert(f, crop_size)
                    img.show()
                    Image.open(f).show()
                    # Py2/Py3 compatibility: use raw_input when available.
                    real_raw_input = vars(__builtins__).get('raw_input',input)
                    real_raw_input('enter for next')
                except KeyboardInterrupt:
                    exit(0)
    print("Resizing images in {} to {}, this takes a while."
          "".format(directory, convert_directory))
    n = len(filenames)
    # process in batches, sometimes weird things happen with Pool on my machine
    batchsize = 500
    batches = n // batchsize + 1
    pool = Pool(N_PROC)
    args = []
    for f in filenames:
        args.append((convert, (directory, convert_directory, f, crop_size,
                               extension)))
    for i in range(batches):
        print("batch {:>2} / {}".format(i + 1, batches))
        pool.map(process, args[i * batchsize: (i + 1) * batchsize])
    pool.close()
    print('done')
| mit |
AuyaJackie/odoo | addons/sale_margin/__init__.py | 441 | 1042 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_margin
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ryfeus/lambda-packs | Tensorflow/source/numpy/core/memmap.py | 26 | 11432 | from __future__ import division, absolute_import, print_function
import numpy as np
from .numeric import uint8, ndarray, dtype
from numpy.compat import long, basestring, is_pathlib_path
__all__ = ['memmap']
dtypedescr = dtype  # backward-compatible alias for numpy's dtype constructor
# File modes accepted by memmap, and the subset that allows writing back.
valid_filemodes = ["r", "c", "r+", "w+"]
writeable_filemodes = ["r+", "w+"]
# Long-form mode names accepted as synonyms for the short mmap-style codes.
mode_equivalents = {
    "readonly":"r",
    "copyonwrite":"c",
    "readwrite":"r+",
    "write":"w+"
    }
class memmap(ndarray):
    """Create a memory-map to an array stored in a *binary* file on disk.

    Memory-mapped files are used for accessing small segments of large files
    on disk, without reading the entire file into memory.  NumPy's
    memmap's are array-like objects.  This differs from Python's ``mmap``
    module, which uses file-like objects.

    This subclass of ndarray has some unpleasant interactions with
    some operations, because it doesn't quite fit properly as a subclass.
    An alternative to using this subclass is to create the ``mmap``
    object yourself, then create an ndarray with ndarray.__new__ directly,
    passing the object created in its 'buffer=' parameter.

    This class may at some point be turned into a factory function
    which returns a view into an mmap buffer.

    Delete the memmap instance to close.

    Parameters
    ----------
    filename : str, file-like object, or pathlib.Path instance
        The file name or file object to be used as the array data buffer.
    dtype : data-type, optional
        The data-type used to interpret the file contents.
        Default is `uint8`.
    mode : {'r+', 'r', 'w+', 'c'}, optional
        The file is opened in this mode:

        +------+-------------------------------------------------------------+
        | 'r'  | Open existing file for reading only.                        |
        +------+-------------------------------------------------------------+
        | 'r+' | Open existing file for reading and writing.                 |
        +------+-------------------------------------------------------------+
        | 'w+' | Create or overwrite existing file for reading and writing.  |
        +------+-------------------------------------------------------------+
        | 'c'  | Copy-on-write: assignments affect data in memory, but       |
        |      | changes are not saved to disk.  The file on disk is         |
        |      | read-only.                                                  |
        +------+-------------------------------------------------------------+

        Default is 'r+'.
    offset : int, optional
        In the file, array data starts at this offset. Since `offset` is
        measured in bytes, it should normally be a multiple of the byte-size
        of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
        file are valid; The file will be extended to accommodate the
        additional data. By default, ``memmap`` will start at the beginning of
        the file, even if ``filename`` is a file pointer ``fp`` and
        ``fp.tell() != 0``.
    shape : tuple, optional
        The desired shape of the array. If ``mode == 'r'`` and the number
        of remaining bytes after `offset` is not a multiple of the byte-size
        of `dtype`, you must specify `shape`. By default, the returned array
        will be 1-D with the number of elements determined by file size
        and data-type.
    order : {'C', 'F'}, optional
        Specify the order of the ndarray memory layout:
        :term:`row-major`, C-style or :term:`column-major`,
        Fortran-style.  This only has an effect if the shape is
        greater than 1-D.  The default order is 'C'.

    Attributes
    ----------
    filename : str or pathlib.Path instance
        Path to the mapped file.
    offset : int
        Offset position in the file.
    mode : str
        File mode.

    Methods
    -------
    flush
        Flush any changes in memory to file on disk.
        When you delete a memmap object, flush is called first to write
        changes to disk before removing the object.

    See also
    --------
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.

    Notes
    -----
    The memmap object can be used anywhere an ndarray is accepted.
    Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
    ``True``.

    Memory-mapped files cannot be larger than 2GB on 32-bit systems.

    When a memmap causes a file to be created or extended beyond its
    current size in the filesystem, the contents of the new part are
    unspecified. On systems with POSIX filesystem semantics, the extended
    part will be filled with zero bytes.

    Examples
    --------
    >>> data = np.arange(12, dtype='float32')
    >>> data.resize((3,4))

    This example uses a temporary file so that doctest doesn't write
    files to your directory. You would use a 'normal' filename.

    >>> from tempfile import mkdtemp
    >>> import os.path as path
    >>> filename = path.join(mkdtemp(), 'newfile.dat')

    Create a memmap with dtype and shape that matches our data:

    >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
    >>> fp
    memmap([[ 0.,  0.,  0.,  0.],
            [ 0.,  0.,  0.,  0.],
            [ 0.,  0.,  0.,  0.]], dtype=float32)

    Write data to memmap array:

    >>> fp[:] = data[:]
    >>> fp
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    >>> fp.filename == path.abspath(filename)
    True

    Deletion flushes memory changes to disk before removing the object:

    >>> del fp

    Load the memmap and verify data was stored:

    >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
    >>> newfp
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    Read-only memmap:

    >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
    >>> fpr.flags.writeable
    False

    Copy-on-write memmap:

    >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
    >>> fpc.flags.writeable
    True

    It's possible to assign to copy-on-write array, but values are only
    written into the memory copy of the array, and not written to disk:

    >>> fpc
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)
    >>> fpc[0,:] = 0
    >>> fpc
    memmap([[  0.,   0.,   0.,   0.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    File on disk is unchanged:

    >>> fpr
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    Offset into a memmap:

    >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
    >>> fpo
    memmap([  4.,   5.,   6.,   7.,   8.,   9.,  10.,  11.], dtype=float32)

    """

    # Low priority so that, in mixed operations, the other operand's type
    # wins and results do not masquerade as file-backed memmaps.
    __array_priority__ = -100.0

    def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
                shape=None, order='C'):
        # Import here to minimize 'import numpy' overhead
        import mmap
        import os.path
        # Normalize long-form mode names ("readonly", ...) to the short codes.
        try:
            mode = mode_equivalents[mode]
        except KeyError:
            if mode not in valid_filemodes:
                raise ValueError("mode must be one of %s" %
                                 (valid_filemodes + list(mode_equivalents.keys())))
        # Accept an already-open file object, a pathlib.Path, or a path string.
        if hasattr(filename, 'read'):
            fid = filename
            own_file = False
        elif is_pathlib_path(filename):
            fid = filename.open((mode == 'c' and 'r' or mode)+'b')
            own_file = True
        else:
            fid = open(filename, (mode == 'c' and 'r' or mode)+'b')
            own_file = True

        if (mode == 'w+') and shape is None:
            raise ValueError("shape must be given")

        # Seek to the end to learn the current length of the file.
        fid.seek(0, 2)
        flen = fid.tell()
        descr = dtypedescr(dtype)
        _dbytes = descr.itemsize

        if shape is None:
            # Infer a 1-D shape from the bytes available past `offset`.
            bytes = flen - offset
            if (bytes % _dbytes):
                fid.close()
                raise ValueError("Size of available data is not a "
                                 "multiple of the data-type size.")
            size = bytes // _dbytes
            shape = (size,)
        else:
            if not isinstance(shape, tuple):
                shape = (shape,)
            size = 1
            for k in shape:
                size *= k

        bytes = long(offset + size*_dbytes)

        # Extend the file on disk when it is too short for the requested map.
        if mode == 'w+' or (mode == 'r+' and flen < bytes):
            fid.seek(bytes - 1, 0)
            fid.write(b'\0')
            fid.flush()

        if mode == 'c':
            acc = mmap.ACCESS_COPY
        elif mode == 'r':
            acc = mmap.ACCESS_READ
        else:
            acc = mmap.ACCESS_WRITE

        # mmap offsets must be aligned to ALLOCATIONGRANULARITY, so map from
        # the aligned start and index back in via array_offset.
        start = offset - offset % mmap.ALLOCATIONGRANULARITY
        bytes -= start
        array_offset = offset - start
        mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)

        self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
                               offset=array_offset, order=order)
        self._mmap = mm
        self.offset = offset
        self.mode = mode

        if isinstance(filename, basestring):
            self.filename = os.path.abspath(filename)
        elif is_pathlib_path(filename):
            self.filename = filename.resolve()
        # py3 returns int for TemporaryFile().name
        elif (hasattr(filename, "name") and
              isinstance(filename.name, basestring)):
            self.filename = os.path.abspath(filename.name)
        # same as memmap copies (e.g. memmap + 1)
        else:
            self.filename = None

        if own_file:
            fid.close()

        return self

    def __array_finalize__(self, obj):
        # Views that still share memory with a memmap inherit its file
        # metadata; anything else becomes an in-memory-only instance.
        if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
            self._mmap = obj._mmap
            self.filename = obj.filename
            self.offset = obj.offset
            self.mode = obj.mode
        else:
            self._mmap = None
            self.filename = None
            self.offset = None
            self.mode = None

    def flush(self):
        """
        Write any changes in the array to the file on disk.

        For further information, see `memmap`.

        Parameters
        ----------
        None

        See Also
        --------
        memmap

        """
        if self.base is not None and hasattr(self.base, 'flush'):
            self.base.flush()

    def __array_wrap__(self, arr, context=None):
        arr = super(memmap, self).__array_wrap__(arr, context)

        # Return a memmap if a memmap was given as the output of the
        # ufunc. Leave the arr class unchanged if self is not a memmap
        # to keep original memmap subclasses behavior
        if self is arr or type(self) is not memmap:
            return arr

        # Return scalar instead of 0d memmap, e.g. for np.sum with
        # axis=None
        if arr.shape == ():
            return arr[()]

        # Return ndarray otherwise
        return arr.view(np.ndarray)

    def __getitem__(self, index):
        res = super(memmap, self).__getitem__(index)
        # A result whose _mmap is None is not backed by the file (see
        # __array_finalize__); present it as a plain ndarray instead.
        if type(res) is memmap and res._mmap is None:
            return res.view(type=ndarray)
        return res
| mit |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/tests/unit/gapic/googleads.v6/services/test_mobile_app_category_constant_service.py | 1 | 31876 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v6.resources.types import mobile_app_category_constant
from google.ads.googleads.v6.services.services.mobile_app_category_constant_service import MobileAppCategoryConstantServiceClient
from google.ads.googleads.v6.services.services.mobile_app_category_constant_service import transports
from google.ads.googleads.v6.services.types import mobile_app_category_constant_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client-certificate callback: returns a static
    (certificate bytes, key bytes) pair."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a sentinel endpoint for mTLS-switching tests, except when the
    client's default endpoint is localhost (kept unchanged)."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return client.DEFAULT_ENDPOINT
    return "foo.googleapis.com"
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts onto their
    mTLS variants, is a no-op on already-mTLS hosts, and leaves None and
    non-Google hosts untouched."""
    convert = MobileAppCategoryConstantServiceClient._get_default_mtls_endpoint
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    for given, expected in cases:
        assert convert(given) == expected
def test_mobile_app_category_constant_service_client_from_service_account_info():
    """Clients built via from_service_account_info carry the parsed
    credentials and point at the default endpoint."""
    fake_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials,
                           'from_service_account_info') as factory:
        factory.return_value = fake_creds
        client = MobileAppCategoryConstantServiceClient.from_service_account_info(
            {"valid": True})
        assert client.transport._credentials == fake_creds
        assert client.transport._host == 'googleads.googleapis.com:443'
def test_mobile_app_category_constant_service_client_from_service_account_file():
    """from_service_account_file and its json alias both wire in the loaded
    credentials; the client talks to the default endpoint."""
    fake_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials,
                           'from_service_account_file') as factory:
        factory.return_value = fake_creds
        for constructor in (
                MobileAppCategoryConstantServiceClient.from_service_account_file,
                MobileAppCategoryConstantServiceClient.from_service_account_json,
        ):
            client = constructor("dummy/file/path.json")
            assert client.transport._credentials == fake_creds
        assert client.transport._host == 'googleads.googleapis.com:443'
def test_mobile_app_category_constant_service_client_get_transport_class():
    """Both the default lookup and an explicit "grpc" lookup resolve to the
    gRPC transport class."""
    default_transport = MobileAppCategoryConstantServiceClient.get_transport_class()
    named_transport = MobileAppCategoryConstantServiceClient.get_transport_class("grpc")
    assert default_transport == transports.MobileAppCategoryConstantServiceGrpcTransport
    assert named_transport == transports.MobileAppCategoryConstantServiceGrpcTransport
@mock.patch.object(MobileAppCategoryConstantServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MobileAppCategoryConstantServiceClient))
def test_mobile_app_category_constant_service_client_client_options():
    """Exercise how client_options and GOOGLE_API_USE_MTLS_ENDPOINT choose
    the endpoint passed to the gRPC transport."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.MobileAppCategoryConstantServiceClient.get_transport_class') as gtc:
        transport = transports.MobileAppCategoryConstantServiceGrpcTransport(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = MobileAppCategoryConstantServiceClient(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.MobileAppCategoryConstantServiceClient.get_transport_class') as gtc:
        client = MobileAppCategoryConstantServiceClient(transport="grpc")
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = MobileAppCategoryConstantServiceClient(client_options=options)
        grpc_transport.assert_called_once_with(
            ssl_channel_credentials=None,
            credentials=None,
            host="squid.clam.whelk",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
    # is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
            grpc_transport.return_value = None
            client = MobileAppCategoryConstantServiceClient()
            grpc_transport.assert_called_once_with(
                ssl_channel_credentials=None,
                credentials=None,
                host=client.DEFAULT_ENDPOINT,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
            grpc_transport.return_value = None
            client = MobileAppCategoryConstantServiceClient()
            grpc_transport.assert_called_once_with(
                ssl_channel_credentials=None,
                credentials=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = MobileAppCategoryConstantServiceClient()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = MobileAppCategoryConstantServiceClient()
@mock.patch.object(MobileAppCategoryConstantServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MobileAppCategoryConstantServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_mobile_app_category_constant_service_client_mtls_env_auto(use_client_cert_env):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the
    mTLS endpoint exactly when a client certificate is available AND
    GOOGLE_API_USE_CLIENT_CERTIFICATE permits using it."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
            ssl_channel_creds = mock.Mock()
            with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
                grpc_transport.return_value = None
                client = MobileAppCategoryConstantServiceClient(client_options=options)
                if use_client_cert_env == "false":
                    expected_ssl_channel_creds = None
                    expected_host = client.DEFAULT_ENDPOINT
                else:
                    expected_ssl_channel_creds = ssl_channel_creds
                    expected_host = client.DEFAULT_MTLS_ENDPOINT
                grpc_transport.assert_called_once_with(
                    ssl_channel_credentials=expected_ssl_channel_creds,
                    credentials=None,
                    host=expected_host,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
            with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
                with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
                    with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
                        if use_client_cert_env == "false":
                            is_mtls_mock.return_value = False
                            ssl_credentials_mock.return_value = None
                            expected_host = client.DEFAULT_ENDPOINT
                            expected_ssl_channel_creds = None
                        else:
                            is_mtls_mock.return_value = True
                            ssl_credentials_mock.return_value = mock.Mock()
                            expected_host = client.DEFAULT_MTLS_ENDPOINT
                            expected_ssl_channel_creds = ssl_credentials_mock.return_value
                        grpc_transport.return_value = None
                        client = MobileAppCategoryConstantServiceClient()
                        grpc_transport.assert_called_once_with(
                            ssl_channel_credentials=expected_ssl_channel_creds,
                            credentials=None,
                            host=expected_host,
                            client_info=transports.base.DEFAULT_CLIENT_INFO,
                        )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
            with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
                with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
                    is_mtls_mock.return_value = False
                    grpc_transport.return_value = None
                    client = MobileAppCategoryConstantServiceClient()
                    grpc_transport.assert_called_once_with(
                        ssl_channel_credentials=None,
                        credentials=None,
                        host=client.DEFAULT_ENDPOINT,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                    )
def test_mobile_app_category_constant_service_client_client_options_from_dict():
    """A plain dict passed as ``client_options`` is honored (api_endpoint)."""
    with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = MobileAppCategoryConstantServiceClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        # The custom endpoint must be forwarded to the transport as ``host``.
        grpc_transport.assert_called_once_with(
            ssl_channel_credentials=None,
            credentials=None,
            host="squid.clam.whelk",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_get_mobile_app_category_constant(transport: str = 'grpc', request_type=mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest):
    """GetMobileAppCategoryConstant round-trip against a stubbed gRPC call."""
    client = MobileAppCategoryConstantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_mobile_app_category_constant),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = mobile_app_category_constant.MobileAppCategoryConstant(
            resource_name='resource_name_value',
            id=205,
            name='name_value',
        )
        response = client.get_mobile_app_category_constant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, mobile_app_category_constant.MobileAppCategoryConstant)
    assert response.resource_name == 'resource_name_value'
    assert response.id == 205
    assert response.name == 'name_value'
def test_get_mobile_app_category_constant_from_dict():
    # Re-run the main RPC test with a plain dict request to confirm the
    # client coerces dicts into the proto request type.
    test_get_mobile_app_category_constant(request_type=dict)
def test_get_mobile_app_category_constant_field_headers():
    """``resource_name`` from the request is propagated as a routing header."""
    client = MobileAppCategoryConstantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest()
    request.resource_name = 'resource_name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_mobile_app_category_constant),
            '__call__') as call:
        call.return_value = mobile_app_category_constant.MobileAppCategoryConstant()
        client.get_mobile_app_category_constant(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'resource_name=resource_name/value',
    ) in kw['metadata']
def test_get_mobile_app_category_constant_flattened():
    """Flattened keyword arguments are packed into the request message."""
    client = MobileAppCategoryConstantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_mobile_app_category_constant),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = mobile_app_category_constant.MobileAppCategoryConstant()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_mobile_app_category_constant(
            resource_name='resource_name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].resource_name == 'resource_name_value'
def test_get_mobile_app_category_constant_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = MobileAppCategoryConstantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_mobile_app_category_constant(
            mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest(),
            resource_name='resource_name_value',
        )
def test_credentials_transport_error():
    """Passing both credentials and a transport instance raises ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.MobileAppCategoryConstantServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MobileAppCategoryConstantServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
def test_transport_instance():
    """A client adopts a user-supplied transport instance verbatim."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.MobileAppCategoryConstantServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = MobileAppCategoryConstantServiceClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """The gRPC transport exposes a usable channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.MobileAppCategoryConstantServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
def test_transport_grpc_default():
    """With no transport specified, the client defaults to gRPC."""
    # A client should use the gRPC transport by default.
    client = MobileAppCategoryConstantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        client.transport,
        transports.MobileAppCategoryConstantServiceGrpcTransport,
    )
@pytest.mark.parametrize("transport_class", [
    transports.MobileAppCategoryConstantServiceGrpcTransport,
])
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_mobile_app_category_constant_service_base_transport():
    """Every RPC on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceTransport.__init__') as Transport:
        Transport.return_value = None
        transport = transports.MobileAppCategoryConstantServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        'get_mobile_app_category_constant',
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
def test_mobile_app_category_constant_service_base_transport_with_adc():
    """The base transport resolves ADC when no credentials are given."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.MobileAppCategoryConstantServiceTransport()
        adc.assert_called_once()
def test_mobile_app_category_constant_service_auth_adc():
    """The client requests ADC with the AdWords OAuth scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        MobileAppCategoryConstantServiceClient()
        adc.assert_called_once_with(scopes=(
            'https://www.googleapis.com/auth/adwords',
        ))
def test_mobile_app_category_constant_service_transport_auth_adc():
    """The transport itself also requests ADC with the AdWords scope."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.MobileAppCategoryConstantServiceGrpcTransport(host="squid.clam.whelk")
        adc.assert_called_once_with(scopes=(
            'https://www.googleapis.com/auth/adwords',
        ))
def test_mobile_app_category_constant_service_host_no_port():
    """An endpoint without a port gets the default :443 appended."""
    client = MobileAppCategoryConstantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
    )
    assert client.transport._host == 'googleads.googleapis.com:443'
def test_mobile_app_category_constant_service_host_with_port():
    """An endpoint with an explicit port is used unchanged."""
    client = MobileAppCategoryConstantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
    )
    assert client.transport._host == 'googleads.googleapis.com:8000'
def test_mobile_app_category_constant_service_grpc_transport_channel():
    """A user-provided gRPC channel is adopted verbatim by the transport."""
    channel = grpc.insecure_channel('http://localhost/')
    # Check that channel is used if provided.
    transport = transports.MobileAppCategoryConstantServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare to None with ``is`` (PEP 8 / pycodestyle E711); ``== None``
    # can be defeated by objects overriding __eq__.
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize("transport_class", [transports.MobileAppCategoryConstantServiceGrpcTransport])
def test_mobile_app_category_constant_service_transport_channel_mtls_with_client_cert_source(
    transport_class
):
    """mTLS channel is built from a caller-supplied cert source (deprecated path)."""
    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
        with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # api_mtls_endpoint/client_cert_source are deprecated, hence the
            # expected DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, 'default') as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=(
                    'https://www.googleapis.com/auth/adwords',
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.MobileAppCategoryConstantServiceGrpcTransport,])
def test_mobile_app_category_constant_service_transport_channel_mtls_with_adc(
    transport_class
):
    """mTLS channel falls back to ADC-provided client certs (deprecated path)."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # client_cert_source=None forces the ADC SslCredentials fallback;
            # api_mtls_endpoint is deprecated, hence the warning.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=(
                    'https://www.googleapis.com/auth/adwords',
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_mobile_app_category_constant_path():
    """Path helper renders a category id into its resource name."""
    category_id = "squid"
    rendered = MobileAppCategoryConstantServiceClient.mobile_app_category_constant_path(category_id)
    assert rendered == "mobileAppCategoryConstants/{}".format(category_id)
def test_parse_mobile_app_category_constant_path():
    """parse_* is the inverse of mobile_app_category_constant_path."""
    fields = {"mobile_app_category_id": "clam"}
    resource_name = MobileAppCategoryConstantServiceClient.mobile_app_category_constant_path(**fields)
    # Round-trip: parsing the rendered path recovers the original fields.
    assert MobileAppCategoryConstantServiceClient.parse_mobile_app_category_constant_path(resource_name) == fields
def test_common_billing_account_path():
    """Billing-account path template renders correctly."""
    billing_account = "whelk"
    rendered = MobileAppCategoryConstantServiceClient.common_billing_account_path(billing_account)
    assert rendered == "billingAccounts/{}".format(billing_account)
def test_parse_common_billing_account_path():
    """parse_* is the inverse of common_billing_account_path."""
    fields = {"billing_account": "octopus"}
    resource_name = MobileAppCategoryConstantServiceClient.common_billing_account_path(**fields)
    # Round-trip: parsing the rendered path recovers the original fields.
    assert MobileAppCategoryConstantServiceClient.parse_common_billing_account_path(resource_name) == fields
def test_common_folder_path():
    """Folder path template renders correctly."""
    folder = "oyster"
    rendered = MobileAppCategoryConstantServiceClient.common_folder_path(folder)
    assert rendered == "folders/{}".format(folder)
def test_parse_common_folder_path():
    """parse_* is the inverse of common_folder_path."""
    fields = {"folder": "nudibranch"}
    resource_name = MobileAppCategoryConstantServiceClient.common_folder_path(**fields)
    # Round-trip: parsing the rendered path recovers the original fields.
    assert MobileAppCategoryConstantServiceClient.parse_common_folder_path(resource_name) == fields
def test_common_organization_path():
    """Organization path template renders correctly."""
    organization = "cuttlefish"
    rendered = MobileAppCategoryConstantServiceClient.common_organization_path(organization)
    assert rendered == "organizations/{}".format(organization)
def test_parse_common_organization_path():
    """parse_* is the inverse of common_organization_path."""
    fields = {"organization": "mussel"}
    resource_name = MobileAppCategoryConstantServiceClient.common_organization_path(**fields)
    # Round-trip: parsing the rendered path recovers the original fields.
    assert MobileAppCategoryConstantServiceClient.parse_common_organization_path(resource_name) == fields
def test_common_project_path():
    """Project path template renders correctly."""
    project = "winkle"
    rendered = MobileAppCategoryConstantServiceClient.common_project_path(project)
    assert rendered == "projects/{}".format(project)
def test_parse_common_project_path():
    """parse_* is the inverse of common_project_path."""
    fields = {"project": "nautilus"}
    resource_name = MobileAppCategoryConstantServiceClient.common_project_path(**fields)
    # Round-trip: parsing the rendered path recovers the original fields.
    assert MobileAppCategoryConstantServiceClient.parse_common_project_path(resource_name) == fields
def test_common_location_path():
    """Location path template renders project and location."""
    project = "scallop"
    location = "abalone"
    rendered = MobileAppCategoryConstantServiceClient.common_location_path(project, location)
    assert rendered == "projects/{}/locations/{}".format(project, location)
def test_parse_common_location_path():
    """parse_* is the inverse of common_location_path."""
    fields = {"project": "squid", "location": "clam"}
    resource_name = MobileAppCategoryConstantServiceClient.common_location_path(**fields)
    # Round-trip: parsing the rendered path recovers the original fields.
    assert MobileAppCategoryConstantServiceClient.parse_common_location_path(resource_name) == fields
def test_client_withDEFAULT_CLIENT_INFO():
    """A custom ClientInfo is forwarded to _prep_wrapped_messages."""
    client_info = gapic_v1.client_info.ClientInfo()
    # ...via the client constructor.
    with mock.patch.object(transports.MobileAppCategoryConstantServiceTransport, '_prep_wrapped_messages') as prep:
        client = MobileAppCategoryConstantServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # ...and via direct transport construction.
    with mock.patch.object(transports.MobileAppCategoryConstantServiceTransport, '_prep_wrapped_messages') as prep:
        transport_class = MobileAppCategoryConstantServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
| apache-2.0 |
40223139/2015cdaa5-12 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/pkgdata.py | 603 | 2146 | """pkgdata is a simple, extensible way for a package to acquire data file
resources.
The getResource function is equivalent to the standard idioms, such as
the following minimal implementation::
import sys, os
def getResource(identifier, pkgname=__name__):
pkgpath = os.path.dirname(sys.modules[pkgname].__file__)
path = os.path.join(pkgpath, identifier)
return file(os.path.normpath(path), mode='rb')
When a __loader__ is present on the module given by __name__, it will defer
getResource to its get_data implementation and return it as a file-like
object (such as StringIO).
"""
__all__ = ['getResource']
import sys
import os
#from cStringIO import StringIO
from io import StringIO
# Feature flag: True when setuptools' pkg_resources is importable, in which
# case getResource defers to resource_stream (zip-safe resource access).
try:
    # Try to use setuptools if available.
    from pkg_resources import resource_stream
    _have_resource_stream = True
except ImportError:
    _have_resource_stream = False
def getResource(identifier, pkgname=__name__):
    """Acquire a readable object for a given package name and identifier.
    An IOError will be raised if the resource can not be found.
    For example::
        mydata = getResource('mypkgdata.jpg').read()
    Note that the package name must be fully qualified, if given, such
    that it would be found in sys.modules.
    In some cases, getResource will return a real file object. In that
    case, it may be useful to use its name attribute to get the path
    rather than use it as a file-like object. For example, you may
    be handing data off to a C API.
    """
    # Prefer setuptools
    if _have_resource_stream:
        return resource_stream(pkgname, identifier)
    mod = sys.modules[pkgname]
    fn = getattr(mod, '__file__', None)
    if fn is None:
        # Fix: the original raised IOError("%r has no __file__!") without a
        # format argument, so the message contained a literal "%r".
        raise IOError("%r has no __file__!" % pkgname)
    path = os.path.join(os.path.dirname(fn), identifier)
    loader = getattr(mod, '__loader__', None)
    if loader is not None:
        try:
            data = loader.get_data(path)
        except IOError:
            pass
        else:
            # Fix: loader.get_data returns bytes; wrap in BytesIO so the
            # result matches the binary-mode file object returned below.
            # (StringIO(bytes) raises TypeError on Python 3.)
            from io import BytesIO
            return BytesIO(data)
    return open(os.path.normpath(path), 'rb')
| gpl-3.0 |
obspy/TauPy | taupy/tests/test_TauP_Time.py | 1 | 5761 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file tests the TauP_Time utility against the original TauPy using
both the high-level tau interface of TauPy and the java-like old script-based
interface.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import *
import inspect
import os
import unittest
import sys
import subprocess
from taupy.tau import TauPyModel
from taupy.TauP_Time import TauP_Time
# Most generic way to get the data folder path.
DATA = os.path.join(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe()))), "data", "TauP_test_data")
def parse_taup_time_output(filename):
    """Parse the tabular section of a TauP_Time text report into dicts."""
    field_names = ("distance", "depth", "phase_name", "time", "ray_param",
                   "takeoff", "incident", "purist_distance", "purist_name")
    converters = (float, float, str, float, float, float, float, float, str)
    arrivals = []
    in_table = False
    with open(filename, "rt") as fh:
        for raw_line in fh:
            stripped = raw_line.strip()
            if not stripped:
                continue
            if stripped.startswith("-------"):
                # The dashed ruler separates the header from the data rows.
                in_table = True
                continue
            if not in_table:
                continue
            # Drop the "=" column separator, then convert each field.
            tokens = [tok.strip() for tok in stripped.split() if tok != "="]
            arrivals.append({name: conv(tok)
                             for name, conv, tok
                             in zip(field_names, converters, tokens)})
    return arrivals
def compare_arrivals_with_taup_time_output(arrivals, filename):
    """Compare TauPy Arrival objects against a reference TauP_Time output file.

    Floats are compared at the precision actually printed in the reference
    file (inferred from the number of decimal digits of each value), so the
    comparison tolerates TauPy returning more digits than Java TauP prints.
    """
    filename = os.path.join(DATA, filename)
    expected_arrivals = parse_taup_time_output(filename)
    # Flatten the Arrival objects into dicts with the same keys as the
    # parsed reference rows.
    arrivals = [
        {
            "distance": _i.getModuloDistDeg(),
            "depth": _i.sourceDepth,
            "phase_name": _i.phase.name,
            "time": _i.time,
            "ray_param": _i.rayParam_sec_degree,
            "takeoff": _i.takeoffAngle,
            "incident": _i.incidentAngle,
            "purist_distance": _i.getDistDeg(),
            "purist_name": _i.puristName
        } for _i in arrivals]
    # Sort both by time.
    expected_arrivals = sorted(expected_arrivals, key=lambda x: x["time"])
    arrivals = sorted(arrivals, key=lambda x: x["time"])
    assert len(expected_arrivals) == len(arrivals)
    for e_arr, arr in zip(expected_arrivals, arrivals):
        assert sorted(e_arr.keys()) == sorted(arr.keys())
        for key, value in e_arr.items():
            if isinstance(value, float):
                # Estimate the precision in the taup output.
                # (digits after the decimal point of the printed value)
                v = str(value)
                prec = len(v) - v.find(".") - 1
                assert value == round(arr[key], prec)
            else:
                assert value == arr[key]
def test_all_phases_iasp91_35_deg_distance():
    """
    Tests tauptime at 35 degree distance, phases ttall.
    """
    # Reference file was produced by the original Java TauP tool with the
    # flags encoded in its name.
    model = TauPyModel("iasp91")
    tts = model.get_travel_times(source_depth_in_km=10.0,
                                 distance_in_degree=35.0)
    compare_arrivals_with_taup_time_output(
        tts, "taup_time_-h_10_-ph_ttall_-deg_35")
class TestTauPTime(unittest.TestCase):
    """Regression tests comparing TauPy travel times to Java TauP output."""
    # For some reason this test throws nosetests off if not in the unittest
    # framework like the test above...?
    def test_all_phases_ak135_35_deg_distance(self):
        """
        Tests tauptime at 35 degree distance for the ak135 model, phases ttall.
        """
        model = TauPyModel("ak135")
        tts = model.get_travel_times(source_depth_in_km=10.0,
                                     distance_in_degree=35.0)
        compare_arrivals_with_taup_time_output(
            tts, "taup_time_-h_10_-ph_ttall_-deg_35_-mod_ak135")
    def test_range(self):
        """
        Check taup_time output for a range of inputs against the Java output.
        """
        if not os.path.isfile("data/java_tauptime_testoutput"):
            subprocess.call("./generate_tauptime_output.sh", shell=True)
        stdout = sys.stdout
        # Temporarily redirect the print output of every TauP_Time run into
        # a file; the shell script below diffs it against the Java output.
        with open('data/taup_time_test_output', 'wt') as sys.stdout:
            for degree in [0, 45, 90, 180, 360, 560]:
                for depth in [0, 100, 1000, 2889]:
                    tauptime = TauP_Time(degrees=degree, depth=depth,
                                         modelName="iasp91",
                                         phaseList=["ttall"])
                    tauptime.run(printOutput=True)
        sys.stdout = stdout
        # Using ttall need to sort; or lines with same arrival times are in
        # different order. With explicit names of all the phases might not be
        # a problem.
        subprocess.check_call("./compare_tauptime_outputs.sh", shell=True)
        # Use this if lines are in same order:
        #subprocess.check_call("diff -wB data/java_tauptime_testoutput "
        #                      "taup_time_test_output", shell=True)
        os.remove("data/taup_time_test_output")
    def test_degree_distance_from_coords(self):
        """
        Test the calculation of spherical distance from given coordinates.
        """
        tt = TauP_Time(depth=143.2, phaseList=["ttall"],
                       coordinate_list=[13, 14, 50, 200])
        tt.run()
        self.assertEqual(tt.degrees, 116.77958601543997)
    def test_MCM_model(self):
        """
        Test Taup_Time for the MCM_MPS05_XPYQ_C1D2L_S09-M2.tvel model.
        """
        mcm = TauPyModel("MCM_MPS05_XPYQ_C1D2L_S09-M2.tvel")
        times = mcm.get_travel_times(300, 180)
        compare_arrivals_with_taup_time_output(times,
                                               "taup_time_MCM_testfile")
if __name__ == '__main__':
    # buffer=True captures stdout/stderr during the run so the redirection
    # done in test_range does not pollute the test report.
    unittest.main(buffer=True)
| gpl-3.0 |
sebastian-code/portal | forum/migrations/0004_auto_20160402_1221.py | 2 | 1639 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the Comentario model, tweaks
    # ordering/verbose names on Pregunta and Respuesta, and adds the
    # Pregunta.vistas view counter. Do not edit by hand beyond comments.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('forum', '0003_auto_20160317_2138'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comentario',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
                ('creado_en', models.DateTimeField(auto_now_add=True)),
                ('modificado_en', models.DateTimeField(auto_now=True)),
                ('comentario', models.TextField(max_length=3000, blank=True)),
                ('comentador', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-creado_en',),
                'verbose_name': 'Comentario',
                'verbose_name_plural': 'Comentarios',
            },
        ),
        migrations.AlterModelOptions(
            name='pregunta',
            options={'ordering': ('tiene_respuesta', '-vistas', '-creado_en'), 'verbose_name': 'Pregunta', 'verbose_name_plural': 'Preguntas'},
        ),
        migrations.AlterModelOptions(
            name='respuesta',
            options={'ordering': ('-aceptada', '-creado_en'), 'verbose_name': 'Respuesta', 'verbose_name_plural': 'Respuestas'},
        ),
        migrations.AddField(
            model_name='pregunta',
            name='vistas',
            field=models.IntegerField(default=0),
        ),
    ]
| bsd-3-clause |
juanyaw/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/iso2022_jp_2004.py | 61 | 1112 | #
# iso2022_jp_2004.py: Python Unicode Codec for ISO2022_JP_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
# Fetch the C-implemented codec object shared by all wrapper classes below.
codec = _codecs_iso2022.getcodec('iso2022_jp_2004')
class Codec(codecs.Codec):
    # Stateless encode/decode delegating to the C codec implementation.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Stateful chunk-by-chunk encoder backed by the shared C codec.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Stateful chunk-by-chunk decoder backed by the shared C codec.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # File-like reading interface backed by the shared C codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # File-like writing interface backed by the shared C codec.
    codec = codec
def getregentry():
    # Registry hook: the codecs machinery calls this to obtain the codec's
    # CodecInfo when 'iso2022_jp_2004' is looked up.
    return codecs.CodecInfo(
        name='iso2022_jp_2004',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
prasidh09/cse506 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Python 2 perf-script hook file. CLI: [comm] [interval].
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
	sys.exit(usage)
if len(sys.argv) > 2:
	for_comm = sys.argv[1]
	interval = int(sys.argv[2])
elif len(sys.argv) > 1:
	try:
		interval = int(sys.argv[1])
	except ValueError:
		# A single non-numeric argument is a comm filter, not an interval.
		for_comm = sys.argv[1]
		interval = default_interval
# syscall id -> hit count; cleared after every display refresh.
syscalls = autodict()
def trace_begin():
	# perf calls this once at script start: spawn the periodic display
	# thread so counting and printing run concurrently.
	thread.start_new_thread(print_syscall_totals, (interval,))
	pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# perf event handler: count one hit per syscall id, optionally
	# filtered to a single comm.
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# First hit: the autodict node is not yet an int, so initialize it.
		syscalls[id] = 1
def print_syscall_totals(interval):
	# Runs forever on a background thread: clear the terminal, print the
	# per-syscall counts sorted by count descending, reset, sleep.
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",
		print "%-40s %10s\n" % ("event", "count"),
		print "%-40s %10s\n" % ("----------------------------------------", \
			"----------"),
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
			reverse = True):
			try:
				print "%-40s %10d\n" % (syscall_name(id), val),
			except TypeError:
				pass
		syscalls.clear()
		time.sleep(interval)
| gpl-2.0 |
josiah-wolf-oberholtzer/supriya | supriya/ugens/dynamics.py | 1 | 3847 | import collections
from supriya import CalculationRate
from supriya.synthdefs import PseudoUGen, UGen
from .delay import DelayN
class Amplitude(UGen):
    """
    An amplitude follower.
    ::
        >>> source = supriya.ugens.In.ar(0)
        >>> amplitude = supriya.ugens.Amplitude.kr(
        ...     attack_time=0.01, release_time=0.01, source=source,
        ... )
        >>> amplitude
        Amplitude.kr()
    """
    # Input signature: source signal plus attack/release lag times
    # (presumably seconds, per SuperCollider convention — confirm upstream).
    _ordered_input_names = collections.OrderedDict(
        [("source", None), ("attack_time", 0.01), ("release_time", 0.01)]
    )
    # Constructible at audio and control rate (.ar / .kr).
    _valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class Compander(UGen):
    """
    A general purpose hard-knee dynamics processor.
    """
    # Inputs: signal to process, sidechain control signal, threshold, the
    # gain slopes applied below/above threshold, and clamp (attack) /
    # relax (release) times.
    _ordered_input_names = collections.OrderedDict(
        [
            ("source", None),
            ("control", 0.0),
            ("threshold", 0.5),
            ("slope_below", 1.0),
            ("slope_above", 1.0),
            ("clamp_time", 0.01),
            ("relax_time", 0.1),
        ]
    )
    # Audio rate only (.ar).
    _valid_calculation_rates = (CalculationRate.AUDIO,)
class CompanderD(PseudoUGen):
    """
    A convenience constructor for Compander.
    """
    ### PUBLIC METHODS ###
    @classmethod
    def ar(
        cls,
        source=None,
        threshold=0.5,
        clamp_time=0.01,
        relax_time=0.1,
        slope_above=1.0,
        slope_below=1.0,
    ):
        """
        Constructs an audio-rate dynamics processor.
        .. container:: example
            ::
                >>> source = supriya.ugens.In.ar(bus=0)
                >>> compander_d = supriya.ugens.CompanderD.ar(source=source,)
                >>> supriya.graph(compander_d)  # doctest: +SKIP
            ::
                >>> print(compander_d)
                synthdef:
                    name: d4e7b88df56af5070a88f09b0f8c633e
                    ugens:
                    -   In.ar:
                            bus: 0.0
                    -   DelayN.ar:
                            delay_time: 0.01
                            maximum_delay_time: 0.01
                            source: In.ar[0]
                    -   Compander.ar:
                            clamp_time: 0.01
                            control: DelayN.ar[0]
                            relax_time: 0.1
                            slope_above: 1.0
                            slope_below: 1.0
                            source: In.ar[0]
                            threshold: 0.5
        Returns ugen graph.
        """
        # The control (sidechain) input is the source delayed by clamp_time,
        # so the compressor is driven by a delayed copy of the signal itself.
        control = DelayN.ar(
            source=source, maximum_delay_time=clamp_time, delay_time=clamp_time
        )
        return Compander._new_expanded(
            clamp_time=clamp_time,
            calculation_rate=CalculationRate.AUDIO,
            relax_time=relax_time,
            slope_above=slope_above,
            slope_below=slope_below,
            source=source,
            control=control,
            threshold=threshold,
        )
class Limiter(UGen):
    """
    A peak limiter.
    ::
        >>> source = supriya.ugens.In.ar(0)
        >>> limiter = supriya.ugens.Limiter.ar(duration=0.01, level=1, source=source,)
        >>> limiter
        Limiter.ar()
    """
    # Inputs: source signal, output ceiling level, and lookahead duration.
    _ordered_input_names = collections.OrderedDict(
        [("source", None), ("level", 1), ("duration", 0.01)]
    )
    # Audio rate only (.ar).
    _valid_calculation_rates = (CalculationRate.AUDIO,)
class Normalizer(UGen):
    """
    A dynamics flattener.
    ::
        >>> source = supriya.ugens.In.ar(0)
        >>> normalizer = supriya.ugens.Normalizer.ar(duration=0.01, level=1, source=source,)
        >>> normalizer
        Normalizer.ar()
    """
    # Inputs: source signal, target level, and lookahead duration; same
    # signature shape as Limiter above.
    _ordered_input_names = collections.OrderedDict(
        [("source", None), ("level", 1), ("duration", 0.01)]
    )
    # Audio rate only (.ar).
    _valid_calculation_rates = (CalculationRate.AUDIO,)
| mit |
rubikloud/gpdb | gpAux/extensions/gtest/test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
# Platform flags used to decide which flags should appear in --help output.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'

# Path to the gtest_help_test_ binary built alongside this script.
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')

# All Google Test flags share this prefix.
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Incorrectly-styled variants of a valid flag ('-', '/' prefix, dashes
# instead of underscores); these should still trigger the help message.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'

# Probe the binary's test list once to learn whether death tests are
# compiled into this build.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output

# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.

  Returns:
    the exit code and the text output as a tuple.
  """
  command = [PROGRAM_PATH]
  if flag is not None:
    command.append(flag)
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when help flag is specified.

    The right message must be printed and the tests must be
    skipped when the given flag is specified.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)

    # The stream_result_to flag is advertised only on Linux builds.
    if IS_LINUX:
      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)

    # The death-test style flag only shows up when death tests are
    # compiled in, and never on Windows.
    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)

  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.

    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # A non-zero exit code is expected here: the helper binary's own
    # tests deliberately fail when actually executed.
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
  # Standard entry point shared by all gtest Python tests.
  gtest_test_utils.Main()
| apache-2.0 |
otmaneJai/Zipline | zipline/sources/data_frame_source.py | 26 | 5253 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to generate data sources.
"""
import numpy as np
import pandas as pd
from zipline.gens.utils import hash_args
from zipline.sources.data_source import DataSource
class DataFrameSource(DataSource):
    """Data source that yields events from a pandas DataFrame.

    Axis layout: the columns are integer sids and the index holds
    datetimes.  Bars whose price is NaN (and whose sid has not produced
    a real price yet) are filtered out.
    """

    def __init__(self, data, **kwargs):
        assert isinstance(data.index, pd.tseries.index.DatetimeIndex)
        # Only accept integer SIDs as the items of the DataFrame.
        assert isinstance(data.columns, pd.Int64Index)
        # TODO is ffilling correct/necessary?
        # Forward fill prices so gaps reuse the last known value.
        self.data = data.fillna(method='ffill')
        # Optional overrides for the simulated date range.
        self.start = kwargs.get('start', self.data.index[0])
        self.end = kwargs.get('end', self.data.index[-1])
        self.sids = self.data.columns
        # Hash value used downstream for deterministic sorting.
        self.arg_string = hash_args(data, **kwargs)
        self._raw_data = None
        self.started_sids = set()

    @property
    def mapping(self):
        """Map event field -> (conversion callable, source field name)."""
        identity = lambda x: x
        return {
            'dt': (identity, 'dt'),
            'sid': (identity, 'sid'),
            'price': (float, 'price'),
            'volume': (int, 'volume'),
        }

    @property
    def instance_hash(self):
        return self.arg_string

    def raw_data_gen(self):
        """Yield one event dict per (datetime, sid) cell of the frame."""
        started = self.started_sids
        for dt, row in self.data.iterrows():
            for sid, price in row.iteritems():
                if sid not in started:
                    # Suppress a sid until it has its first real price;
                    # NaNs before that cannot be forward filled.
                    if np.isnan(price):
                        continue
                    started.add(sid)
                yield {
                    'dt': dt,
                    'sid': sid,
                    'price': price,
                    # Just chose something large
                    # if no volume available.
                    'volume': 1e9,
                }

    @property
    def raw_data(self):
        # Lazily create the generator on first access.
        if self._raw_data is None:
            self._raw_data = self.raw_data_gen()
        return self._raw_data
class DataPanelSource(DataSource):
    """
    Data source that yields from a pandas Panel.

    :Axis layout:
        * items : sids
        * major_axis : datetime
        * minor_axis : price, volume, ...

    :Note:
        Bars where the price is nan are filtered out.
    """

    def __init__(self, data, **kwargs):
        assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)
        # Only accept integer SIDs as the items of the Panel
        assert isinstance(data.items, pd.Int64Index)
        # TODO is ffilling correct/necessary?
        # forward fill with volumes of 0
        self.data = data.fillna(value={'volume': 0})
        self.data = self.data.fillna(method='ffill')
        # Unpack config dictionary with default values.
        self.start = kwargs.get('start', self.data.major_axis[0])
        self.end = kwargs.get('end', self.data.major_axis[-1])
        self.sids = self.data.items
        # Hash_value for downstream sorting.
        self.arg_string = hash_args(data, **kwargs)
        self._raw_data = None
        # Sids that have emitted at least one bar with a real price.
        self.started_sids = set()

    @property
    def mapping(self):
        # Base mapping: event field -> (conversion callable, source field).
        mapping = {
            'dt': (lambda x: x, 'dt'),
            'sid': (lambda x: x, 'sid'),
            'price': (float, 'price'),
            'volume': (int, 'volume'),
        }

        # Add additional fields.
        # Every extra minor-axis field is passed through unconverted.
        for field_name in self.data.minor_axis:
            if field_name in ['price', 'volume', 'dt', 'sid']:
                continue
            mapping[field_name] = (lambda x: x, field_name)

        return mapping

    @property
    def instance_hash(self):
        return self.arg_string

    def raw_data_gen(self):
        # Walk the panel one timestamp at a time; major_xs gives the
        # cross-section (fields x sids) for that timestamp.
        for dt in self.data.major_axis:
            df = self.data.major_xs(dt)
            for sid, series in df.iteritems():
                # Skip SIDs that can not be forward filled
                if np.isnan(series['price']) and \
                   sid not in self.started_sids:
                    continue
                self.started_sids.add(sid)

                event = {
                    'dt': dt,
                    'sid': sid,
                }
                # Copy every field of the cross-section into the event.
                for field_name, value in series.iteritems():
                    event[field_name] = value

                yield event

    @property
    def raw_data(self):
        # Lazily create the generator on first access; a generator is
        # always truthy, so this only triggers while _raw_data is None.
        if not self._raw_data:
            self._raw_data = self.raw_data_gen()
        return self._raw_data
| apache-2.0 |
petebachant/pyqtgraph | examples/ImageView.py | 20 | 1812 | # -*- coding: utf-8 -*-
"""
This example demonstrates the use of ImageView, which is a high-level widget for
displaying and analyzing 2D and 3D data. ImageView provides:
1. A zoomable region (ViewBox) for displaying the image
2. A combination histogram and gradient editor (HistogramLUTItem) for
controlling the visual appearance of the image
3. A timeline for selecting the currently displayed frame (for 3D data only).
4. Tools for very basic analysis of image data (see ROI and Norm buttons)
"""
## Add path to library (just for examples; you do not need this)
import initExample
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
app = QtGui.QApplication([])
## Create window with ImageView widget
win = QtGui.QMainWindow()
win.resize(800,800)
imv = pg.ImageView()
win.setCentralWidget(imv)
win.show()
win.setWindowTitle('pyqtgraph example: ImageView')
## Create random 3D data set with noisy signals
img = pg.gaussianFilter(np.random.normal(size=(200, 200)), (5, 5)) * 20 + 100
img = img[np.newaxis,:,:]
decay = np.exp(-np.linspace(0,0.3,100))[:,np.newaxis,np.newaxis]
data = np.random.normal(size=(100, 200, 200))
data += img * decay
data += 2
## Add time-varying signal
sig = np.zeros(data.shape[0])
sig[30:] += np.exp(-np.linspace(1,10, 70))
sig[40:] += np.exp(-np.linspace(1,10, 60))
sig[70:] += np.exp(-np.linspace(1,10, 30))
sig = sig[:,np.newaxis,np.newaxis] * 3
data[:,50:60,50:60] += sig
## Display the data and assign each frame a time value from 1.0 to 3.0
imv.setImage(data, xvals=np.linspace(1., 3., data.shape[0]))
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| mit |
balloob/home-assistant | homeassistant/components/zha/core/channels/security.py | 3 | 6589 | """
Security channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/integrations/zha/
"""
import asyncio
from zigpy.exceptions import ZigbeeException
import zigpy.zcl.clusters.security as security
from homeassistant.core import callback
from .. import registries
from ..const import (
SIGNAL_ATTR_UPDATED,
WARNING_DEVICE_MODE_EMERGENCY,
WARNING_DEVICE_SOUND_HIGH,
WARNING_DEVICE_SQUAWK_MODE_ARMED,
WARNING_DEVICE_STROBE_HIGH,
WARNING_DEVICE_STROBE_YES,
)
from .base import ZigbeeChannel
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasAce.cluster_id)
class IasAce(ZigbeeChannel):
    """IAS Ancillary Control Equipment channel."""
    # No cluster-specific behavior; inherits everything from ZigbeeChannel.
@registries.CHANNEL_ONLY_CLUSTERS.register(security.IasWd.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasWd.cluster_id)
class IasWd(ZigbeeChannel):
    """IAS Warning Device channel."""

    @staticmethod
    def set_bit(destination_value, destination_bit, source_value, source_bit):
        """Copy source_bit of source_value into destination_bit of destination_value."""
        if not IasWd.get_bit(source_value, source_bit):
            return destination_value
        return destination_value | (1 << destination_bit)

    @staticmethod
    def get_bit(value, bit):
        """Return True when the given bit of value is set."""
        return bool(value & (1 << bit))

    async def issue_squawk(
        self,
        mode=WARNING_DEVICE_SQUAWK_MODE_ARMED,
        strobe=WARNING_DEVICE_STROBE_YES,
        squawk_level=WARNING_DEVICE_SOUND_HIGH,
    ):
        """Issue a squawk command.

        This command uses the WD capabilities to emit a quick audible/visible
        pulse called a "squawk". The squawk command has no effect if the WD is
        currently active (warning in progress).
        """
        # (destination bit, source value, source bit) for each field of the
        # single squawk-frame byte.
        bit_layout = (
            (0, squawk_level, 0),
            (1, squawk_level, 1),
            (3, strobe, 0),
            (4, mode, 0),
            (5, mode, 1),
            (6, mode, 2),
            (7, mode, 3),
        )
        value = 0
        for destination_bit, source_value, source_bit in bit_layout:
            value = IasWd.set_bit(value, destination_bit, source_value, source_bit)
        await self.squawk(value)

    async def issue_start_warning(
        self,
        mode=WARNING_DEVICE_MODE_EMERGENCY,
        strobe=WARNING_DEVICE_STROBE_YES,
        siren_level=WARNING_DEVICE_SOUND_HIGH,
        warning_duration=5,  # seconds
        strobe_duty_cycle=0x00,
        strobe_intensity=WARNING_DEVICE_STROBE_HIGH,
    ):
        """Issue a start warning command.

        This command starts the WD operation. The WD alerts the surrounding
        area by audible (siren) and visual (strobe) signals.

        strobe_duty_cycle indicates the length of the flash cycle, allowing
        different flash durations for different alarm types. Valid range is
        0-100 in increments of 10 (other values are rounded by the device);
        the duty cycle is calculated over one second with the ON state
        preceding the OFF state, e.g. 40 means 0.4s ON then 0.6s OFF.
        """
        # (destination bit, source value, source bit) for each field of the
        # warning-frame byte.
        bit_layout = (
            (0, siren_level, 0),
            (1, siren_level, 1),
            (2, strobe, 0),
            (4, mode, 0),
            (5, mode, 1),
            (6, mode, 2),
            (7, mode, 3),
        )
        value = 0
        for destination_bit, source_value, source_bit in bit_layout:
            value = IasWd.set_bit(value, destination_bit, source_value, source_bit)
        await self.start_warning(
            value, warning_duration, strobe_duty_cycle, strobe_intensity
        )
@registries.BINARY_SENSOR_CLUSTERS.register(security.IasZone.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasZone.cluster_id)
class IASZoneChannel(ZigbeeChannel):
    """Channel for the IASZone Zigbee cluster."""

    @callback
    def cluster_command(self, tsn, command_id, args):
        """Handle commands received to this cluster."""
        if command_id == 0:
            # Only the two low bits of the reported status are forwarded
            # as the alarm state.
            state = args[0] & 3
            self.async_send_signal(
                f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", 2, "zone_status", state
            )
            self.debug("Updated alarm state: %s", state)
        elif command_id == 1:
            # Device asked to be enrolled; answer in the background.
            self.debug("Enroll requested")
            res = self._cluster.enroll_response(0, 0)
            asyncio.create_task(res)

    async def async_configure(self):
        """Configure IAS device."""
        await self.get_attribute_value("zone_type", from_cache=False)
        if self._ch_pool.skip_configuration:
            self.debug("skipping IASZoneChannel configuration")
            return

        self.debug("started IASZoneChannel configuration")

        await self.bind()
        ieee = self.cluster.endpoint.device.application.ieee

        # Write our coordinator address as the CIE; failures are logged
        # but configuration continues (best effort).
        try:
            res = await self._cluster.write_attributes({"cie_addr": ieee})
            self.debug(
                "wrote cie_addr: %s to '%s' cluster: %s",
                str(ieee),
                self._cluster.ep_attribute,
                res[0],
            )
        except ZigbeeException as ex:
            self.debug(
                "Failed to write cie_addr: %s to '%s' cluster: %s",
                str(ieee),
                self._cluster.ep_attribute,
                str(ex),
            )

        # Pro-actively enroll in case the device never sends an enroll
        # request itself; again best effort.
        try:
            self.debug("Sending pro-active IAS enroll response")
            await self._cluster.enroll_response(0, 0)
        except ZigbeeException as ex:
            self.debug(
                "Failed to send pro-active IAS enroll response: %s",
                str(ex),
            )

        self.debug("finished IASZoneChannel configuration")

    @callback
    def attribute_updated(self, attrid, value):
        """Handle attribute updates on this cluster."""
        # Attribute 2 is the zone status; mask to the two low bits as in
        # cluster_command above.
        if attrid == 2:
            value = value & 3
        self.async_send_signal(
            f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
            attrid,
            self.cluster.attributes.get(attrid, [attrid])[0],
            value,
        )

    async def async_initialize(self, from_cache):
        """Initialize channel."""
        attributes = ["zone_status", "zone_state"]
        await self.get_attributes(attributes, from_cache=from_cache)
        await super().async_initialize(from_cache)
| apache-2.0 |
theblacklion/diamond-framework | tilematrix.py | 2 | 25717 | # TODO
#
# @author Oktay Acikalin <oktay.acikalin@gmail.com>
# @copyright Oktay Acikalin
# @license MIT (LICENSE.txt)
import os
import sys
import ConfigParser
from collections import OrderedDict
from math import ceil, floor
import csv
from types import GeneratorType
from diamond import pyglet
from diamond.rect import Rect
from diamond.vault import Vault
from diamond.matrix import Matrix
from diamond.node import Node
from diamond.decorators import time
from diamond.clock import Timer
class DummyFrame(object):
    """Placeholder frame used for empty tiles (id -1); zero-sized rect."""
    # NOTE(review): class-level list is shared by every instance — treated
    # as read-only throughout this module.
    rect = [0, 0, 0, 0]
class TileMatrixSector(object):
    """One on-screen sector of the tile matrix.

    Builds one pyglet vertex list per tilesheet from a sparse
    {(x, y): tile_id} matrix and keeps the vertex positions in sync with
    the sector's position and visibility.
    """

    # TODO REWORK!!!
    # we need an index of all the sprites we place in the vertex list.
    # then we can modify vertex lists without sparse data.
    # replace operation would then replace in place..
    # remove operation would set the color and texture coords to 0.
    # set operation would search for an empty place or add a new one.
    # a periodic scan should remove all sparse data. perhaps user driven event?

    def __init__(self, vaults, batch, group, matrices, matrix_size, tile_size):
        super(TileMatrixSector, self).__init__()
        self._vaults = vaults
        self._tile_size = tile_size
        self._matrices = matrices
        self._matrix_size = matrix_size
        # Per-sheet cache: tile id -> list of animation frames.
        self._sprite_data = sprite_data = self._gather_sprite_data(matrices, vaults)
        self._vertex_lists = dict()
        self._opacity = 255
        self._rgb = (255, 255, 255)
        self._groups = dict()
        self._visible = True
        for sheet, matrix in matrices.iteritems():
            vault = vaults[sheet]
            texture = vault.image.get_texture()
            # Setup sprite group.
            blend_src = pyglet.gl.GL_SRC_ALPHA
            blend_dest = pyglet.gl.GL_ONE_MINUS_SRC_ALPHA
            sprite_group = pyglet.sprite.SpriteGroup(texture, blend_src, blend_dest, group)
            self._groups[sheet] = sprite_group
            tex_coords = self._gather_tex_cords(matrix, sprite_data[sheet], texture.height)
            # print len(tex_coords)
            # print len(self._flat_data)
            # Four vertices (a quad) per tile in the sparse matrix.
            num_coords = 4 * len(matrix)
            # print num_coords
            # Setup vertex list.
            self._batch = batch
            self._vertex_lists[sheet] = self._batch.add(
                num_coords, pyglet.gl.GL_QUADS, sprite_group,
                'v2i/dynamic',
                'c4B', ('t3f', tex_coords)
            )
            # Update color.
            r, g, b = self._rgb
            self._vertex_lists[sheet].colors[:] = [r, g, b, int(self._opacity)] * 4 * len(matrix)
            # print self._vertex_list
        # Setup position.
        self._x = 0
        self._y = 0
        self._rect = Rect(
            self._x,
            self._y,
            matrix_size[0] * tile_size[0],
            matrix_size[1] * tile_size[1],
        )
        self._update_position()

    def __del__(self):
        # print('TileMatrixSector.__del__(%s)' % self)
        # Free GPU-side vertex lists when the sector is garbage collected.
        for vertex_list in self._vertex_lists.itervalues():
            if vertex_list is not None:
                vertex_list.delete()

    def _gather_sprite_data(self, matrices, vaults):
        """Return {sheet: {tile_id: frames}} for every id used in the matrices."""
        sprite_data = dict()
        for sheet, matrix in matrices.iteritems():
            vault = vaults[sheet]
            result = dict()
            ids = set(matrix.values())
            ids.discard(-1)
            for id in ids:
                result[id] = vault.get_sprite(str(id)).get_action('none').get_frames()
            # -1 marks an empty tile; map it to the zero-sized dummy frame.
            result[-1] = [DummyFrame()]
            sprite_data[sheet] = result
        return sprite_data

    def _gather_tex_cords(self, matrix, sprite_data, texture_height):
        """Build the flat t3f texture-coordinate array for a sheet's matrix."""
        coords = []
        for pos, id in matrix.iteritems():
            frame = sprite_data[id][0] # TODO for now just take the first frame.
            # print pos, id, frame
            x, y, w, h = frame.rect
            # print frame.rect
            # Flip our y coord. TODO can't we do this somehow else?
            y = texture_height - y - h
            # bottom-left, bottom-right, top-right and top-left
            tex_coord = (
                x, y + h, 0., # bottom left
                x + w, y + h, 0., # bottom right
                x + w, y, 0., # top right
                x, y, 0., # top left
            )
            # print tex_coord
            coords.extend(tex_coord)
        return coords

    # @time
    def _update_position(self):
        """Rewrite all vertex positions from the current x/y and visibility.

        When invisible, every quad is collapsed to the origin instead of
        being removed from the batch.
        """
        x, y = self._x, self._y
        w, h = self._tile_size
        self._rect.x = x
        self._rect.y = y
        sprite_data = self._sprite_data
        if self._visible:
            for sheet, matrix in self._matrices.iteritems():
                vertices = []
                sprites = sprite_data[sheet]
                for pos, id in matrix.iteritems():
                    # print frame
                    s_w, s_h = sprites[id][0].rect[2:]
                    x1 = int(x) + pos[0] * w
                    y1 = int(y) + pos[1] * h
                    x2 = x1 + s_w
                    y2 = y1 + s_h
                    vertices.extend([x1, y1, x2, y1, x2, y2, x1, y2])
                self._vertex_lists[sheet].vertices[:] = vertices
        else:
            for sheet, matrix in self._matrices.iteritems():
                vertices = []
                sprites = sprite_data[sheet]
                for pos, id in matrix.iteritems():
                    # print frame
                    vertices.extend([0, 0, 0, 0, 0, 0, 0, 0])
                self._vertex_lists[sheet].vertices[:] = vertices

    def _set_x(self, x):
        if x != self._x:
            self._x = x
            self._update_position()
    x = property(lambda self: self._x, _set_x)

    def _set_y(self, y):
        if y != self._y:
            self._y = y
            self._update_position()
    y = property(lambda self: self._y, _set_y)

    def set_position(self, x, y):
        self._x, self._y = x, y
        self._update_position()
    position = property(lambda self: (self._x, self._y),
                        lambda self, pos: self.set_position(*pos))

    rect = property(lambda self: self._rect)

    # @time
    def _set_visible(self, visible):
        if self._visible != visible:
            self._visible = visible
            self._update_position()
    visible = property(lambda self: self._visible, _set_visible)

    @time
    def set_tile(self, x, y, id):
        """Swap the texture of the tile at sector coords (x, y) to 'sheet/id'.

        NOTE(review): the tex-coord offset below assumes a dense row-major
        vertex list, but _gather_tex_cords builds the list in sparse dict
        order — verify this indexing against sparse sectors.
        """
        s_x, s_y, s_w = x, y, self._matrix_size[0]
        sheet, id = id.split('/')
        sprite_data = self._sprite_data[sheet]
        vault = self._vaults[sheet]
        if id not in sprite_data:
            sprite_data[id] = vault.get_sprite(str(id)).get_action('none').get_frames()
        frame = sprite_data[id][0] # TODO for now just take the first frame.
        # print frame
        x, y, w, h = frame.rect
        # print frame.rect
        # bottom-left, bottom-right, top-right and top-left
        tex_coord = [
            x, y + h, 0., # bottom left
            x + w, y + h, 0., # bottom right
            x + w, y, 0., # top right
            x, y, 0., # top left
        ]
        # print 1, tex_coord
        # print 2, s_x, s_y, s_w
        # 12 floats per quad (4 vertices x t3f).
        pos = (s_w * s_y * 12 + s_x * 12)
        # print 3, len(self._vertex_list.tex_coords), pos
        vertex_list = self._vertex_lists[sheet]
        tex_coords = vertex_list.tex_coords
        tex_coords = tex_coords[:pos] + tex_coord + tex_coords[pos + 12:]
        # print 4, len(tex_coords)
        vertex_list.tex_coords[:] = tex_coords
class TileMatrixLayer(Node):
    """Scene-graph node holding the visible sectors of one z layer."""

    def __init__(self, suborder_id, vaults):
        super(TileMatrixLayer, self).__init__()
        # print 'TileMatrixLayer.__init__', self, suborder_id
        self.order_id = suborder_id
        self._vaults = vaults
        # id -> (x, y, TileMatrixSector)
        self._sectors = dict()

    # def _set_suborder_id(self, id):
    #     self._suborder_id = id
    #     # TODO Update group id of this node by placing id after the comma of the node.

    # suborder_id = property(lambda self: self._suborder_id, _set_suborder_id)

    def has_sector(self, id):
        return id in self._sectors

    # @time
    def add_sector(self, id, x, y, matrices, matrix_size, tile_size):
        """Create a TileMatrixSector at pixel position (x, y) and track it."""
        batch = self.window._batch
        group = self._group
        # for sheet, matrix in matrices.iteritems():
        #     print 'sheet', sheet, matrix
        # print 'sector real pos =', self._x_real, self._y_real
        sector = TileMatrixSector(self._vaults, batch, group, matrices, matrix_size, tile_size)
        # New sectors adopt this node's effective visibility.
        sector.visible = self._inherited_visibility
        # sector.set_position(self._x_real + x, self._y_real + y)
        sector.set_position(x, y)
        self._sectors[id] = (x, y, sector)

    # @time
    def remove_sector(self, id):
        # Dropping the last reference lets the sector's __del__ free its
        # vertex lists.
        del self._sectors[id]

    def _set_visible(self, visible):
        super(TileMatrixLayer, self)._set_visible(visible)
        # Propagate visibility to every sector of this layer.
        for x, y, sector in self._sectors.itervalues():
            sector.visible = self._inherited_visibility

    # @time
    # def _update_real_position(self):
    #     super(TileMatrixLayer, self)._update_real_position()
    #     # print len(self._sectors)
    #     for x, y, sector in self._sectors.itervalues():
    #         # print x, y, sector
    #         old_pos = sector.position
    #         # new_pos = self._x_real + x, self._y_real + y
    #         new_pos = x, y
    #         if new_pos != old_pos:
    #             sector.set_position(*new_pos)
class TileMatrix(Node):
    def __init__(self):
        """Set up an empty tile matrix with default tile/sector sizes."""
        super(TileMatrix, self).__init__()
        self.__config = ConfigParser.ConfigParser()
        # alias -> sheet Vault instance.
        self.__vaults = dict()
        self.__default_sheet = None
        self.__tile_size = 32, 32 # Never go less than 4x4 or doom awaits you!
        self.__sector_size = 10, 10 # Default for visual sectors.
        self.__matrix = Matrix()
        # For debugging. DISABLE ME!
        # self.__matrix.set_default_value({0: '72'})
        # z -> TileMatrixLayer, plus per-layer config (e.g. reordering).
        self.__layers = dict()
        self.__layer_config = dict()
        # ((map x, map y), (pixel x, pixel y)) of the last scroll update.
        self.__last_map_pos = (None, None), (None, None)
        self.__last_matrix_rect = None
    def add_sheet(self, sheet_vault, alias=None):
        """Register a tilesheet vault module under *alias*.

        The first sheet fixes the matrix tile size; later sheets must
        match it exactly.
        """
        if not self.__vaults:
            self.__tile_size = sheet_vault.tile_size
            t_w, t_h = self.__tile_size
            if t_w < 4 or t_h < 4:
                raise Exception('Tile size cannot be smaller than 4x4. Current size: %dx%d' % (t_w, t_h))
        else:
            if sheet_vault.tile_size != self.__tile_size:
                raise Exception('Cannot load sheet vault with incompatible tile size: %s' % sheet_vault)
        vault = Vault.get_instance(sheet_vault)
        alias = sheet_vault.__name__ if alias is None else alias
        self.__vaults[alias] = vault
        # Record the sheet module path (relative, extensionless) in the
        # config so it can be written back out later.
        filename = os.path.relpath(sheet_vault.__file__, os.getcwd())
        filename = os.path.splitext(filename)[0] # Throw away extension - Python shall decide.
        if not self.__config.has_section('tilesheets'):
            self.__config.add_section('tilesheets')
        self.__config.set('tilesheets', alias, filename)
        # The first registered sheet becomes the default for bare tile ids.
        if not self.__default_sheet:
            self.__default_sheet = self.__vaults.keys()[0]
def load_matrix(self, path):
self.__matrix.data_path = path
if not self.__config.has_section('matrix'):
self.__config.add_section('matrix')
self.__config.set('matrix', 'data_path', path)
def load_sheet_file(self, filename, alias=None):
sheet_path = os.path.dirname(filename)
sheet_file = os.path.basename(filename)
if sheet_path:
sys.path.insert(0, os.path.abspath(sheet_path))
sheet_file = os.path.splitext(sheet_file)[0] # Throw away extension - Python shall decide.
module = __import__(sheet_file, globals(), locals(), [], -1)
self.add_sheet(module, alias)
    def load_config(self, filename):
        """Read an ini-style config and apply its sections.

        Paths inside the file are resolved relative to the file itself.
        NOTE(review): this sets self._sector_cache_path, while
        _rebuild_index reads self._data_path — confirm which attribute
        the cache path is meant to populate.
        """
        # TODO do we need to reset everything here or can we block somehow if something has been set?
        config = self.__config
        config.read(filename)
        base_dir = os.path.dirname(filename)
        for section in config.sections():
            if section == 'tilesheets':
                # NOTE(review): the loop variable shadows the *filename*
                # parameter; harmless because base_dir was captured above.
                for alias, filename in config.items('tilesheets'):
                    filename = os.path.join(base_dir, filename)
                    self.load_sheet_file(filename, alias)
            elif section == 'matrix':
                for key, val in config.items('matrix'):
                    if key == 'data_path':
                        val = os.path.join(base_dir, val)
                        self.load_matrix(val)
                    else:
                        raise Exception('Unknown key for section matrix found: %s' % key)
            elif section == 'layer.order_change':
                # Map source z planes onto different draw orders.
                for z, new_z in config.items('layer.order_change'):
                    z = int(z)
                    new_z = int(new_z)
                    try:
                        self.__layer_config[z]['reorder'] = new_z
                    except KeyError:
                        self.__layer_config[z] = dict(reorder=new_z)
            elif section == 'layer.sector':
                for key, val in config.items('layer.sector'):
                    if key == 'size':
                        val = tuple(map(int, val.split(',')))
                        self.__sector_size = val
                    elif key == 'cache_path':
                        val = os.path.join(base_dir, val)
                        self._sector_cache_path = val
                        if not os.path.exists(val):
                            os.makedirs(val)
            # We just ignore unknown sections.
            # else:
            #     raise Exception('Unknown section in config file found: %s' % section)
        # config.write(sys.stdout)
    # @time
    def update_sectors(self):
        """Create/destroy visual sectors so only on-screen ones exist.

        Works out which sector-aligned rect of the matrix is visible in
        the window, pulls that rect from the matrix, materializes any
        missing TileMatrixSectors per layer and drops sectors that have
        scrolled out of view.  Bails out early if the visible rect did
        not change since the last call.
        """
        # timer = Timer()
        # timer.start()
        # Gather the boundaries.
        m_x, m_y = map(float, self.real_position) # real position of tilematrix
        t_w, t_h = map(float, (self.__tile_size)) # tile size
        s_w, s_h = map(float, (self.__sector_size)) # sector size
        w_w, w_h = map(float, (self.window.width, self.window.height)) # window size
        # m_w, m_h = map(int, (ceil(w_w / t_w), ceil(w_h / t_h))) # map size
        # Calculate all necessary sector rects.
        top_left = map(floor, (-m_x / t_w, -m_y / t_h))
        bottom_right = map(ceil, ((-m_x + w_w) / t_w, (-m_y + w_h) / t_h))
        # Overscan so partially visible sectors are included.
        o_x, o_y = ceil(top_left[0] % s_w / s_w) * s_w, ceil(top_left[1] % s_h / s_h) * s_h
        m_w, m_h = (bottom_right[0] - top_left[0] + o_x), (bottom_right[1] - top_left[1] + o_y)
        # m_w, m_h = (bottom_right[0] - top_left[0]), (bottom_right[1] - top_left[1])
        s_num_w, s_num_h = map(ceil, (m_w / s_w, m_h / s_h))
        # print(m_x, t_w, m_y, t_h)
        # print('tl =', top_left)
        # print(m_x, w_w, t_w, m_y, w_h, t_h)
        # print('br =', bottom_right)
        # print('s_w, s_h =', s_w, s_h)
        # print('o_x, o_y =', o_x, o_y)
        # print('num horiz tiles =', m_w)
        # print('num vert tiles =', m_h)
        # print('num horiz sectors =', s_num_w)
        # print('num vert sectors =', s_num_h)
        # And make them ints.
        top_left = map(int, top_left)
        bottom_right = map(int, bottom_right)
        t_w, t_h = map(int, (t_w, t_h))
        m_w, m_h = map(int, (m_w, m_h))
        s_w, s_h = map(int, (s_w, s_h))
        s_num_w, s_num_h = map(int, (s_num_w, s_num_h))
        # Guess our default if no sheet is being mentioned in a coord.
        default_sheet = self.__default_sheet
        # timer.stop()
        # print 1, timer.result
        # timer.start()
        # Sector-aligned visible rect in tile coordinates.
        matrix_rect = (
            top_left[0] // s_w * s_w,
            top_left[1] // s_h * s_h,
            s_w * s_num_w,
            s_h * s_num_h,
        )
        if matrix_rect == self.__last_matrix_rect:
            return
        self.__last_matrix_rect = matrix_rect
        # Get the rect.
        matrix_layers = self.__matrix.get_rect(*matrix_rect)
        # print matrix_layers
        # timer.stop()
        # print 2, timer.result
        # timer.start()
        # Separate layer and sector data.
        # Result shape: {layer: {(sector x, sector y): {sheet: {(x, y): id}}}}
        layer_data = dict()
        for layer_no, matrix in matrix_layers.iteritems():
            # Ensure layer.
            try:
                layer_matrix = layer_data[layer_no]
            except KeyError:
                layer_matrix = layer_data[layer_no] = dict()
            for (x, y), id in matrix.iteritems():
                # Split absolute tile coords into sector index + offset.
                pos_x = x // s_w
                x = x % s_w
                pos_y = y // s_h
                y = y % s_h
                # print '*** sector', pos_x, pos_y
                # Separate data.
                # DISABLE THIS AFTER DEBUGGING!
                # if (x, y) == (0, 0):
                #     col = {0: '73'}
                #     print x, y, col
                # Normalize data format.
                if '/' in id:
                    sheet, id = id.split('/', 1)
                else:
                    sheet = default_sheet
                # Ensure sector.
                try:
                    sector_matrix = layer_matrix[(pos_x, pos_y)]
                except KeyError:
                    sector_matrix = layer_matrix[(pos_x, pos_y)] = {
                        sheet: {
                            (x, y): id
                        }
                    }
                else:
                    # Set sector data.
                    try:
                        sector_matrix[sheet][x, y] = id
                    except KeyError:
                        sector_matrix[sheet] = {(x, y): id}
        # timer.stop()
        # print 3, timer.result
        # timer.start()
        # Build layers and sectors.
        layers = self.__layers
        vaults = self.__vaults
        required_sectors = set()
        for order_id, layer_data in layer_data.items():
            # print 'layer', order_id
            try:
                layer = layers[order_id]
            except KeyError:
                # Honour configured layer reordering when creating layers.
                if order_id in self.__layer_config:
                    _order_id = self.__layer_config[order_id].get('reorder', order_id)
                else:
                    _order_id = order_id
                layer = layers[order_id] = TileMatrixLayer(_order_id, vaults)
                # print layer
                # We do this because we need a valid window and group for the next step.
                self.add_node(layer)
            for pos, sector_data in layer_data.iteritems():
                if not layer.has_sector(pos):
                    # print 12345, top_left, pos
                    x = pos[0] * t_w * s_w
                    y = pos[1] * t_h * s_h
                    # print 'sector', pos, x, y
                    layer.add_sector(pos, x, y, sector_data, self.__sector_size, self.__tile_size)
                required_sectors.add((order_id, pos))
        # timer.stop()
        # print 4, timer.result
        # timer.start()
        # return
        # print(required_sectors)
        # Cleanup sectors which are off the screen.
        for order_id, layer in layers.items():
            for id, data in layer._sectors.items():
                x, y, sector = data
                # pos = sector.position
                sector_id = (order_id, (x // (t_w * s_w), y // (t_h * s_h)))
                # print sector_id
                if sector_id not in required_sectors:
                    # print('***********', id, data, pos, (w_w, w_h), (-t_w * s_w, -t_h * s_h))
                    # print('drop', id)
                    layer.remove_sector(id)
        # timer.stop()
        # print 5, timer.result
    # @time
    def rebuild(self):
        """Drop all layers/sectors and rebuild the visible set from scratch."""
        # Throw away old layers.
        self.remove_all()
        self.__layers.clear()
        # Reset scroll caches so update_sectors cannot early-out.
        self.__last_map_pos = (None, None), (None, None)
        self.__last_matrix_rect = None
        self.update_sectors()
    # @time
    def add_to(self, node):
        """Attach this matrix to *node*, building sectors on first attach."""
        super(TileMatrix, self).add_to(node)
        # Only rebuild when no child layers exist yet — presumably to
        # avoid rebuilding on re-attachment; TODO confirm.
        if not self._child_nodes:
            self.rebuild()
        # self._update_real_position()
def set_sector_size(self, width, height):
self.__sector_size = width, height
if self.window:
self.rebuild()
    # @time
    def _update_real_position(self):
        """React to node movement: refresh sectors when crossing a tile edge.

        Rounds the (negated) position to tile units, rounding towards the
        scroll direction, and only calls update_sectors when the rounded
        map position actually changed.
        """
        super(TileMatrix, self)._update_real_position()
        if not self.window:
            return
        x, y = map(lambda v: -v, self.real_position)
        t_w, t_h = self.__tile_size
        # s_w, s_h = self.__sector_size
        w = t_w # * s_w
        h = t_h # * s_h
        last_map_pos, last_map_coords = self.__last_map_pos
        # Round away from the previous coordinate so a crossing is
        # detected as soon as it happens in either direction.
        if x > last_map_coords[0]:
            crp_x = ceil(x / float(w))
        else:
            crp_x = floor(x / float(w))
        if y > last_map_coords[1]:
            crp_y = ceil(y / float(h))
        else:
            crp_y = floor(y / float(h))
        cur_map_pos = crp_x, crp_y
        # print (cur_map_pos, (x, y)), self.__last_map_pos
        if last_map_pos != cur_map_pos:
            # print (cur_map_pos, (x, y)), self.__last_map_pos
            self.update_sectors()
        self.__last_map_pos = cur_map_pos, (x, y)
# @time
def get_layer(self, z):
layers = self.__layers
vaults = self.__vaults
try:
layer = layers[z]
except KeyError:
if z in self.__layer_config:
order = self.__layer_config[z].get('reorder', z)
else:
order = z
layer = layers[z] = TileMatrixLayer(order, vaults)
# print layer
self.add_node(layer)
return layer
def translate_to_pos(self, x, y):
tile_size = self.__tile_size
return x * tile_size[0], y * tile_size[1]
def get_boundaries(self):
left, top, right, bottom = self.__matrix.boundaries
t_w, t_h = self.__tile_size
return left * t_w, top * t_h, right * t_w, bottom * t_h
def get_tile_id_at(self, x, y, z):
value = self.__matrix.get_point(x, y, z)
if value is not None:
if type(value) is dict:
return value.copy()
else:
return value
return None
    @time
    def set_tiles_at(self, points):
        """Group (x, y, z, id) points per layer for writing — UNFINISHED.

        NOTE(review): the body only groups the points and fetches the
        layers; the actual sector/matrix mutation is still TODO, the
        fetched *layer* is unused, and the bare print below is leftover
        debug output.
        """
        if type(points) is GeneratorType:
            points = set(tuple(point) for point in points)
        # Bucket the points by their z plane.
        layer_data = dict(
            (z, filter(lambda point: point[2] == z, points))
            for _, _, z, _ in points
        )
        print layer_data
        for layer_no, points in layer_data.iteritems():
            layer = self.get_layer(layer_no)
            # TODO Generate sheet and sector data.
            # TODO Modify existing sectors or create new ones.
        # TODO modify matrix.
        # TODO rework to react on matrix.data.saved event!
def _rebuild_index(self):
    """Recreate all ``i.*.csv`` index files and the ``b.csv`` boundary file.

    Walks the sector files (``s.<x>,<y>.csv``) below the data path and
    writes, per tile id and per tilesheet, an index of the absolute
    positions at which it occurs, plus the overall matrix boundaries.

    NOTE(review): the csv writer file handles collected in ``indexes``
    are never closed explicitly; this relies on the garbage collector
    to flush and close them -- confirm that is acceptable here.
    """
    s_w, s_h = self._sector_size
    print('Rebuilding matrix index...')
    # First pass: remove all stale index files.
    for root, dirs, files in os.walk(self._data_path):
        for filename in files:
            if filename.startswith('i.') and filename.endswith('.csv'):
                os.remove(os.path.join(root, filename))
    # Maps tile id / sheet name -> open csv writer for its index file.
    indexes = dict()
    top, left, bottom, right = 0, 0, 0, 0
    # Second pass: scan every sector file and rebuild the indexes.
    for root, dirs, files in os.walk(self._data_path):
        files = sorted(files)
        # print(root, dirs, files)
        print('Found %d sectors to index...' % len(files))
        for filename in files:
            if not (filename.startswith('s.') and filename.endswith('.csv')):
                continue
            # print('Inspecting sector file: %s' % filename)
            reader = csv.reader(open(os.path.join(root, filename)), skipinitialspace=True)
            # Sector coordinates are encoded in the filename: s.<x>,<y>.csv
            s_x, s_y = map(int, filename[2:-4].split(','))
            for x, y, z, id in reader:
                # Convert sector-relative to absolute tile coordinates.
                x = (s_x * s_w) + int(x)
                y = (s_y * s_h) + int(y)
                # Track the outermost coordinates seen so far.
                top = min(top, y)
                left = min(left, x)
                bottom = max(bottom, y)
                right = max(right, x)
                # TODO track z axis min and max.
                sheet, tile_id = id.split('/')
                # Update index of specific tile.
                if id not in indexes:
                    id_ = '%s,%s' % (sheet, tile_id)
                    index_filename = os.path.join(self._data_path, 'i.%s.csv' % id_)
                    indexes[id] = csv.writer(open(index_filename, 'w'))
                indexes[id].writerow((x, y, z))
                # Update index of used tilesheet.
                if sheet not in indexes:
                    index_filename = os.path.join(self._data_path, 'i.%s.csv' % sheet)
                    indexes[sheet] = csv.writer(open(index_filename, 'w'))
                indexes[sheet].writerow((x, y, z, tile_id))
    # Persist the boundaries both on disk and on the instance.
    index_filename = os.path.join(self._data_path, 'b.csv')
    writer = csv.writer(open(index_filename, 'w'))
    writer.writerow((top, left, bottom, right))
    self._top, self._left, self._bottom, self._right = top, left, bottom, right
    print('Finished rebuilding matrix index.')
def find_in_matrix_by_tilesheet(self, value):
    """Return all indexed rows (x, y, z[, tile_id]) for *value*.

    *value* may be given either as ``sheet/tile`` or already in the
    ``sheet,tile`` form used by the index filenames. Returns an empty
    list when no index file exists.
    """
    if '/' in value:
        # Index filenames use a comma between sheet and tile id.
        value = '%s,%s' % tuple(value.split('/'))
    index_filename = os.path.join(self.__matrix.data_path, 'i.%s.csv' % value)
    if not os.path.exists(index_filename):
        return []
    # Close the file deterministically instead of leaking the handle
    # to the garbage collector.
    with open(index_filename) as index_file:
        reader = csv.reader(index_file)
        return [map(int, row[:3]) + row[3:] for row in reader]
| mit |
40223234/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/signal.py | 743 | 1646 | """This module provides mechanisms to use signal handlers in Python.
Functions:
alarm() -- cause SIGALRM after a specified time [Unix only]
setitimer() -- cause a signal (described below) after a specified
float time and the timer may restart then [Unix only]
getitimer() -- get current value of timer [Unix only]
signal() -- set the action for a given signal
getsignal() -- get the signal action for a given signal
pause() -- wait until a signal arrives [Unix only]
default_int_handler() -- default SIGINT handler
signal constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
itimer constants:
ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon
expiration
ITIMER_VIRTUAL -- decrements only when the process is executing,
and delivers SIGVTALRM upon expiration
ITIMER_PROF -- decrements both when the process is executing and
when the system is executing on behalf of the process.
Coupled with ITIMER_VIRTUAL, this timer is usually
used to profile the time spent by the application
in user and kernel space. SIGPROF is delivered upon
expiration.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame."""
# Console control events (arguments for CTRL-event generation).
CTRL_BREAK_EVENT=1
CTRL_C_EVENT=0
# Number of defined signals (see module docstring above).
NSIG=23
# Signal numbers exposed by this stub module.
SIGABRT=22
SIGBREAK=21
SIGFPE=8
SIGILL=4
SIGINT=2
SIGSEGV=11
SIGTERM=15
# Handler sentinels: system default handler / ignore the signal
# (see module docstring above).
SIG_DFL=0
SIG_IGN=1
def signal(signalnum, handler):
    """Set the action for *signalnum* -- a no-op in this stub module.

    The handler is accepted for API compatibility and ignored.
    """
    return None
| gpl-3.0 |
CDE-UNIBE/qcat | apps/search/search.py | 1 | 8779 | from functools import lru_cache
from django.conf import settings
from elasticsearch import TransportError
from questionnaire.models import Questionnaire
from .index import get_elasticsearch
from .utils import get_alias, ElasticsearchAlias
es = get_elasticsearch()
def get_es_query(
        filter_params: list=None, query_string: str='',
        match_all: bool=True) -> dict:
    """
    Build the elasticsearch query body for the given filters and text query.

    Kwargs:
        ``filter_params`` (list): A list of filter parameters. Each
        parameter is a tuple consisting of the following elements:

            [0]: questiongroup
            [1]: key
            [2]: values (list)
            [3]: operator
            [4]: type (eg. checkbox / text)

        ``query_string`` (str): A query string for the full text search.

        ``match_all`` (bool): Whether the query MUST match all filters or not.
        If not all filters must be matched, the results are ordered by relevance
        to show hits matching more filters at the top. Defaults to True.

    Returns:
        ``dict``. A dictionary containing the query to be passed to ES.
    """
    if filter_params is None:
        filter_params = []

    es_queries = []

    def _get_terms(qg, k, v):
        # Terms filter on the flattened questiongroup__key field;
        # values are matched lowercased.
        return {
            'terms': {
                f'filter_data.{qg}__{k}': [v.lower()]
            }
        }

    # Filter parameters: Nested subqueries to access the correct
    # questiongroup.
    for filter_param in list(filter_params):

        if filter_param.type in [
                'checkbox', 'image_checkbox', 'select_type', 'select_model',
                'radio', 'bool']:

            # So far, range operators only works with one filter value. Does it
            # even make sense to have multiple of these joined by OR with the
            # same operator?
            if filter_param.operator in ['gt', 'gte', 'lt', 'lte']:
                raise NotImplementedError(
                    'Filtering by range is not yet implemented.')
            else:
                if len(filter_param.values) > 1:
                    # Multiple values for the same key are OR-combined.
                    matches = [
                        _get_terms(filter_param.questiongroup,
                                   filter_param.key, v) for v in
                        filter_param.values]
                    query = {
                        'bool': {
                            'should': matches
                        }
                    }
                else:
                    query = _get_terms(
                        filter_param.questiongroup, filter_param.key,
                        filter_param.values[0])

                es_queries.append(query)

        elif filter_param.type in ['text', 'char']:
            raise NotImplementedError(
                'Filtering by text or char is not yet implemented/supported.')

        elif filter_param.type in ['_date']:
            raise NotImplementedError('Not yet implemented.')

        elif filter_param.type in ['_flag']:
            raise NotImplementedError('Not yet implemented.')

        elif filter_param.type in ['_lang']:
            # NOTE(review): ``filter_param.values`` is wrapped in a list
            # here, so for _lang/_edition it appears to hold a single
            # value rather than a list -- confirm against the callers.
            es_queries.append({
                'terms': {
                    'translations': [filter_param.values]
                }
            })

        elif filter_param.type == '_edition':
            es_queries.append({
                'terms': {
                    'serializer_edition': [filter_param.values]
                }
            })

    if query_string:
        # Full text search across name (boosted x4), definition, country.
        es_queries.append({
            'multi_match': {
                'query': get_escaped_string(query_string),
                'fields': [
                    'list_data.name.*^4',
                    'list_data.definition.*',
                    'list_data.country'
                ],
                'type': 'cross_fields',
                'operator': 'and',
            }
        })

    # 'must' = AND semantics, 'should' = OR (ranked by relevance).
    es_bool = 'must' if match_all is True else 'should'

    if query_string == '':
        # Default sort: By country, then by score.
        sort = [
            {
                'list_data.country.keyword': {
                    'order': 'asc'
                }
            },
            '_score',
        ]
    else:
        # If a phrase search is done, then only use the score to sort.
        sort = ['_score']

    return {
        'query': {
            'bool': {
                es_bool: es_queries
            }
        },
        'sort': sort,
    }
def advanced_search(
        filter_params: list=None, query_string: str='',
        configuration_codes: list=None, limit: int=10,
        offset: int=0, match_all: bool=True) -> dict:
    """Run a paginated search across the indices of the given configurations.

    Kwargs:
        ``filter_params`` (list): filter tuples as documented in
        :func:`get_es_query`.

        ``query_string`` (str): full text query string.

        ``configuration_codes`` (list): restrict the search to the
        indices of these configuration codes (all when empty/None).

        ``limit`` (int): maximum number of hits to return.

        ``offset`` (int): number of hits to skip (pagination).

        ``match_all`` (bool): whether every filter must match (AND) or
        matches are only ranked by how many filters they satisfy (OR).

    Returns:
        ``dict``. The raw result of ``elasticsearch.Elasticsearch.search``.
    """
    body = get_es_query(
        filter_params=filter_params, query_string=query_string,
        match_all=match_all)

    codes = configuration_codes if configuration_codes is not None else []
    alias = get_alias(*ElasticsearchAlias.from_code_list(*codes))
    return es.search(index=alias, body=body, size=limit, from_=offset)
def get_aggregated_values(
        questiongroup, key, filter_type, filter_params: list=None,
        query_string: str='', configuration_codes: list=None,
        match_all: bool=True) -> dict:
    """Return aggregated (bucketed) value counts for one filter field.

    Runs the current search -- minus the filter on *questiongroup*/*key*
    itself -- and aggregates the values of that field, e.g. to show how
    many results each filter value would yield.

    Returns a dict mapping field value -> document count.
    """
    if filter_params is None:
        filter_params = []

    # Remove only the filter_param targeting exactly this questiongroup
    # AND key; filters that merely share one of the two must stay active.
    # (The previous `!= and !=` condition dropped too many filters --
    # De Morgan: keep f unless both attributes match.)
    relevant_filter_params = [
        f for f in filter_params
        if not (f.questiongroup == questiongroup and f.key == key)]

    query = get_es_query(
        filter_params=relevant_filter_params, query_string=query_string,
        match_all=match_all)

    # For text values, use the keyword. This does not work for integer values
    # (the way boolean values are stored).
    # https://www.elastic.co/guide/en/elasticsearch/reference/current/fielddata.html
    if filter_type == 'bool':
        field = f'filter_data.{questiongroup}__{key}'
    else:
        field = f'filter_data.{questiongroup}__{key}.keyword'

    query.update({
        'aggs': {
            'values': {
                'terms': {
                    'field': field,
                    # Limit needs to be high enough to include all values.
                    'size': 1000,
                }
            }
        },
        'size': 0,  # Do not include the actual hits
    })

    alias = get_alias(*ElasticsearchAlias.from_code_list(*configuration_codes))
    es_query = es.search(index=alias, body=query)
    buckets = es_query.get('aggregations', {}).get('values', {}).get('buckets', [])
    return {b.get('key'): b.get('doc_count') for b in buckets}
def get_element(questionnaire: Questionnaire) -> dict:
    """Fetch the indexed source document for *questionnaire*.

    Returns an empty dict when elasticsearch cannot deliver the
    document (missing index, connection trouble, ...).
    """
    es_alias = get_alias(
        ElasticsearchAlias.from_configuration(
            configuration=questionnaire.configuration_object))
    try:
        return es.get_source(
            index=es_alias, id=questionnaire.pk, doc_type='questionnaire')
    except TransportError:
        # Swallow transport-level failures; callers treat {} as "not found".
        return {}
def get_escaped_string(query_string: str) -> str:
    """Backslash-escape every ES reserved character in *query_string*."""
    escaped = query_string
    for reserved in settings.ES_QUERY_RESERVED_CHARS:
        escaped = escaped.replace(reserved, '\\' + reserved)
    return escaped
# Cached for the process lifetime; restart (or cache_clear) to pick up
# newly created indices.
@lru_cache(maxsize=1)
def get_indices_alias() -> list:
    """
    Return a list of all elasticsearch index aliases. Only ES indices which
    start with the QCAT prefix are respected. Editions are stripped away, only the 'type' of the
    index / configuration is relevant.
    """
    indices = []
    for aliases in es.indices.get_alias('*').values():
        for alias in aliases.get('aliases', {}).keys():
            # NOTE(review): substring check, not startswith() -- any alias
            # merely *containing* the prefix passes; confirm intended.
            if settings.ES_INDEX_PREFIX not in alias:
                continue
            # Strip the prefix and the trailing '_<edition>' suffix.
            indices.append(alias.replace(settings.ES_INDEX_PREFIX, '').rsplit('_', 1)[0])
    return indices
Lindy21/CSE498-LRS | oauth_provider/views.py | 1 | 8387 | from oauth.oauth import OAuthError
from django.conf import settings
from django.http import (
HttpResponse, HttpResponseBadRequest, HttpResponseRedirect, HttpResponseForbidden)
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import get_callable
from django.template import RequestContext
from utils import initialize_server_request, send_oauth_error
from decorators import oauth_required
from stores import check_valid_callback
from consts import OUT_OF_BAND
from django.utils.decorators import decorator_from_middleware
from django.shortcuts import render_to_response
from lrs.forms import AuthClientForm
from lrs.models import Token
# Names of the optional Django settings pointing to custom
# authorize/callback views (looked up via getattr(settings, ...)).
OAUTH_AUTHORIZE_VIEW = 'OAUTH_AUTHORIZE_VIEW'
OAUTH_CALLBACK_VIEW = 'OAUTH_CALLBACK_VIEW'
# Pre-built error response, returned whenever an incoming request cannot
# be parsed into a valid OAuth server/request pair (built at import time).
INVALID_PARAMS_RESPONSE = send_oauth_error(OAuthError(
    _('Invalid request parameters.')))
def oauth_home(request):
    # Minimal static landing page for the OAuth endpoints.
    rsp = """
<html><head></head><body><h1>Oauth Authorize</h1></body></html>"""
    return HttpResponse(rsp)
def request_token(request):
"""
The Consumer obtains an unauthorized Request Token by asking the Service
Provider to issue a Token. The Request Token's sole purpose is to receive
User approval and can only be used to obtain an Access Token.
"""
# If oauth is not enabled, don't initiate the handshake
if settings.OAUTH_ENABLED:
oauth_server, oauth_request = initialize_server_request(request)
if oauth_server is None:
return INVALID_PARAMS_RESPONSE
try:
# create a request token
token = oauth_server.fetch_request_token(oauth_request)
# return the token
response = HttpResponse(token.to_string(), mimetype="text/plain")
except OAuthError, err:
response = send_oauth_error(err)
return response
else:
return HttpResponseBadRequest("OAuth is not enabled. To enable, set the OAUTH_ENABLED flag to true in settings")
# tom c added login_url
@login_required(login_url="/XAPI/accounts/login")
def user_authorization(request):
    """
    The Consumer cannot use the Request Token until it has been authorized by
    the User.

    GET renders the (possibly custom) authorize view; POST processes the
    user's grant/deny decision and redirects to the callback or renders
    the (possibly custom) callback view.
    """
    oauth_server, oauth_request = initialize_server_request(request)
    if oauth_request is None:
        return INVALID_PARAMS_RESPONSE
    try:
        # get the request token
        token = oauth_server.fetch_request_token(oauth_request)
        # tom c .. we know user.. save it
        token.user = request.user
        token.save()
    except OAuthError, err:
        return send_oauth_error(err)
    try:
        # get the request callback, though there might not be one
        callback = oauth_server.get_callback(oauth_request)
        # OAuth 1.0a: this parameter should not be present on this version
        if token.callback_confirmed:
            return HttpResponseBadRequest("Cannot specify oauth_callback at authorization step for 1.0a protocol")
        if not check_valid_callback(callback):
            return HttpResponseBadRequest("Invalid callback URL")
    except OAuthError:
        callback = None
    # OAuth 1.0a: use the token's callback if confirmed
    if token.callback_confirmed:
        callback = token.callback
        if callback == OUT_OF_BAND:
            callback = None
    # entry point for the user
    if request.method == 'GET':
        # try to get custom authorize view
        authorize_view_str = getattr(settings, OAUTH_AUTHORIZE_VIEW,
            'oauth_provider.views.fake_authorize_view')
        try:
            authorize_view = get_callable(authorize_view_str)
        except AttributeError:
            raise Exception, "%s view doesn't exist." % authorize_view_str
        params = oauth_request.get_normalized_parameters()
        # set the oauth flag (checked again on the POST below)
        request.session['oauth'] = token.key
        return authorize_view(request, token, callback, params)
    # user grant access to the service
    if request.method == 'POST':
        # verify the oauth flag set in previous GET
        if request.session.get('oauth', '') == token.key:
            request.session['oauth'] = ''
            try:
                form = AuthClientForm(request.POST)
                if form.is_valid():
                    if int(form.cleaned_data.get('authorize_access', 0)):
                        # authorize the token
                        token = oauth_server.authorize_token(token, request.user)
                        # return the token key
                        s = form.cleaned_data.get('scopes', '')
                        if isinstance(s, (list, tuple)):
                            s = ",".join([v.strip() for v in s])
                        # changed scope, gotta save
                        if s:
                            token.scope = s
                            token.save()
                        args = { 'token': token }
                    else:
                        args = { 'error': _('Access not granted by user.') }
                else:
                    # Invalid form: re-render the authorize view with the
                    # bound form so the user can correct it.
                    authorize_view_str = getattr(settings, OAUTH_AUTHORIZE_VIEW,
                        'oauth_provider.views.fake_authorize_view')
                    try:
                        authorize_view = get_callable(authorize_view_str)
                    except AttributeError:
                        raise Exception, "%s view doesn't exist." % authorize_view_str
                    params = oauth_request.get_normalized_parameters()
                    # set the oauth flag
                    request.session['oauth'] = token.key
                    return authorize_view(request, token, callback, params, form)
            except OAuthError, err:
                # NOTE(review): if this fires before ``args`` is assigned,
                # the code below raises NameError on ``args`` -- and the
                # ``response`` assigned here is overwritten anyway.
                response = send_oauth_error(err)
            if callback:
                # Redirect back to the consumer-supplied callback URL.
                if "?" in callback:
                    url_delimiter = "&"
                else:
                    url_delimiter = "?"
                if 'token' in args:
                    query_args = args['token'].to_string(only_key=True)
                else: # access is not authorized i.e. error
                    query_args = 'error=%s' % args['error']
                response = HttpResponseRedirect('%s%s%s' % (callback, url_delimiter, query_args))
            else:
                # No callback: render the (possibly custom) callback view.
                callback_view_str = getattr(settings, OAUTH_CALLBACK_VIEW,
                    'oauth_provider.views.fake_callback_view')
                try:
                    callback_view = get_callable(callback_view_str)
                except AttributeError:
                    raise Exception, "%s view doesn't exist." % callback_view_str
                response = callback_view(request, **args)
        else:
            # Session flag missing/mismatched -- not a continuation of
            # the GET above.
            response = send_oauth_error(OAuthError(_('Action not allowed.')))
        return response
def access_token(request):
"""
The Consumer exchanges the Request Token for an Access Token capable of
accessing the Protected Resources.
"""
oauth_server, oauth_request = initialize_server_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
try:
# get the request token
token = oauth_server.fetch_access_token(oauth_request)
# return the token
response = HttpResponse(token.to_string(), mimetype="text/plain")
except OAuthError, err:
response = send_oauth_error(err)
return response
def authorize_client(request, token=None, callback=None, params=None, form=None):
    """Render the page on which the user grants or denies client access."""
    if not form:
        # Pre-populate the form from the pending request token.
        form = AuthClientForm(initial={'scopes': token.scope_to_list(),
                                       'obj_id': token.pk})
    context = {
        'form': form,
        'name': token.consumer.name,
        'description': token.consumer.description,
        'params': params,
    }
    return render_to_response('oauth_authorize_client.html', context,
                              context_instance=RequestContext(request))
def callback_view(request, **args):
    """Default OAuth callback: show the verifier PIN (or the error).

    ``args`` contains either ``token`` (access granted) or ``error``
    (access denied) -- see user_authorization().
    """
    d = {}
    if 'error' in args:
        d['error'] = args['error']
    else:
        # Only the granted branch carries a token with a verifier;
        # reading it unconditionally raised KeyError on the error path.
        d['verifier'] = args['token'].verifier
    # Render the context that was built (previously ``args`` was passed
    # and ``d`` silently discarded).
    return render_to_response('oauth_verifier_pin.html', d,
                              context_instance=RequestContext(request))
| apache-2.0 |
joshuaunderwood7/HaskeLinGeom | pysrc/LG/Board.py | 1 | 2993 | def indexToLocation(x):
return ( (8-(x%8)) , (int(x/8)+1) )
class Location:
    """A mutable point in 3-D board space (1-based coordinates)."""

    def __init__(self, x=1, y=1, z=1):
        self.x = x
        self.y = y
        self.z = z

    def parseStr(self, inStr):
        """Read coordinates from a string like '(1,2,3)' into self."""
        parts = inStr[1:-1].split(',')
        self.x = int(parts[0])
        self.y = int(parts[1])
        self.z = int(parts[2])

    def arrayShift(self):
        """Shift to 0-based coordinates (for array indexing)."""
        self.x -= 1
        self.y -= 1
        self.z -= 1
        return self

    def shiftBack(self):
        """Undo arrayShift(): return to 1-based coordinates."""
        self.x += 1
        self.y += 1
        self.z += 1
        return self

    def __repr__(self):
        return '(%s, %s, %s)' % (self.x, self.y, self.z)

    def __eq__(self, other):
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Hash the repr so equal locations hash alike.
        return hash(str(self))
class Board:
    """A cuboid board whose playable squares are kept as (x, y, z) tuples."""

    def __init__(self, minx=1, maxx=1, miny=1, maxy=1, minz=1, maxz=1):
        """Default board is 1x1x1 and filled with #'s"""
        self.minX = minx
        self.maxX = maxx
        self.minY = miny
        self.maxY = maxy
        self.minZ = minz
        self.maxZ = maxz
        # Every coordinate inside the cuboid starts out as a playable square.
        self.locations = set()
        for loc in [(x, y, z)
                    for z in range(minz, maxz + 1)
                    for y in range(miny, maxy + 1)
                    for x in range(minx, maxx + 1)]:
            self.locations.add(loc)

    def fill(self, locations):
        """Add the given locations to the board's playable squares."""
        # BUG FIX: set.union() returns a NEW set; the previous code
        # discarded the result, so fill() silently did nothing.
        self.locations |= set(locations)
        return self

    def canAccess(self, location):
        return location in self.locations

    def get(self, location):
        # '#' marks an accessible square, '' an inaccessible one.
        if self.canAccess(location):
            return '#'
        return ''

    def set(self, location):
        self.locations.add(location)
        return self

    def rangeOfX(self):
        """Return the inclusive range of X values."""
        return range(self.minX, self.maxX + 1)

    def rangeOfY(self):
        """Return the inclusive range of Y values."""
        return range(self.minY, self.maxY + 1)

    def rangeOfZ(self):
        """Return the inclusive range of Z values."""
        return range(self.minZ, self.maxZ + 1)

    def __repr__(self):
        # BUG FIX: called undefined `srt` (NameError); also fixed the
        # "loacations" typo in the output.
        returnString = "locations = set("
        for loc in self.locations:
            returnString += str(loc) + ", "
        returnString += ")"
        return returnString

    def __eq__(self, other):
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def getDistanceboard(self):
        """Return a board large enough to hold all distance offsets."""
        return Board(maxx=((self.maxX * 2) - 1),
                     maxy=((self.maxY * 2) - 1),
                     maxz=((self.maxZ * 2) - 1))

    def middle(self):
        """Only returns approximate middle of the distance Board"""
        return Location(self.maxX, self.maxY, self.maxZ)
# Pre-built boards: a standard 8x8 chess board, its 15x15 distance
# board, and an 8x8x8 three-dimensional variant.
chessboard = Board(maxx= 8, maxy=8)
distanceboard = chessboard.getDistanceboard()
chessboard3D = Board(maxx= 8, maxy=8, maxz=8)
| gpl-3.0 |
zombiecoincrypto/zombiecoin | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
# Input/output paths and animation parameters.
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'        # frame filename pattern (zero-padded index)
NUMFRAMES=35
FRAMERATE=10.0                # imagemagick -delay value
CONVERT='convert'             # imagemagick binary
CLOCKWISE=True
DSIZE=(16,16)                 # final thumbnail size

im_src = Image.open(SRC)

# Mirror the source first so that the (counter-clockwise) rotation
# below produces a clockwise spin.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    # Temp file path for the given frame index.
    return path.join(TMPDIR, TMPNAME % frame)

# Render each frame: rotate by the frame's fraction of a full turn,
# downscale, and save to a temp file.
frame_files = []
for frame in xrange(NUMFRAMES):
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# Assemble the frames into the final .mng with imagemagick.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
sauloal/pycluster | pypy-1.9_64/lib-python/2.7/encodings/iso8859_8.py | 593 | 11292 | """ Python Character Mapping Codec iso8859_8 generated from 'MAPPINGS/ISO8859/8859-8.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless iso8859-8 codec backed by the charmap tables below."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap codecs carry no cross-call state."""

    def encode(self, input, final=False):
        # charmap_encode returns (output, length_consumed).
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap codecs carry no cross-call state."""

    def decode(self, input, final=False):
        # charmap_decode returns (output, length_consumed).
        data, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return data
class StreamWriter(Codec,codecs.StreamWriter):
    # All functionality is inherited from Codec and codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # All functionality is inherited from Codec and codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used to register this codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-8',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\ufffe'
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xd7' # 0xAA -> MULTIPLICATION SIGN
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xf7' # 0xBA -> DIVISION SIGN
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u2017' # 0xDF -> DOUBLE LOW LINE
u'\u05d0' # 0xE0 -> HEBREW LETTER ALEF
u'\u05d1' # 0xE1 -> HEBREW LETTER BET
u'\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
u'\u05d3' # 0xE3 -> HEBREW LETTER DALET
u'\u05d4' # 0xE4 -> HEBREW LETTER HE
u'\u05d5' # 0xE5 -> HEBREW LETTER VAV
u'\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0xE7 -> HEBREW LETTER HET
u'\u05d8' # 0xE8 -> HEBREW LETTER TET
u'\u05d9' # 0xE9 -> HEBREW LETTER YOD
u'\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
u'\u05db' # 0xEB -> HEBREW LETTER KAF
u'\u05dc' # 0xEC -> HEBREW LETTER LAMED
u'\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
u'\u05de' # 0xEE -> HEBREW LETTER MEM
u'\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0xF0 -> HEBREW LETTER NUN
u'\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0xF2 -> HEBREW LETTER AYIN
u'\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0xF4 -> HEBREW LETTER PE
u'\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0xF6 -> HEBREW LETTER TSADI
u'\u05e7' # 0xF7 -> HEBREW LETTER QOF
u'\u05e8' # 0xF8 -> HEBREW LETTER RESH
u'\u05e9' # 0xF9 -> HEBREW LETTER SHIN
u'\u05ea' # 0xFA -> HEBREW LETTER TAV
u'\ufffe'
u'\ufffe'
u'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
u'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
u'\ufffe'
)
### Encoding table
# Inverse of decoding_table (character -> byte), built once at import.
encoding_table=codecs.charmap_build(decoding_table)
| mit |
rbarlow/pulp_puppet | pulp_puppet_common/pulp_puppet/common/sync_progress.py | 4 | 7722 | """
Contains classes and functions related to tracking the progress of the puppet
importer.
"""
from pulp_puppet.common import reporting
from pulp_puppet.common.constants import STATE_NOT_STARTED, STATE_SUCCESS, STATE_CANCELED
class SyncProgressReport(object):
"""
Stores the state of the sync run as it proceeds.
This object is used to update the on going progress in Pulp at appropriate intervals through
the update_progress call. Once the sync is finished, this object should be used to produce
the final report to return to Pulp describing the sync.
:ivar conduit: The repository conduit used by the sync.
:type conduit: pulp.plugins.conduits.repo_sync.RepoSyncConduit
"""
def __init__(self, conduit):
    # Conduit used by update_progress() to push state to Pulp.
    self.conduit = conduit

    # Metadata download & parsing
    self.metadata_state = STATE_NOT_STARTED
    # Progress counters for the metadata queries (None until started).
    self.metadata_query_finished_count = None
    self.metadata_query_total_count = None
    self.metadata_current_query = None
    self.metadata_execution_time = None
    # Error details if the metadata step fails as a whole.
    self.metadata_error_message = None
    self.metadata_exception = None
    self.metadata_traceback = None

    # Module download
    self.modules_state = STATE_NOT_STARTED
    self.modules_execution_time = None
    # Counters over all modules handled in this run (None until started).
    self.modules_total_count = None
    self.modules_finished_count = None
    self.modules_error_count = None
    # list of dictionaries describing module failures. The keys are module, author, exception,
    # and traceback.
    self.modules_individual_errors = []
    self.modules_error_message = None # overall execution error
    self.modules_exception = None
    self.modules_traceback = None
@classmethod
def from_progress_dict(cls, report):
    """Rebuild a report instance from its serialized progress dict.

    Client-side counterpart of build_progress_report(): reconstructs
    the instance as retrieved from the server's task. The returned
    instance has no conduit, so build_final_report() will not function
    on it, and any exceptions/tracebacks are text representations
    instead of formal objects.

    :param report: progress report retrieved from the server's task
    :type report: dict
    :return: instance populated with the state in the report
    :rtype: cls
    """
    instance = cls(None)
    # Fields present in both sections; (report key, attribute suffix).
    shared = (('state', 'state'),
              ('execution_time', 'execution_time'),
              ('error_message', 'error_message'),
              ('error', 'exception'),
              ('traceback', 'traceback'))
    # Fields only present in one of the two sections.
    metadata_only = ('current_query', 'query_finished_count',
                     'query_total_count')
    modules_only = ('total_count', 'finished_count', 'error_count',
                    'individual_errors')
    for prefix, extra in (('metadata', metadata_only),
                          ('modules', modules_only)):
        section = report[prefix]
        for key, suffix in shared:
            setattr(instance, '%s_%s' % (prefix, suffix), section[key])
        for key in extra:
            setattr(instance, '%s_%s' % (prefix, key), section[key])
    return instance
def update_progress(self):
"""
Sends the current state of the progress report to Pulp.
"""
report = self.build_progress_report()
self.conduit.set_progress(report)
def build_final_report(self):
"""
Assembles the final report to return to Pulp at the end of the sync.
The conduit will include information that it has tracked over the
course of its usage, therefore this call should only be invoked
when it is time to return the report.
:return: Returns an object representing the final report at the end of a sync.
:rtype: pulp.plugins.model.SyncReport
"""
# Report fields
total_execution_time = -1
if self.metadata_execution_time is not None and self.modules_execution_time is not None:
total_execution_time = self.metadata_execution_time + self.modules_execution_time
summary = {
'total_execution_time' : total_execution_time
}
details = {
'total_count' : self.modules_total_count,
'finished_count' : self.modules_finished_count,
'error_count' : self.modules_error_count,
}
# Determine if the report was successful or failed
all_step_states = (self.metadata_state, self.modules_state)
unsuccessful_steps = [s for s in all_step_states if s != STATE_SUCCESS]
if len(unsuccessful_steps) == 0:
report = self.conduit.build_success_report(summary, details)
else:
report = self.conduit.build_failure_report(summary, details)
if self.metadata_state == STATE_CANCELED:
report.canceled_flag = True
return report
def build_progress_report(self):
"""
Returns the actual report that should be sent to Pulp as the current
progress of the sync.
:return: description of the current state of the sync
:rtype: dict
"""
report = {
'metadata' : self._metadata_section(),
'modules' : self._modules_section(),
}
return report
def add_failed_module(self, module, exception, traceback):
"""
Updates the progress report that a module failed to be imported.
:param module: The module being processed when the failure occurred
:type module: pulp_puppet.plugins.db.models.Module
:param exception: The exception related to the module failure
:type exception: exception
:param traceback: The traceback corresponding with the exception
:type traceback: traceback
"""
self.modules_error_count += 1
error_dict = {
'module': '%s-%s' % (module.name, module.version),
'author': module.author,
'exception': reporting.format_exception(exception),
'traceback': reporting.format_traceback(traceback),
}
self.modules_individual_errors.append(error_dict)
def _metadata_section(self):
metadata_report = {
'state' : self.metadata_state,
'execution_time' : self.metadata_execution_time,
'current_query' : self.metadata_current_query,
'query_finished_count' : self.metadata_query_finished_count,
'query_total_count' : self.metadata_query_total_count,
'error_message' : self.metadata_error_message,
'error' : reporting.format_exception(self.metadata_exception),
'traceback' : reporting.format_traceback(self.metadata_traceback),
}
return metadata_report
def _modules_section(self):
modules_report = {
'state' : self.modules_state,
'execution_time' : self.modules_execution_time,
'total_count' : self.modules_total_count,
'finished_count' : self.modules_finished_count,
'error_count' : self.modules_error_count,
'individual_errors' : self.modules_individual_errors,
'error_message' : self.modules_error_message,
'error' : reporting.format_exception(self.modules_exception),
'traceback' : reporting.format_traceback(self.modules_traceback),
}
return modules_report
| gpl-2.0 |
carlohamalainen/nipype | examples/rsfmri_fsl_compcorr.py | 5 | 3541 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
==============================
rsfMRI: FSL, Nipype, tCompCorr
==============================
Performs preprocessing for resting state data based on the tCompCorr method
described in Behzadi et al. (2007).
Tell python where to find the appropriate functions.
"""
import os # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.utility as util
#####################################################################
# Preliminaries
from nipype.workflows.fmri.fsl import create_resting_preproc
"""
Set up parameters for the resting state preprocessing workflow.
"""
TR = 3.0
restingflow = create_resting_preproc()
restingflow.inputs.inputspec.num_noise_components = 6
restingflow.inputs.inputspec.highpass_sigma = 100/(2*TR)
restingflow.inputs.inputspec.lowpass_sigma = 12.5/(2*TR)
# Specify the location of the data.
data_dir = os.path.abspath('data')
# Specify the subject directories
subject_list = ['s1']
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
name="infosource")
"""Here we set up iteration over all the subjects.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Preprocessing pipeline nodes
----------------------------
Now we create a :class:`nipype.interfaces.io.DataSource` object and
fill in the information from above about the layout of our data.
"""
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=['func', 'struct']),
name = 'datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
# Map field names to individual subject runs.
info = dict(func=[['subject_id', ['f3',]]])
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
Store significant result-files in a special directory
"""
datasink = pe.Node(interface=nio.DataSink(parameterization=False),
name='datasink')
datasink.inputs.base_directory = os.path.abspath('./fslresting/compcorred')
"""
Set up complete workflow
------------------------
"""
def get_substitutions(subject_id):
    """Map autogenerated output filenames to subject-specific names."""
    renames = [
        ('vol0000_warp_merged_detrended_regfilt_filt', '%s_filtered'),
        ('vol0000_warp_merged_tsnr_stddev_thresh', '%s_noisyvoxels'),
    ]
    return [(old, template % subject_id) for old, template in renames]
l1pipeline = pe.Workflow(name= "resting")
l1pipeline.base_dir = os.path.abspath('./fslresting/workingdir')
# Wire the nodes together: subject id -> data grabber -> preproc workflow,
# then route the noise mask and filtered timeseries into the datasink under
# a per-subject container with friendlier filenames (see get_substitutions).
l1pipeline.connect([(infosource, datasource, [('subject_id', 'subject_id')]),
                    (datasource, restingflow, [('func', 'inputspec.func')]),
                    (infosource, datasink, [('subject_id', 'container'),
                                            (('subject_id', get_substitutions),
                                             'substitutions')]),
                    (restingflow, datasink, [('outputspec.noise_mask_file',
                                              '@noisefile'),
                                             ('outputspec.filtered_file',
                                              '@filteredfile')])
                    ])

if __name__ == '__main__':
    l1pipeline.run()
    l1pipeline.write_graph()
sole/high-fidelity | test/marionette/test_app.py | 1 | 2990 | from unittest import skip
from gaiatest import GaiaTestCase
class TestApp(GaiaTestCase):
    """Test standard app functionality like menu bar and tab-switching."""

    # Marionette locator tuples: (strategy, expression), unpacked into
    # find_element / wait_for_element_displayed below.
    popular_tab = ('css selector', '#popular-tab-container')
    popular_tab_link = ('css selector', '#popular-tab a')
    search_input = ('id', 'podcast-search')
    search_tab = ('css selector', '#search-tab-container')
    search_tab_link = ('css selector', '#search-tab a')

    def setUp(self):
        """Run the standard Gaia setUp and open Podcasts for every test."""
        GaiaTestCase.setUp(self)

        # Launch the app!
        self.app = self.apps.launch('Podcasts')

    # Popular podcasts are on hold while we look for a new API.
    @skip('Feature disabled until "popular" API/service is found.')
    def test_popular_tab_exists(self):
        """Test the "Top Podcasts" tab.

        Make sure activating the popular tab works and that the appropriate
        DOM elements are in place.
        """
        # Make sure the popular podcasts tab exists.
        self.wait_for_element_displayed(*self.popular_tab_link)
        popular_tab_link_element = self.marionette.find_element(
            *self.popular_tab_link)
        self.assertEqual(popular_tab_link_element.text, 'Popular',
                         'Popular tab link should exist')

        # Clicking on the popular tab link should open the popular tab.
        self.marionette.tap(popular_tab_link_element)
        self.wait_for_element_displayed(*self.popular_tab)
        self.assertTrue(self.marionette.find_element(*self.popular_tab)
                            .is_displayed(),
                        'Popular podcasts tab should appear when link is '
                        'tapped')

    def test_search_tab_exists(self):
        """Test the Podcast search tab.

        Make sure activating the search tab works and that the appropriate
        DOM elements are in place.
        """
        # Make sure the search tab exists.
        self.wait_for_element_displayed(*self.search_tab_link)
        search_tab_link_element = self.marionette.find_element(
            *self.search_tab_link)
        self.assertEqual(search_tab_link_element.text, 'Search',
                         'Search tab link should exist')

        # Clicking on the search tab link should open the search tab.
        self.marionette.tap(search_tab_link_element)
        self.wait_for_element_displayed(*self.search_tab)
        self.assertTrue(self.marionette.find_element(*self.search_tab)
                            .is_displayed(),
                        'Search tab should appear when link is tapped')

        # Search field should have a placeholder value.
        self.wait_for_element_displayed(*self.search_input)
        self.assertTrue(self.marionette.find_element(*self.search_input)
                            .get_attribute('placeholder'),
                        'Search field should have a placeholder')
| mit |
waltervh/BornAgain | Wrap/swig/doxy2swig.py | 2 | 12248 | #!/usr/bin/env python
"""Doxygen XML to SWIG docstring converter.
Converts Doxygen generated XML files into a file containing docstrings
that can be used by SWIG-1.3.x. Note that you need to get SWIG
version > 1.3.23 or use Robin Dunn's docstring patch to be able to use
the resulting output.
Usage:
doxy2swig.py input.xml output.i
input.xml is your doxygen generated XML file and output.i is where the
output will be written (the file will be clobbered).
"""
# This code is implemented using Mark Pilgrim's code as a guideline:
# http://www.faqs.org/docs/diveintopython/kgp_divein.html
#
# Author: Prabhu Ramachandran
# License: BSD style
#
# update 01.03.2016:
# This version has been modified by Jonathan Fisher (j.fisher@fz-juelich.de)
# to be forwards-compatible with Python 3
from __future__ import print_function
from xml.dom import minidom
import re
import textwrap
import sys
import types
import os.path
def my_open_read(source):
    """Return *source* unchanged when it is already a readable file-like
    object; otherwise treat it as a filename and open it for reading."""
    is_file_like = hasattr(source, "read")
    return source if is_file_like else open(source)
def my_open_write(dest, mode='w'):
    """Return *dest* unchanged when it is already a writable file-like
    object; otherwise treat it as a filename and open it with *mode*."""
    is_file_like = hasattr(dest, "write")
    return dest if is_file_like else open(dest, mode)
class Doxy2SWIG:
    """Converts Doxygen generated XML files into a file containing
    docstrings that can be used by SWIG-1.3.x that have support for
    feature("docstring"). Once the data is parsed it is stored in
    self.pieces.
    """

    def __init__(self, src):
        """Initialize the instance given a source object (file or
        filename).
        """
        f = my_open_read(src)
        # Remember the XML directory so refids found in an index file can be
        # resolved relative to it (see do_doxygenindex).
        self.my_dir = os.path.dirname(f.name)
        self.xmldoc = minidom.parse(f).documentElement
        f.close()

        # Output text fragments, accumulated in document order.
        self.pieces = []
        self.pieces.append('\n// File: %s\n'%\
                           os.path.basename(f.name))

        self.space_re = re.compile(r'\s+')
        # Matches whitespace right after the opening quote of a
        # %feature("docstring") directive so it can be stripped.
        self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)')
        # Set to 1 when processing a multi-file doxygen index.
        self.multi = 0
        # XML tags that carry nothing docstring-worthy; skipped entirely.
        self.ignores = ('inheritancegraph', 'param', 'listofallmembers',
                        'innerclass', 'name', 'declname', 'incdepgraph',
                        'invincdepgraph', 'programlisting', 'type',
                        'references', 'referencedby', 'location',
                        'collaborationgraph', 'reimplements',
                        'reimplementedby', 'derivedcompoundref',
                        'basecompoundref')
        #self.generics = []

    def generate(self):
        """Parses the file set in the initialization. The resulting
        data is stored in `self.pieces`.
        """
        self.parse(self.xmldoc)

    def parse(self, node):
        """Parse a given node. This function in turn calls the
        `parse_<nodeType>` functions which handle the respective
        nodes.
        """
        pm = getattr(self, "parse_%s"%node.__class__.__name__)
        pm(node)

    def parse_Document(self, node):
        # A Document node just delegates to its root element.
        self.parse(node.documentElement)

    def parse_Text(self, node):
        # Escape backslashes and quotes for the generated SWIG string
        # literal, then append unless the text is pure whitespace.
        txt = node.data
        txt = txt.replace('\\', r'\\\\')
        txt = txt.replace('"', r'\"')
        # ignore pure whitespace
        m = self.space_re.match(txt)
        if m and len(m.group()) == len(txt):
            pass
        else:
            #self.add_text(textwrap.fill(txt))
            self.add_text(txt)

    def parse_Element(self, node):
        """Parse an `ELEMENT_NODE`. This calls specific
        `do_<tagName>` handers for different elements. If no handler
        is available the `generic_parse` method is called. All
        tagNames specified in `self.ignores` are simply ignored.
        """
        name = node.tagName
        ignores = self.ignores
        if name in ignores:
            return
        attr = "do_%s" % name
        if hasattr(self, attr):
            handlerMethod = getattr(self, attr)
            handlerMethod(node)
        else:
            self.generic_parse(node)
            #if name not in self.generics: self.generics.append(name)

    def add_text(self, value):
        """Adds text corresponding to `value` into `self.pieces`."""
        #if type(value) in (types.ListType, types.TupleType):
        if type(value) in (list, tuple):
            self.pieces.extend(value)
        else:
            self.pieces.append(value)

    def get_specific_nodes(self, node, names):
        """Given a node and a sequence of strings in `names`, return a
        dictionary containing the names as keys and child
        `ELEMENT_NODEs`, that have a `tagName` equal to the name.
        """
        nodes = [(x.tagName, x) for x in node.childNodes \
                 if x.nodeType == x.ELEMENT_NODE and \
                 x.tagName in names]
        return dict(nodes)

    def generic_parse(self, node, pad=0):
        """A Generic parser for arbitrary tags in a node.

        Parameters:

         - node: A node in the DOM.
         - pad: `int` (default: 0)

           If 0 the node data is not padded with newlines. If 1 it
           appends a newline after parsing the childNodes. If 2 it
           pads before and after the nodes are processed. Defaults to
           0.
        """
        npiece = 0
        if pad:
            npiece = len(self.pieces)
            if pad == 2:
                self.add_text('\n')
        for n in node.childNodes:
            self.parse(n)
        if pad:
            # Only append a trailing newline when the children emitted text.
            if len(self.pieces) > npiece:
                self.add_text('\n')

    def space_parse(self, node):
        # Inline-markup handler: emit a separating space, then recurse.
        self.add_text(' ')
        self.generic_parse(node)

    # Inline formatting tags all reduce to their plain text, space-separated.
    do_ref = space_parse
    do_emphasis = space_parse
    do_bold = space_parse
    do_computeroutput = space_parse
    do_formula = space_parse

    def do_compoundname(self, node):
        # Open a new %feature("docstring") block for the named compound.
        self.add_text('\n\n')
        data = node.firstChild.data
        self.add_text('%%feature("docstring") %s "\n'%data)

    def do_compounddef(self, node):
        # Public classes/structs get a docstring built from their name,
        # descriptions and includes; files/namespaces only contribute their
        # section definitions.
        kind = node.attributes['kind'].value
        if kind in ('class', 'struct'):
            prot = node.attributes['prot'].value
            if prot != 'public':
                return
            names = ('compoundname', 'briefdescription',
                     'detaileddescription', 'includes')
            first = self.get_specific_nodes(node, names)
            for n in names:
                #if first.has_key(n):
                if n in first:
                    self.parse(first[n])
            self.add_text(['";','\n'])
            for n in node.childNodes:
                if n not in first.values():
                    self.parse(n)
        elif kind in ('file', 'namespace'):
            nodes = node.getElementsByTagName('sectiondef')
            for n in nodes:
                self.parse(n)

    def do_includes(self, node):
        self.add_text('C++ includes: ')
        self.generic_parse(node, pad=1)

    def do_parameterlist(self, node):
        self.add_text(['\n', '\n', 'Parameters:', '\n'])
        self.generic_parse(node, pad=1)

    def do_para(self, node):
        self.add_text('\n')
        self.generic_parse(node, pad=1)

    def do_parametername(self, node):
        self.add_text('\n')
        self.add_text("%s: "%node.firstChild.data)

    def do_parameterdefinition(self, node):
        self.generic_parse(node, pad=1)

    def do_detaileddescription(self, node):
        self.generic_parse(node, pad=1)

    def do_briefdescription(self, node):
        self.generic_parse(node, pad=1)

    def do_memberdef(self, node):
        # Emit a %feature("docstring") entry for each public member,
        # qualifying the name with its namespace or class as appropriate.
        prot = node.attributes['prot'].value
        id = node.attributes['id'].value
        kind = node.attributes['kind'].value
        tmp = node.parentNode.parentNode.parentNode
        compdef = tmp.getElementsByTagName('compounddef')[0]
        cdef_kind = compdef.attributes['kind'].value

        if prot == 'public':
            first = self.get_specific_nodes(node, ('definition', 'name'))
            name = first['name'].firstChild.data
            if name[:8] == 'operator': # Don't handle operators yet.
                return

            defn = first['definition'].firstChild.data
            self.add_text('\n')
            self.add_text('%feature("docstring") ')

            anc = node.parentNode.parentNode
            if cdef_kind in ('file', 'namespace'):
                ns_node = anc.getElementsByTagName('innernamespace')
                if not ns_node and cdef_kind == 'namespace':
                    ns_node = anc.getElementsByTagName('compoundname')
                if ns_node:
                    ns = ns_node[0].firstChild.data
                    self.add_text(' %s::%s "\n%s'%(ns, name, defn))
                else:
                    self.add_text(' %s "\n%s'%(name, defn))
            elif cdef_kind in ('class', 'struct'):
                # Get the full function name.
                anc_node = anc.getElementsByTagName('compoundname')
                cname = anc_node[0].firstChild.data
                self.add_text(' %s::%s "\n%s'%(cname, name, defn))

            for n in node.childNodes:
                if n not in first.values():
                    self.parse(n)
            self.add_text(['";', '\n'])

    def do_definition(self, node):
        data = node.firstChild.data
        self.add_text('%s "\n%s'%(data, data))

    def do_sectiondef(self, node):
        # Only function sections contribute docstrings.
        kind = node.attributes['kind'].value
        if kind in ('public-func', 'func'):
            self.generic_parse(node)

    def do_simplesect(self, node):
        kind = node.attributes['kind'].value
        if kind in ('date', 'rcs', 'version'):
            # Skip volatile metadata sections.
            pass
        elif kind == 'warning':
            self.add_text(['\n', 'WARNING: '])
            self.generic_parse(node)
        elif kind == 'see':
            self.add_text('\n')
            self.add_text('See: ')
            self.generic_parse(node)
        else:
            self.generic_parse(node)

    def do_argsstring(self, node):
        self.generic_parse(node, pad=1)

    def do_member(self, node):
        kind = node.attributes['kind'].value
        refid = node.attributes['refid'].value
        if kind == 'function' and refid[:9] == 'namespace':
            self.generic_parse(node)

    def do_doxygenindex(self, node):
        # Index file: recursively convert every referenced compound XML file
        # (resolving paths relative to the index) and splice in the cleaned
        # results.
        self.multi = 1
        comps = node.getElementsByTagName('compound')
        for c in comps:
            refid = c.attributes['refid'].value
            fname = refid + '.xml'
            if not os.path.exists(fname):
                fname = os.path.join(self.my_dir, fname)
            print("parsing file: %s"%fname)
            p = Doxy2SWIG(fname)
            p.generate()
            self.pieces.extend(self.clean_pieces(p.pieces))

    def write(self, fname, mode='w'):
        # In multi-file (index) mode the pieces were already cleaned per
        # sub-file; otherwise clean the whole document now.
        o = my_open_write(fname, mode)
        if self.multi:
            o.write("".join(self.pieces))
        else:
            o.write("".join(self.clean_pieces(self.pieces)))
        o.close()

    def clean_pieces(self, pieces):
        """Cleans the list of strings given as `pieces`. It replaces
        multiple newlines by a maximum of 2 and returns a new list.
        It also wraps the paragraphs nicely.
        """
        # First pass: collapse runs of '\n' pieces (max 2, and just 1 before
        # a closing '";').
        ret = []
        count = 0
        for i in pieces:
            if i == '\n':
                count = count + 1
            else:
                if i == '";':
                    if count:
                        ret.append('\n')
                elif count > 2:
                    ret.append('\n\n')
                elif count:
                    ret.append('\n'*count)
                count = 0
                ret.append(i)

        # Second pass: per-paragraph touch-ups (underline "Parameters:",
        # keep "// File:" comments verbatim, strip stray leading space after
        # the %feature opening quote).
        _data = "".join(ret)
        ret = []
        for i in _data.split('\n\n'):
            if i == 'Parameters:':
                ret.extend(['Parameters:\n-----------', '\n\n'])
            elif i.find('// File:') > -1: # leave comments alone.
                ret.extend([i, '\n'])
            else:
                #_tmp = textwrap.fill(i.strip())
                _tmp = i.strip()
                _tmp = self.lead_spc.sub(r'\1"\2', _tmp)
                ret.extend([_tmp, '\n\n'])
        return ret
def main(input, output):
    """Drive a full conversion: parse the Doxygen XML in *input* and write
    the generated SWIG docstrings to *output*."""
    converter = Doxy2SWIG(input)
    converter.generate()
    converter.write(output)
if __name__ == '__main__':
    # Require exactly two arguments: the Doxygen XML input and the .i output.
    if len(sys.argv) != 3:
        print(__doc__)
        sys.exit(1)
    main(sys.argv[1], sys.argv[2])
| gpl-3.0 |
chevanlol360/Kernel_LGE_Fx1 | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";

# Optional argument restricts counting to events from a single command name.
for_comm = None

if len(sys.argv) > 2:
    sys.exit(usage)
if len(sys.argv) > 1:
    for_comm = sys.argv[1]

# syscall id -> invocation count (autodict from perf's Trace utilities).
syscalls = autodict()
def trace_begin():
    # perf callback: invoked once before event processing starts.
    print "Press control+C to stop and show the summary"
def trace_end():
    # perf callback: invoked after the last event; print the summary table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # perf callback for every syscall entry; tally per-syscall-id counts,
    # optionally filtered to a single command name.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    # First increment on a fresh autodict entry raises TypeError; seed with 1.
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1
def print_syscall_totals():
    # Emit a table of syscall name -> count, most frequent first.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    # Sort by (count, id) descending so the hottest syscalls come first.
    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
        reverse = True):
        print "%-40s %10d\n" % (syscall_name(id), val),
mancoast/CPythonPyc_test | cpython/223_test_poll.py | 10 | 4347 | # Test case for the os.poll() function
import sys, os, select, random
from test_support import verify, verbose, TestSkipped, TESTFN
# Skip the whole test on platforms whose select module lacks poll().
try:
    select.poll
except AttributeError:
    raise TestSkipped, "select.poll not defined -- skipping test_poll"
def find_ready_matching(ready, flag):
    """Return the fds from (fd, eventmask) pairs whose mask includes *flag*."""
    return [fd for fd, mode in ready if mode & flag]
def test_poll1():
    """Basic functional test of poll object

    Create a bunch of pipes and test that poll works with them.
    """
    print 'Running poll test 1'
    p = select.poll()

    NUM_PIPES = 12
    MSG = " This is a test."
    MSG_LEN = len(MSG)

    readers = []
    writers = []
    r2w = {}
    w2r = {}

    # Register both ends of every pipe for the events they can satisfy.
    for i in range(NUM_PIPES):
        rd, wr = os.pipe()
        p.register(rd, select.POLLIN)
        p.register(wr, select.POLLOUT)
        readers.append(rd)
        writers.append(wr)
        r2w[rd] = wr
        w2r[wr] = rd

    # Round-trip MSG through one randomly chosen ready pipe per iteration,
    # retiring (closing + unregistering) each pipe after use.
    while writers:
        ready = p.poll()
        ready_writers = find_ready_matching(ready, select.POLLOUT)
        if not ready_writers:
            raise RuntimeError, "no pipes ready for writing"
        wr = random.choice(ready_writers)
        os.write(wr, MSG)

        ready = p.poll()
        ready_readers = find_ready_matching(ready, select.POLLIN)
        if not ready_readers:
            raise RuntimeError, "no pipes ready for reading"
        rd = random.choice(ready_readers)
        buf = os.read(rd, MSG_LEN)
        verify(len(buf) == MSG_LEN)
        print buf
        os.close(r2w[rd]) ; os.close( rd )
        p.unregister( r2w[rd] )
        p.unregister( rd )
        writers.remove(r2w[rd])

    poll_unit_tests()
    print 'Poll test 1 complete'
def poll_unit_tests():
    """Exercise poll() error paths: invalid fds, bad argument types, and
    unregistering unknown objects."""
    # returns NVAL for invalid file descriptor
    FD = 42
    try:
        os.close(FD)
    except OSError:
        pass
    p = select.poll()
    p.register(FD)
    r = p.poll()
    verify(r[0] == (FD, select.POLLNVAL))

    # A registered file object reports its fileno; after close the same fd
    # polls as POLLNVAL.
    f = open(TESTFN, 'w')
    fd = f.fileno()
    p = select.poll()
    p.register(f)
    r = p.poll()
    verify(r[0][0] == fd)
    f.close()
    r = p.poll()
    verify(r[0] == (fd, select.POLLNVAL))
    os.unlink(TESTFN)

    # type error for invalid arguments
    p = select.poll()
    try:
        p.register(p)
    except TypeError:
        pass
    else:
        print "Bogus register call did not raise TypeError"
    try:
        p.unregister(p)
    except TypeError:
        pass
    else:
        print "Bogus unregister call did not raise TypeError"

    # can't unregister non-existent object
    p = select.poll()
    try:
        p.unregister(3)
    except KeyError:
        pass
    else:
        print "Bogus unregister call did not raise KeyError"

    # Test error cases
    pollster = select.poll()
    class Nope:
        pass

    class Almost:
        # fileno() exists but returns a non-int, so register must reject it.
        def fileno(self):
            return 'fileno'

    try:
        pollster.register( Nope(), 0 )
    except TypeError: pass
    else: print 'expected TypeError exception, not raised'

    try:
        pollster.register( Almost(), 0 )
    except TypeError: pass
    else: print 'expected TypeError exception, not raised'
# Another test case for poll(). This is copied from the test case for
# select(), modified to use poll() instead.
def test_poll2():
    """Poll a shell pipe that emits a line per second, with a mix of finite
    and infinite (-1) timeouts, until EOF/hangup."""
    print 'Running poll test 2'
    cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
    p = os.popen(cmd, 'r')
    pollster = select.poll()
    pollster.register( p, select.POLLIN )
    # Timeouts are in milliseconds; -1 blocks until an event arrives.
    for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
        if verbose:
            print 'timeout =', tout
        fdlist = pollster.poll(tout)
        if (fdlist == []):
            continue
        fd, flags = fdlist[0]
        if flags & select.POLLHUP:
            # Writer has exited; no further data should be readable.
            line = p.readline()
            if line != "":
                print 'error: pipe seems to be closed, but still returns data'
            continue

        elif flags & select.POLLIN:
            line = p.readline()
            if verbose:
                print `line`
            if not line:
                if verbose:
                    print 'EOF'
                break
            continue
        else:
            print 'Unexpected return value from select.poll:', fdlist
    p.close()
    print 'Poll test 2 complete'
# Run both functional tests when the module is imported by the test driver.
test_poll1()
test_poll2()
| gpl-3.0 |
DONIKAN/django | django/utils/numberformat.py | 431 | 1944 | from __future__ import unicode_literals
from decimal import Decimal
from django.conf import settings
from django.utils import six
from django.utils.safestring import mark_safe
def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
           force_grouping=False):
    """
    Gets a number (as a number or string), and returns it as a string,
    using formats defined as arguments:

    * decimal_sep: Decimal separator symbol (for example ".")
    * decimal_pos: Number of decimal positions
    * grouping: Number of digits in every group limited by thousand separator
    * thousand_sep: Thousand separator symbol (for example ",")
    """
    # Grouping applies only when localization + thousand separators are
    # enabled in settings (or the caller forces it) and the group size is
    # positive.
    use_grouping = settings.USE_L10N and settings.USE_THOUSAND_SEPARATOR
    use_grouping = use_grouping or force_grouping
    use_grouping = use_grouping and grouping > 0
    # Make the common case fast
    if isinstance(number, int) and not use_grouping and not decimal_pos:
        return mark_safe(six.text_type(number))
    # sign
    sign = ''
    # Decimal is rendered with '{:f}' to avoid scientific notation.
    if isinstance(number, Decimal):
        str_number = '{:f}'.format(number)
    else:
        str_number = six.text_type(number)
    # NOTE(review): very large/small floats stringify with an exponent
    # (e.g. '1e+16'); the splitting below does not account for that form --
    # confirm such magnitudes arrive as Decimal or str.
    if str_number[0] == '-':
        sign = '-'
        str_number = str_number[1:]
    # decimal part
    if '.' in str_number:
        int_part, dec_part = str_number.split('.')
        if decimal_pos is not None:
            # Truncate (not round) to the requested number of positions.
            dec_part = dec_part[:decimal_pos]
    else:
        int_part, dec_part = str_number, ''
    if decimal_pos is not None:
        # Zero-pad the decimal part out to exactly decimal_pos digits.
        dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
    if dec_part:
        dec_part = decimal_sep + dec_part
    # grouping
    if use_grouping:
        int_part_gd = ''
        # Walk the digits right-to-left, inserting the (reversed) separator
        # every `grouping` digits, then flip the result back.
        for cnt, digit in enumerate(int_part[::-1]):
            if cnt and not cnt % grouping:
                int_part_gd += thousand_sep[::-1]
            int_part_gd += digit
        int_part = int_part_gd[::-1]
    return sign + int_part + dec_part
| bsd-3-clause |
kalahbrown/HueBigSQL | desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/writer/dump_worksheet.py | 61 | 8158 | # file openpyxl/writer/straight_worksheet.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Write worksheets to xml representations in an optimized way"""
import datetime
import os
from ..cell import column_index_from_string, get_column_letter, Cell
from ..worksheet import Worksheet
from ..shared.xmltools import XMLGenerator, get_document_content, \
start_tag, end_tag, tag
from ..shared.date_time import SharedDate
from ..shared.ooxml import MAX_COLUMN, MAX_ROW
from tempfile import NamedTemporaryFile
from ..writer.excel import ExcelWriter
from ..writer.strings import write_string_table
from ..writer.styles import StyleWriter
from ..style import Style, NumberFormat
from ..shared.ooxml import ARC_SHARED_STRINGS, ARC_CONTENT_TYPES, \
ARC_ROOT_RELS, ARC_WORKBOOK_RELS, ARC_APP, ARC_CORE, ARC_THEME, \
ARC_STYLE, ARC_WORKBOOK, \
PACKAGE_WORKSHEETS, PACKAGE_DRAWINGS, PACKAGE_CHARTS
# Maps a python value category to the cell type and built-in style index used
# when serializing it to sheet XML (see DumpWorksheet.append).
STYLES = {'datetime' : {'type':Cell.TYPE_NUMERIC,
                        'style':'1'},
          'string':{'type':Cell.TYPE_STRING,
                    'style':'0'},
          'numeric':{'type':Cell.TYPE_NUMERIC,
                     'style':'0'},
          'formula':{'type':Cell.TYPE_FORMULA,
                     'style':'0'},
          'boolean':{'type':Cell.TYPE_BOOL,
                     'style':'0'},
          }

# Number format behind style index '1' above, applied to datetime cells.
DATETIME_STYLE = Style()
DATETIME_STYLE.number_format.format_code = NumberFormat.FORMAT_DATE_YYYYMMDD2

# Cell range spanning A1 through the sheet's maximum column/row.
BOUNDING_BOX_PLACEHOLDER = 'A1:%s%d' % (get_column_letter(MAX_COLUMN), MAX_ROW)
class DumpWorksheet(Worksheet):
    """
    .. warning::

        You shouldn't initialize this yourself, use :class:`openpyxl.workbook.Workbook` constructor instead,
        with `optimized_write = True`.
    """
    def __init__(self, parent_workbook):
        Worksheet.__init__(self, parent_workbook)

        self._max_col = 0
        self._max_row = 0
        self._parent = parent_workbook

        # Header and content go to separate temp files because the header's
        # <dimension> needs the final row/column counts; both parts are
        # concatenated into self._fileobj by close().
        self._fileobj_header = NamedTemporaryFile(mode='r+', prefix='openpyxl.', suffix='.header', delete=False)
        self._fileobj_content = NamedTemporaryFile(mode='r+', prefix='openpyxl.', suffix='.content', delete=False)
        self._fileobj = NamedTemporaryFile(mode='w', prefix='openpyxl.', delete=False)

        self.doc = XMLGenerator(self._fileobj_content, 'utf-8')
        self.header = XMLGenerator(self._fileobj_header, 'utf-8')
        self.title = 'Sheet'

        self._shared_date = SharedDate()
        self._string_builder = self._parent.strings_table_builder

    @property
    def filename(self):
        # Path of the assembled worksheet XML temp file.
        return self._fileobj.name

    def write_header(self):
        # Emit the worksheet preamble up to and including the opening
        # <sheetData> tag; the matching close tag is written by
        # _close_content().
        doc = self.header

        start_tag(doc, 'worksheet',
                  {'xml:space': 'preserve',
                   'xmlns': 'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
                   'xmlns:r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'})
        start_tag(doc, 'sheetPr')
        tag(doc, 'outlinePr',
            {'summaryBelow': '1',
             'summaryRight': '1'})
        end_tag(doc, 'sheetPr')

        tag(doc, 'dimension', {'ref': 'A1:%s' % (self.get_dimensions())})
        start_tag(doc, 'sheetViews')
        start_tag(doc, 'sheetView', {'workbookViewId': '0'})
        tag(doc, 'selection', {'activeCell': 'A1',
                               'sqref': 'A1'})
        end_tag(doc, 'sheetView')
        end_tag(doc, 'sheetViews')
        tag(doc, 'sheetFormatPr', {'defaultRowHeight': '15'})
        start_tag(doc, 'sheetData')

    def close(self):
        # Finish both parts, then concatenate header + content into the
        # final file.
        self._close_content()
        self._close_header()

        self._write_fileobj(self._fileobj_header)
        self._write_fileobj(self._fileobj_content)
        self._fileobj.close()

    def _write_fileobj(self, fobj):
        # Append fobj's full contents to the final file in 4K chunks, then
        # delete the temp file.
        fobj.flush()
        fobj.seek(0)

        while True:
            chunk = fobj.read(4096)
            if not chunk:
                break
            self._fileobj.write(chunk)
        fobj.close()
        os.remove(fobj.name)

        self._fileobj.flush()

    def _close_header(self):
        doc = self.header
        #doc.endDocument()

    def _close_content(self):
        # Close the tags opened by write_header().
        doc = self.doc
        end_tag(doc, 'sheetData')
        end_tag(doc, 'worksheet')
        #doc.endDocument()

    def get_dimensions(self):
        # Bottom-right cell reference of the data written so far.
        if not self._max_col or not self._max_row:
            return 'A1'
        else:
            return '%s%d' % (get_column_letter(self._max_col), (self._max_row))

    def append(self, row):
        """
        :param row: iterable containing values to append
        :type row: iterable
        """
        doc = self.doc

        self._max_row += 1
        span = len(row)
        self._max_col = max(self._max_col, span)
        row_idx = self._max_row

        attrs = {'r': '%d' % row_idx,
                 'spans': '1:%d' % span}
        start_tag(doc, 'row', attrs)

        for col_idx, cell in enumerate(row):
            if cell is None:
                continue

            coordinate = '%s%d' % (get_column_letter(col_idx+1), row_idx)
            attributes = {'r': coordinate}

            # Choose serialization from the python type: dates go through
            # SharedDate.datetime_to_julian, strings through the shared
            # string table, '='-prefixed strings become formulas.
            if isinstance(cell, bool):
                dtype = 'boolean'
            elif isinstance(cell, (int, float)):
                dtype = 'numeric'
            elif isinstance(cell, (datetime.datetime, datetime.date)):
                dtype = 'datetime'
                cell = self._shared_date.datetime_to_julian(cell)
                attributes['s'] = STYLES[dtype]['style']
            elif cell and cell[0] == '=':
                dtype = 'formula'
            else:
                dtype = 'string'
                cell = self._string_builder.add(cell)

            attributes['t'] = STYLES[dtype]['type']

            start_tag(doc, 'c', attributes)

            if dtype == 'formula':
                tag(doc, 'f', body = '%s' % cell[1:])
                tag(doc, 'v')
            else:
                tag(doc, 'v', body = '%s' % cell)

            end_tag(doc, 'c')

        end_tag(doc, 'row')
def save_dump(workbook, filename):
    """Serialize *workbook* to *filename* using the optimized dump writer."""
    ExcelDumpWriter(workbook).save(filename)
    return True
class ExcelDumpWriter(ExcelWriter):
    # Writer specialised for "dump" (write-only) workbooks: worksheets are
    # streamed to temp files up front and copied into the archive verbatim.

    def __init__(self, workbook):
        self.workbook = workbook
        self.style_writer = StyleDumpWriter(workbook)
        # Datetime cells need a dedicated number-format style registered.
        self.style_writer._style_list.append(DATETIME_STYLE)

    def _write_string_table(self, archive):
        # The shared-string table was accumulated while rows were appended.
        shared_string_table = self.workbook.strings_table_builder.get_table()
        archive.writestr(ARC_SHARED_STRINGS,
            write_string_table(shared_string_table))
        return shared_string_table

    def _write_worksheets(self, archive, shared_string_table, style_writer):
        for i, sheet in enumerate(self.workbook.worksheets):
            sheet.write_header()
            sheet.close()
            # Sheets are 1-indexed inside the package.
            archive.write(sheet.filename, PACKAGE_WORKSHEETS + '/sheet%d.xml' % (i + 1))
            # The temp file has been copied into the zip; discard it.
            os.remove(sheet.filename)
class StyleDumpWriter(StyleWriter):
    # Style writer used by dump workbooks.

    def _get_style_list(self, workbook):
        # Write-only mode collects no per-cell styles; any required styles
        # (e.g. the datetime format) are appended explicitly by the caller.
        return []
| apache-2.0 |
nitin-cherian/LifeLongLearning | Python/Experiments/JINJA/RealPython/jinja_env/lib/python3.5/site-packages/flask/json.py | 121 | 9183 | # -*- coding: utf-8 -*-
"""
flask.jsonimpl
~~~~~~~~~~~~~~
Implementation helpers for the JSON support in Flask.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import io
import uuid
from datetime import date
from .globals import current_app, request
from ._compat import text_type, PY2
from werkzeug.http import http_date
from jinja2 import Markup
# Use the same json implementation as itsdangerous on which we
# depend anyways.
from itsdangerous import json as _json
# Figure out if simplejson escapes slashes. This behavior was changed
# from one version to another without reason.
_slash_escape = '\\/' not in _json.dumps('/')
__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
'jsonify']
def _wrap_reader_for_text(fp, encoding):
if isinstance(fp.read(0), bytes):
fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
return fp
def _wrap_writer_for_text(fp, encoding):
try:
fp.write('')
except TypeError:
fp = io.TextIOWrapper(fp, encoding)
return fp
class JSONEncoder(_json.JSONEncoder):
    """The default Flask JSON encoder.  This one extends the default simplejson
    encoder by also supporting ``datetime`` objects, ``UUID`` as well as
    ``Markup`` objects which are serialized as RFC 822 datetime strings (same
    as the HTTP date format).  In order to support more data types override the
    :meth:`default` method.
    """

    def default(self, o):
        """Implement this method in a subclass such that it returns a
        serializable object for ``o``, or calls the base implementation (to
        raise a :exc:`TypeError`).

        For example, to support arbitrary iterators, you could implement
        default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        # NOTE: the isinstance order matters; ``date`` covers ``datetime``
        # too, so both serialize as RFC 822 / HTTP date strings.
        if isinstance(o, date):
            return http_date(o.timetuple())
        if isinstance(o, uuid.UUID):
            return str(o)
        if hasattr(o, '__html__'):
            # Markup-like objects: serialize their HTML representation.
            return text_type(o.__html__())
        # Fall back to the stock encoder, which raises TypeError.
        return _json.JSONEncoder.default(self, o)
class JSONDecoder(_json.JSONDecoder):
    """The default JSON decoder.  This one does not change the behavior from
    the default simplejson decoder.  Consult the :mod:`json` documentation
    for more information.  This decoder is not only used for the load
    functions of this module but also :attr:`~flask.Request`.
    """
def _dump_arg_defaults(kwargs):
    """Inject default arguments for dump functions."""
    if not current_app:
        # Outside of an application context: conservative defaults.
        kwargs.setdefault('sort_keys', True)
        kwargs.setdefault('cls', JSONEncoder)
        return
    # Inside an app context: honor the application's configuration.
    kwargs.setdefault('cls', current_app.json_encoder)
    if not current_app.config['JSON_AS_ASCII']:
        kwargs.setdefault('ensure_ascii', False)
    kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
def _load_arg_defaults(kwargs):
    """Inject default arguments for load functions."""
    # Use the application's decoder when an app context is active.
    decoder = current_app.json_decoder if current_app else JSONDecoder
    kwargs.setdefault('cls', decoder)
def dumps(obj, **kwargs):
    """Serialize *obj* to a JSON-formatted string.

    Uses the application's configured encoder
    (:attr:`~flask.Flask.json_encoder`) when an application context is on
    the stack.  The result is a unicode string (or an ASCII-only
    bytestring, governed by the ``JSON_AS_ASCII`` config and the
    simplejson ``ensure_ascii`` parameter); pass ``encoding`` to get an
    encoded bytestring back.
    """
    _dump_arg_defaults(kwargs)
    encoding = kwargs.pop('encoding', None)
    rv = _json.dumps(obj, **kwargs)
    if encoding is not None and isinstance(rv, text_type):
        return rv.encode(encoding)
    return rv
def dump(obj, fp, **kwargs):
    """Like :func:`dumps` but writes the JSON document into file object *fp*."""
    _dump_arg_defaults(kwargs)
    encoding = kwargs.pop('encoding', None)
    # When an explicit encoding is requested, wrap binary sinks so the
    # encoded text actually reaches them.
    target = fp if encoding is None else _wrap_writer_for_text(fp, encoding)
    _json.dump(obj, target, **kwargs)
def loads(s, **kwargs):
    """Deserialize a JSON document from string or bytes *s*.

    Uses the application's configured decoder
    (:attr:`~flask.Flask.json_decoder`) when an application context is on
    the stack.
    """
    _load_arg_defaults(kwargs)
    if isinstance(s, bytes):
        # Decode byte input first; default to UTF-8.
        charset = kwargs.pop('encoding', None) or 'utf-8'
        s = s.decode(charset)
    return _json.loads(s, **kwargs)
def load(fp, **kwargs):
    """Like :func:`loads` but reads the JSON document from file object *fp*."""
    _load_arg_defaults(kwargs)
    if not PY2:
        # On Python 3 the stream must yield text; wrap binary readers.
        charset = kwargs.pop('encoding', None) or 'utf-8'
        fp = _wrap_reader_for_text(fp, charset)
    return _json.load(fp, **kwargs)
def htmlsafe_dumps(obj, **kwargs):
    """Serialize to JSON that is safe to embed inside ``<script>`` tags.

    Works exactly like :func:`dumps` but additionally escapes ``<``, ``>``,
    ``&`` and ``'`` as unicode escapes, so the result cannot terminate a
    script block and is safe even outside of ``<script>`` tags.  This backs
    the ``|tojson`` template filter, which also marks the result as safe.

    The one exception is double-quoted HTML attributes: single-quote such
    attributes or HTML-escape the value in addition.

    .. versionchanged:: 0.10
        The return value is now always safe for HTML usage, even outside of
        script tags and in XHTML, except inside double-quoted attributes
        (use ``|tojson|forceescape`` there).
    """
    rv = dumps(obj, **kwargs)
    for char, escape in ((u'<', u'\\u003c'), (u'>', u'\\u003e'),
                         (u'&', u'\\u0026'), (u"'", u'\\u0027')):
        rv = rv.replace(char, escape)
    if not _slash_escape:
        # Normalize simplejson versions that escape forward slashes.
        rv = rv.replace('\\/', '/')
    return rv
def htmlsafe_dump(obj, fp, **kwargs):
    """Like :func:`htmlsafe_dumps` but writes into a file object."""
    payload = htmlsafe_dumps(obj, **kwargs)
    fp.write(text_type(payload))
def jsonify(*args, **kwargs):
    """Build an :mimetype:`application/json` :class:`~flask.Response` from
    the given data using :func:`dumps`.

    For convenience, the arguments are normalized before serialization:

    1. A single positional argument is passed straight through to
       :func:`dumps` (so both ``jsonify(1, 2, 3)`` and ``jsonify([1, 2, 3])``
       serialize to ``[1,2,3]``).
    2. Multiple positional arguments become a JSON array.
    3. Keyword arguments become a JSON object.
    4. Mixing positional and keyword arguments is undefined and raises
       :exc:`TypeError`.

    Example usage::

        from flask import jsonify

        @app.route('/_get_current_user')
        def get_current_user():
            return jsonify(username=g.user.username,
                           email=g.user.email,
                           id=g.user.id)

    The response is pretty printed to simplify debugging, unless the
    request was made with ``X-Requested-With: XMLHttpRequest`` or the
    ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to false.

    .. versionchanged:: 0.11
       Added support for serializing top-level arrays.  This introduces a
       security risk in ancient browsers.  See :ref:`json-security`.

    .. versionadded:: 0.2
    """
    if current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] and not request.is_xhr:
        indent = 2
        separators = (', ', ': ')
    else:
        # Compact formatting: no indents, no spaces after separators.
        indent = None
        separators = (',', ':')

    if args and kwargs:
        raise TypeError('jsonify() behavior undefined when passed both args and kwargs')
    if len(args) == 1:  # single args are passed directly to dumps()
        data = args[0]
    else:
        data = args or kwargs

    body = dumps(data, indent=indent, separators=separators)
    return current_app.response_class(
        (body, '\n'),
        mimetype=current_app.config['JSONIFY_MIMETYPE']
    )
def tojson_filter(obj, **kwargs):
    # Jinja ``|tojson`` filter: HTML-safe JSON, marked safe via Markup.
    return Markup(htmlsafe_dumps(obj, **kwargs))
| mit |
urosgruber/dd-agent | tests/checks/integration/test_go_expvar.py | 46 | 3078 | # stdlib
from collections import defaultdict
import time
# 3p
from nose.plugins.attrib import attr
# project
from tests.checks.common import AgentCheckTest
@attr(requires='go_expvar')
class TestGoExpVar(AgentCheckTest):
    # Integration test (Python 2 / nose): runs the go_expvar check twice
    # against a local expvar endpoint and verifies gauges, rates and the
    # custom metrics configured below.

    CHECK_NAME = 'go_expvar'

    # Memory-stat gauges the check must always report.
    CHECK_GAUGES = [
        'go_expvar.memstats.alloc',
        'go_expvar.memstats.heap_alloc',
        'go_expvar.memstats.heap_idle',
        'go_expvar.memstats.heap_inuse',
        'go_expvar.memstats.heap_objects',
        'go_expvar.memstats.heap_released',
        'go_expvar.memstats.heap_sys',
        'go_expvar.memstats.total_alloc',
    ]

    # Histogram summaries derived from memstats.pause_ns.
    CHECK_GAUGES_DEFAULT = [
        'go_expvar.memstats.pause_ns.95percentile',
        'go_expvar.memstats.pause_ns.avg',
        'go_expvar.memstats.pause_ns.count',
        'go_expvar.memstats.pause_ns.max',
        'go_expvar.memstats.pause_ns.median',
    ]

    # Custom metrics configured via the 'metrics' instance option below.
    CHECK_GAUGES_CUSTOM = {'go_expvar.last_user': '123456'}

    CHECK_RATES = [
        'go_expvar.memstats.frees',
        'go_expvar.memstats.lookups',
        'go_expvar.memstats.mallocs',
        'go_expvar.memstats.num_gc',
        'go_expvar.memstats.pause_total_ns',
    ]
    CHECK_RATES_CUSTOM = {'go_expvar.num_calls': 0}

    def __init__(self, *args, **kwargs):
        AgentCheckTest.__init__(self, *args, **kwargs)
        self.config = {
            "instances": [{
                "expvar_url": 'http://localhost:8079/debug/vars',
                'tags': ['my_tag'],
                'metrics': [
                    {
                        'path': 'last_user'
                    },
                    {
                        'path': 'num_calls',
                        "type": "rate"
                    },
                ]
            }]
        }

    def _run_check_twice(self):
        # To avoid the disappearance of some gauges during the second check
        mocks = {}
        config = self.config
        expvar_url = self.config['instances'][0]['expvar_url']
        fake_last_gc_count = defaultdict(int)
        mocks['_last_gc_count'] = fake_last_gc_count

        # Can't use run_check_twice due to specific metrics
        self.run_check(config, mocks=mocks)
        # Sleep so the second run produces non-zero rates.
        time.sleep(1)
        # Reset the fake GC counter so gauges are re-emitted.
        del fake_last_gc_count[expvar_url]
        self.run_check(config, mocks=mocks)

    # Real integration test
    def test_go_expvar(self):
        self._run_check_twice()

        # Every metric must carry the configured tag plus the expvar_url tag.
        shared_tags = [
            'my_tag',
            'expvar_url:{0}'.format(self.config['instances'][0]['expvar_url'])
        ]

        for gauge in self.CHECK_GAUGES + self.CHECK_GAUGES_DEFAULT:
            self.assertMetric(gauge, count=1, tags=shared_tags)

        for rate in self.CHECK_RATES:
            self.assertMetric(rate, count=1, tags=shared_tags)

        for gauge, value in self.CHECK_GAUGES_CUSTOM.iteritems():
            self.assertMetric(gauge, count=1, value=value, tags=shared_tags)

        for rate, value in self.CHECK_RATES_CUSTOM.iteritems():
            self.assertMetric(rate, count=1, value=value, tags=shared_tags)

        self.coverage_report()
| bsd-3-clause |
i19870503/i19870503 | Python/eggnog2go_anno.py | 1 | 2591 | import os
import re
import pandas as pd
import string
import itertools
import numpy as np
import sys
import argparse
from collections import OrderedDict
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Create GO annotation and enrichment file')
    parser.add_argument('-i', type=str, dest='infile', required=True, help="Input file")
    parser.add_argument('-o', type=str, dest='out', required=True, help="Output file")
    parser.add_argument('-db', type=str, dest='db', required=True, help="GO Database file")
    args = parser.parse_args()
    print(args)

    def sort_uniq(sequence):
        """Yield the sorted unique elements of *sequence* (currently unused helper)."""
        return (x[0] for x in itertools.groupby(sorted(sequence)))

    path = "/home/zluna/Work/GO"

    # ---- per-gene annotation table -------------------------------------
    fout = open(args.out + "_anno.xls", 'w')
    print("Gene_id", "GO_annotation", sep='\t', file=fout)

    # go_db columns: 0 = GO term name, 2 = GO id.
    go_db = pd.read_table(os.path.join(path, args.db), header=None)
    # eggout columns: 0 = gene id, 5 = comma-separated GO ids (may be NaN).
    eggout = pd.read_table(os.path.join(path, args.infile), header=None)

    # GO id -> list of gene ids carrying that annotation (insertion-ordered).
    # (Renamed from ``dict``, which shadowed the builtin.)
    go2genes = OrderedDict()
    # Loop-invariant: the full GO id column, hoisted out of the gene loop.
    all_go_ids = list(go_db[2])

    for i in range(len(eggout)):
        gene_id = eggout[0][i]
        go_id = eggout[5][i]
        if pd.isnull(go_id):
            go_id = ''
        # Note: ''.split(',') yields [''], so the list is never empty.
        go_id = go_id.split(',')

        # Build "GOID~term" strings for every matching database row.
        sel_go_table = go_db[go_db[2].isin(go_id)]
        go_sum = []
        for j in range(len(sel_go_table)):
            go_sum.append(''.join((list(sel_go_table[2])[j], "~", list(sel_go_table[0])[j])))
        print(gene_id,
              str(go_sum).strip('[]').replace(']', '').replace("'", "").replace(", ", "; "),
              sep='\t', file=fout)

        # Invert the mapping: record this gene under every GO id it matched.
        joined = str(go_sum)
        for term in all_go_ids:
            if joined.find(term) != -1:
                go2genes.setdefault(term, []).append(gene_id)
    fout.close()

    # ---- per-term enrichment table -------------------------------------
    fout2 = open(args.out + "_enrich.xls", 'w')
    print('GOID', 'Term', 'Genes', 'Gene_count', sep='\t', file=fout2)
    for key, values in go2genes.items():
        print(key, list(go_db[go_db[2] == key][0]),
              str(values).strip('[]').replace(']', '').replace("'", ""),
              len(values), sep='\t', file=fout2)
    # BUG FIX: was ``fout2.cloes()``, which raised AttributeError and could
    # leave the enrichment file unflushed.
    fout2.close()
| gpl-2.0 |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/parallel/apps/ipcontrollerapp.py | 2 | 20434 | #!/usr/bin/env python
# encoding: utf-8
"""
The IPython controller application.
Authors:
* Brian Granger
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import with_statement
import json
import os
import stat
import sys
from multiprocessing import Process
from signal import signal, SIGINT, SIGABRT, SIGTERM
import zmq
from zmq.devices import ProcessMonitoredQueue
from zmq.log.handlers import PUBHandler
from IPython.core.profiledir import ProfileDir
from IPython.parallel.apps.baseapp import (
BaseParallelApplication,
base_aliases,
base_flags,
catch_config_error,
)
from IPython.utils.importstring import import_item
from IPython.utils.localinterfaces import LOCALHOST, PUBLIC_IPS
from IPython.utils.traitlets import Instance, Unicode, Bool, List, Dict, TraitError
from IPython.kernel.zmq.session import (
Session, session_aliases, session_flags, default_secure
)
from IPython.parallel.controller.heartmonitor import HeartMonitor
from IPython.parallel.controller.hub import HubFactory
from IPython.parallel.controller.scheduler import TaskScheduler,launch_scheduler
from IPython.parallel.controller.dictdb import DictDB
from IPython.parallel.util import split_url, disambiguate_url, set_hwm
# conditional import of SQLiteDB / MongoDB backend class
real_dbs = []
try:
from IPython.parallel.controller.sqlitedb import SQLiteDB
except ImportError:
pass
else:
real_dbs.append(SQLiteDB)
try:
from IPython.parallel.controller.mongodb import MongoDB
except ImportError:
pass
else:
real_dbs.append(MongoDB)
#-----------------------------------------------------------------------------
# Module level variables
#-----------------------------------------------------------------------------
# User-facing help text rendered by ``ipcontroller --help``.
_description = """Start the IPython controller for parallel computing.
The IPython controller provides a gateway between the IPython engines and
clients. The controller needs to be started before the engines and can be
configured using command line options or using a cluster directory. Cluster
directories contain config, log and security files and are usually located in
your ipython directory and named as "profile_name". See the `profile`
and `profile-dir` options for details.
"""

# Example invocations appended to the help output.
_examples = """
ipcontroller --ip=192.168.0.1 --port=1000 # listen on ip, port for engines
ipcontroller --scheme=pure # use the pure zeromq scheduler
"""
#-----------------------------------------------------------------------------
# The main application
#-----------------------------------------------------------------------------
flags = {}
flags.update(base_flags)
flags.update({
'usethreads' : ( {'IPControllerApp' : {'use_threads' : True}},
'Use threads instead of processes for the schedulers'),
'sqlitedb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.sqlitedb.SQLiteDB'}},
'use the SQLiteDB backend'),
'mongodb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.mongodb.MongoDB'}},
'use the MongoDB backend'),
'dictdb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.dictdb.DictDB'}},
'use the in-memory DictDB backend'),
'nodb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.dictdb.NoDB'}},
"""use dummy DB backend, which doesn't store any information.
This is the default as of IPython 0.13.
To enable delayed or repeated retrieval of results from the Hub,
select one of the true db backends.
"""),
'reuse' : ({'IPControllerApp' : {'reuse_files' : True}},
'reuse existing json connection files'),
'restore' : ({'IPControllerApp' : {'restore_engines' : True, 'reuse_files' : True}},
'Attempt to restore engines from a JSON file. '
'For use when resuming a crashed controller'),
})
flags.update(session_flags)
aliases = dict(
ssh = 'IPControllerApp.ssh_server',
enginessh = 'IPControllerApp.engine_ssh_server',
location = 'IPControllerApp.location',
url = 'HubFactory.url',
ip = 'HubFactory.ip',
transport = 'HubFactory.transport',
port = 'HubFactory.regport',
ping = 'HeartMonitor.period',
scheme = 'TaskScheduler.scheme_name',
hwm = 'TaskScheduler.hwm',
)
aliases.update(base_aliases)
aliases.update(session_aliases)
class IPControllerApp(BaseParallelApplication):
    """Application that starts the Hub, schedulers and monitored queues.

    FIXES applied relative to the original:
    * ``do_import_statements`` used ``self.log.msg(...)``, which does not
      exist on stdlib loggers and raised AttributeError whenever
      ``import_statements`` was configured -- now uses ``info``/``error``.
    * The Python-2-only ``exec s in globals(), locals()`` statement is
      replaced by the tuple/function form ``exec(s, globals(), locals())``,
      which is valid on Python 2.6+ and Python 3.
    * A bare ``except:`` was narrowed to ``except Exception:`` so
      SystemExit/KeyboardInterrupt are not swallowed.
    """

    name = u'ipcontroller'
    description = _description
    examples = _examples
    classes = [ProfileDir, Session, HubFactory, TaskScheduler, HeartMonitor, DictDB] + real_dbs

    # change default to True
    auto_create = Bool(True, config=True,
        help="""Whether to create profile dir if it doesn't exist.""")

    reuse_files = Bool(False, config=True,
        help="""Whether to reuse existing json connection files.
        If False, connection files will be removed on a clean exit.
        """
    )
    restore_engines = Bool(False, config=True,
        help="""Reload engine state from JSON file
        """
    )
    ssh_server = Unicode(u'', config=True,
        help="""ssh url for clients to use when connecting to the Controller
        processes. It should be of the form: [user@]server[:port]. The
        Controller's listening addresses must be accessible from the ssh server""",
    )
    engine_ssh_server = Unicode(u'', config=True,
        help="""ssh url for engines to use when connecting to the Controller
        processes. It should be of the form: [user@]server[:port]. The
        Controller's listening addresses must be accessible from the ssh server""",
    )
    location = Unicode(u'', config=True,
        help="""The external IP or domain name of the Controller, used for disambiguating
        engine and client connections.""",
    )
    import_statements = List([], config=True,
        help="import statements to be run at startup. Necessary in some environments"
    )
    use_threads = Bool(False, config=True,
        help='Use threads instead of processes for the schedulers',
    )
    engine_json_file = Unicode('ipcontroller-engine.json', config=True,
        help="JSON filename where engine connection info will be stored.")
    client_json_file = Unicode('ipcontroller-client.json', config=True,
        help="JSON filename where client connection info will be stored.")

    def _cluster_id_changed(self, name, old, new):
        # Keep the connection-file names in sync with the cluster id
        # (BaseParallelApplication updates self.name from the cluster id).
        super(IPControllerApp, self)._cluster_id_changed(name, old, new)
        self.engine_json_file = "%s-engine.json" % self.name
        self.client_json_file = "%s-client.json" % self.name

    # internal
    children = List()
    mq_class = Unicode('zmq.devices.ProcessMonitoredQueue')

    def _use_threads_changed(self, name, old, new):
        # Switch queue implementation between thread- and process-based.
        self.mq_class = 'zmq.devices.%sMonitoredQueue' % ('Thread' if new else 'Process')

    write_connection_files = Bool(True,
        help="""Whether to write connection files to disk.
        True in all cases other than runs with `reuse_files=True` *after the first*
        """
    )

    aliases = Dict(aliases)
    flags = Dict(flags)

    def save_connection_dict(self, fname, cdict):
        """save a connection dict to json file."""
        c = self.config
        url = cdict['registration']
        location = cdict['location']

        if not location:
            # Fill in a best-guess address so remote peers can find us.
            if PUBLIC_IPS:
                location = PUBLIC_IPS[-1]
            else:
                self.log.warn("Could not identify this machine's IP, assuming %s."
                " You may need to specify '--location=<external_ip_address>' to help"
                " IPython decide when to connect via loopback." % LOCALHOST)
                location = LOCALHOST
            cdict['location'] = location
        fname = os.path.join(self.profile_dir.security_dir, fname)
        self.log.info("writing connection info to %s", fname)
        with open(fname, 'w') as f:
            f.write(json.dumps(cdict, indent=2))
        # Connection files contain the session key: owner-only permissions.
        os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR)

    def load_config_from_json(self):
        """load config from existing json connector files."""
        c = self.config
        self.log.debug("loading config from JSON")

        # load engine config
        fname = os.path.join(self.profile_dir.security_dir, self.engine_json_file)
        self.log.info("loading connection info from %s", fname)
        with open(fname) as f:
            ecfg = json.loads(f.read())

        # json gives unicode, Session.key wants bytes
        c.Session.key = ecfg['key'].encode('ascii')

        xport, ip = ecfg['interface'].split('://')

        c.HubFactory.engine_ip = ip
        c.HubFactory.engine_transport = xport

        self.location = ecfg['location']
        if not self.engine_ssh_server:
            self.engine_ssh_server = ecfg['ssh']

        # load client config
        fname = os.path.join(self.profile_dir.security_dir, self.client_json_file)
        self.log.info("loading connection info from %s", fname)
        with open(fname) as f:
            ccfg = json.loads(f.read())

        # Both files must describe the same session or peers won't connect.
        for key in ('key', 'registration', 'pack', 'unpack', 'signature_scheme'):
            assert ccfg[key] == ecfg[key], "mismatch between engine and client info: %r" % key

        xport, addr = ccfg['interface'].split('://')

        c.HubFactory.client_transport = xport
        # NOTE(review): reuses the engine-side ``ip`` rather than the client
        # ``addr`` parsed just above -- preserved upstream behavior, but it
        # looks suspicious; confirm before relying on split interfaces.
        c.HubFactory.client_ip = ip
        if not self.ssh_server:
            self.ssh_server = ccfg['ssh']

        # load port config:
        c.HubFactory.regport = ecfg['registration']
        c.HubFactory.hb = (ecfg['hb_ping'], ecfg['hb_pong'])
        c.HubFactory.control = (ccfg['control'], ecfg['control'])
        c.HubFactory.mux = (ccfg['mux'], ecfg['mux'])
        c.HubFactory.task = (ccfg['task'], ecfg['task'])
        c.HubFactory.iopub = (ccfg['iopub'], ecfg['iopub'])
        c.HubFactory.notifier_port = ccfg['notification']

    def cleanup_connection_files(self):
        """Remove the JSON connection files unless they are being reused."""
        if self.reuse_files:
            self.log.debug("leaving JSON connection files for reuse")
            return
        self.log.debug("cleaning up JSON connection files")
        for f in (self.client_json_file, self.engine_json_file):
            f = os.path.join(self.profile_dir.security_dir, f)
            try:
                os.remove(f)
            except Exception as e:
                # Best-effort cleanup: a missing file is not fatal.
                self.log.error("Failed to cleanup connection file: %s", e)
            else:
                self.log.debug(u"removed %s", f)

    def load_secondary_config(self):
        """secondary config, loading from JSON and setting defaults"""
        if self.reuse_files:
            try:
                self.load_config_from_json()
            except (AssertionError, IOError) as e:
                self.log.error("Could not load config from JSON: %s" % e)
            else:
                # successfully loaded config from JSON, and reuse=True
                # no need to wite back the same file
                self.write_connection_files = False

        # switch Session.key default to secure
        default_secure(self.config)
        self.log.debug("Config changed")
        self.log.debug(repr(self.config))

    def init_hub(self):
        """Construct the HubFactory and optionally write connection files."""
        c = self.config

        self.do_import_statements()

        try:
            self.factory = HubFactory(config=c, log=self.log)
            # self.start_logging()
            self.factory.init_hub()
        except TraitError:
            raise
        except Exception:
            self.log.error("Couldn't construct the Controller", exc_info=True)
            self.exit(1)

        if self.write_connection_files:
            # save to new json config files
            f = self.factory
            base = {
                'key': f.session.key.decode('ascii'),
                'location': self.location,
                'pack': f.session.packer,
                'unpack': f.session.unpacker,
                'signature_scheme': f.session.signature_scheme,
            }

            cdict = {'ssh': self.ssh_server}
            cdict.update(f.client_info)
            cdict.update(base)
            self.save_connection_dict(self.client_json_file, cdict)

            edict = {'ssh': self.engine_ssh_server}
            edict.update(f.engine_info)
            edict.update(base)
            self.save_connection_dict(self.engine_json_file, edict)

        fname = "engines%s.json" % self.cluster_id
        self.factory.hub.engine_state_file = os.path.join(self.profile_dir.log_dir, fname)
        if self.restore_engines:
            self.factory.hub._load_engine_state()

    def init_schedulers(self):
        """Create the IOPub relay, MUX/control queues and the task scheduler."""
        children = self.children
        mq = import_item(str(self.mq_class))

        f = self.factory
        ident = f.session.bsession
        # disambiguate url, in case of *
        monitor_url = disambiguate_url(f.monitor_url)
        # maybe_inproc = 'inproc://monitor' if self.use_threads else monitor_url
        # IOPub relay (in a Process)
        q = mq(zmq.PUB, zmq.SUB, zmq.PUB, b'N/A', b'iopub')
        q.bind_in(f.client_url('iopub'))
        q.setsockopt_in(zmq.IDENTITY, ident + b"_iopub")
        q.bind_out(f.engine_url('iopub'))
        q.setsockopt_out(zmq.SUBSCRIBE, b'')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)

        # Multiplexer Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out')
        q.bind_in(f.client_url('mux'))
        q.setsockopt_in(zmq.IDENTITY, b'mux_in')
        q.bind_out(f.engine_url('mux'))
        q.setsockopt_out(zmq.IDENTITY, b'mux_out')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)

        # Control Queue (in a Process)
        q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'incontrol', b'outcontrol')
        q.bind_in(f.client_url('control'))
        q.setsockopt_in(zmq.IDENTITY, b'control_in')
        q.bind_out(f.engine_url('control'))
        q.setsockopt_out(zmq.IDENTITY, b'control_out')
        q.connect_mon(monitor_url)
        q.daemon = True
        children.append(q)

        try:
            scheme = self.config.TaskScheduler.scheme_name
        except AttributeError:
            scheme = TaskScheduler.scheme_name.get_default_value()

        # Task Queue (in a Process)
        if scheme == 'pure':
            self.log.warn("task::using pure DEALER Task scheduler")
            q = mq(zmq.ROUTER, zmq.DEALER, zmq.PUB, b'intask', b'outtask')
            # q.setsockopt_out(zmq.HWM, hub.hwm)
            q.bind_in(f.client_url('task'))
            q.setsockopt_in(zmq.IDENTITY, b'task_in')
            q.bind_out(f.engine_url('task'))
            q.setsockopt_out(zmq.IDENTITY, b'task_out')
            q.connect_mon(monitor_url)
            q.daemon = True
            children.append(q)
        elif scheme == 'none':
            self.log.warn("task::using no Task scheduler")
        else:
            self.log.info("task::using Python %s Task scheduler" % scheme)
            sargs = (f.client_url('task'), f.engine_url('task'),
                     monitor_url, disambiguate_url(f.client_url('notification')),
                     disambiguate_url(f.client_url('registration')),
                     )
            kwargs = dict(logname='scheduler', loglevel=self.log_level,
                          log_url=self.log_url, config=dict(self.config))
            if 'Process' in self.mq_class:
                # run the Python scheduler in a Process
                q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
                q.daemon = True
                children.append(q)
            else:
                # single-threaded Controller
                kwargs['in_thread'] = True
                launch_scheduler(*sargs, **kwargs)

        # set unlimited HWM for all relay devices
        if hasattr(zmq, 'SNDHWM'):
            q = children[0]
            q.setsockopt_in(zmq.RCVHWM, 0)
            q.setsockopt_out(zmq.SNDHWM, 0)

            for q in children[1:]:
                if not hasattr(q, 'setsockopt_in'):
                    continue
                q.setsockopt_in(zmq.SNDHWM, 0)
                q.setsockopt_in(zmq.RCVHWM, 0)
                q.setsockopt_out(zmq.SNDHWM, 0)
                q.setsockopt_out(zmq.RCVHWM, 0)
                q.setsockopt_mon(zmq.SNDHWM, 0)

    def terminate_children(self):
        """Terminate all scheduler/queue child processes."""
        child_procs = []
        for child in self.children:
            if isinstance(child, ProcessMonitoredQueue):
                child_procs.append(child.launcher)
            elif isinstance(child, Process):
                child_procs.append(child)
        if child_procs:
            self.log.critical("terminating children...")
            for child in child_procs:
                try:
                    child.terminate()
                except OSError:
                    # already dead
                    pass

    def handle_signal(self, sig, frame):
        """Shut down cleanly on SIGINT/SIGABRT/SIGTERM."""
        self.log.critical("Received signal %i, shutting down", sig)
        self.terminate_children()
        self.loop.stop()

    def init_signal(self):
        for sig in (SIGINT, SIGABRT, SIGTERM):
            signal(sig, self.handle_signal)

    def do_import_statements(self):
        """Execute the configured ``import_statements`` at startup.

        Necessary in some deployment environments (e.g. to register
        custom serialization before the Hub starts).
        """
        statements = self.import_statements
        for s in statements:
            try:
                # FIX: ``self.log.msg`` does not exist on stdlib loggers.
                self.log.info("Executing statement: '%s'" % s)
                # FIX: function-call form works on Python 2.6+ and Python 3.
                exec(s, globals(), locals())
            except Exception:
                # FIX: narrowed from a bare except; include the traceback.
                self.log.error("Error running statement: %s" % s, exc_info=True)

    def forward_logging(self):
        """Relay log records over a zmq PUB socket when --log-url is set."""
        if self.log_url:
            self.log.info("Forwarding logging to %s" % self.log_url)
            context = zmq.Context.instance()
            lsock = context.socket(zmq.PUB)
            lsock.connect(self.log_url)
            handler = PUBHandler(lsock)
            handler.root_topic = 'controller'
            handler.setLevel(self.log_level)
            self.log.addHandler(handler)

    @catch_config_error
    def initialize(self, argv=None):
        super(IPControllerApp, self).initialize(argv)
        self.forward_logging()
        self.load_secondary_config()
        self.init_hub()
        self.init_schedulers()

    def start(self):
        # Start the subprocesses:
        self.factory.start()
        # children must be started before signals are setup,
        # otherwise signal-handling will fire multiple times
        for child in self.children:
            child.start()
        self.init_signal()

        self.write_pid_file(overwrite=True)

        try:
            self.factory.loop.start()
        except KeyboardInterrupt:
            self.log.critical("Interrupted, Exiting...\n")
        finally:
            self.cleanup_connection_files()
def launch_new_instance(*args, **kwargs):
    """Create and run the IPython controller"""
    if sys.platform == 'win32':
        # Windows has no real fork, so multiprocessing may re-import this
        # module in worker subprocesses; if one of those reached here it
        # would spawn controllers recursively.  Only the main process
        # (named 'MainProcess') may start a controller.  This only comes
        # up with vanilla-setuptools installs, not distribute.
        import multiprocessing
        if multiprocessing.current_process().name != 'MainProcess':
            # we are a subprocess, don't start another Controller!
            return
    return IPControllerApp.launch_instance(*args, **kwargs)
if __name__ == '__main__':
    # Command-line entry point: start a controller in this process.
    launch_new_instance()
| apache-2.0 |
jhuapl-marti/marti | env-crits/lib/python2.7/site-packages/setuptools/command/bdist_egg.py | 155 | 17606 | """setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
from distutils.errors import DistutilsSetupError
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import marshal
import textwrap
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from setuptools.compat import basestring
from setuptools.extension import Library
from setuptools import Command
try:
    # Python 2.7 or >=3.2
    from sysconfig import get_path, get_python_version

    def _get_purelib():
        # sysconfig-based lookup of the site-packages (purelib) directory.
        return get_path("purelib")
except ImportError:
    # Older Pythons: fall back to the distutils equivalent.
    from distutils.sysconfig import get_python_lib, get_python_version

    def _get_purelib():
        return get_python_lib(False)
def strip_module(filename):
    """Return *filename* without its extension and any ``module`` suffix.

    E.g. ``'foomodule.so'`` -> ``'foo'``; names without either part are
    returned unchanged.
    """
    if '.' in filename:
        filename = os.path.splitext(filename)[0]
    suffix = 'module'
    if filename.endswith(suffix):
        filename = filename[:-len(suffix)]
    return filename
def write_stub(resource, pyfile):
    """Write a Python stub loader for extension *resource* to *pyfile*.

    The stub locates the real extension file with pkg_resources at import
    time and loads it in place of itself via ``imp.load_dynamic``.
    """
    template = textwrap.dedent("""
        def __bootstrap__():
            global __bootstrap__, __loader__, __file__
            import sys, pkg_resources, imp
            __file__ = pkg_resources.resource_filename(__name__, %r)
            __loader__ = None; del __bootstrap__, __loader__
            imp.load_dynamic(__name__,__file__)
        __bootstrap__()
        """).lstrip()
    with open(pyfile, 'w') as f:
        f.write(template % resource)
class bdist_egg(Command):
    """distutils/setuptools command: build a binary .egg distribution.

    The egg is assembled by redirecting the install_* commands into a
    temporary tree, generating EGG-INFO metadata and stub loaders for C
    extensions, and finally zipping the tree into dist/<name>.egg.
    """
    description = "create an \"egg\" distribution"
    user_options = [
        ('bdist-dir=', 'b',
            "temporary directory for creating the distribution"),
        ('plat-name=', 'p', "platform name to embed in generated filenames "
            "(default: %s)" % get_build_platform()),
        ('exclude-source-files', None,
            "remove all .py files from the generated egg"),
        ('keep-temp', 'k',
            "keep the pseudo-installation tree around after " +
            "creating the distribution archive"),
        ('dist-dir=', 'd',
            "directory to put final built distributions in"),
        ('skip-build', None,
            "skip rebuilding everything (for testing/debugging)"),
    ]
    boolean_options = [
        'keep-temp', 'skip-build', 'exclude-source-files'
    ]
    def initialize_options(self):
        # All options start unset; finalize_options fills in defaults.
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.dist_dir = None
        self.skip_build = 0
        self.egg_output = None
        self.exclude_source_files = None
    def finalize_options(self):
        """Resolve unset options and compute the output .egg path."""
        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
        self.egg_info = ei_cmd.egg_info
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'egg')
        if self.plat_name is None:
            self.plat_name = get_build_platform()
        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
        if self.egg_output is None:
            # Compute filename of the output egg.  The platform name is
            # only embedded when the project has extension modules.
            basename = Distribution(
                None, None, ei_cmd.egg_name, ei_cmd.egg_version,
                get_python_version(),
                self.distribution.has_ext_modules() and self.plat_name
            ).egg_name()
            self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
    def do_install_data(self):
        # Hack for packages that install data to install's --install-lib
        self.get_finalized_command('install').install_lib = self.bdist_dir
        # Rewrite absolute data paths under site-packages to be relative,
        # so the data files land inside the egg tree instead.
        site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
        old, self.distribution.data_files = self.distribution.data_files, []
        for item in old:
            if isinstance(item, tuple) and len(item) == 2:
                if os.path.isabs(item[0]):
                    realpath = os.path.realpath(item[0])
                    normalized = os.path.normcase(realpath)
                    if normalized == site_packages or normalized.startswith(
                        site_packages + os.sep
                    ):
                        item = realpath[len(site_packages) + 1:], item[1]
                    # XXX else: raise ???
            self.distribution.data_files.append(item)
        try:
            log.info("installing package data to %s" % self.bdist_dir)
            self.call_command('install_data', force=0, root=None)
        finally:
            # Restore the original data_files for any later commands.
            self.distribution.data_files = old
    def get_outputs(self):
        # The only artifact this command produces is the final .egg file.
        return [self.egg_output]
    def call_command(self, cmdname, **kw):
        """Invoke reinitialized command `cmdname` with keyword args"""
        for dirname in INSTALL_DIRECTORY_ATTRS:
            # Point every install-style directory option at the egg tree.
            kw.setdefault(dirname, self.bdist_dir)
        kw.setdefault('skip_build', self.skip_build)
        kw.setdefault('dry_run', self.dry_run)
        cmd = self.reinitialize_command(cmdname, **kw)
        self.run_command(cmdname)
        return cmd
    def run(self):
        """Build the egg: install into a temp tree, add metadata, zip it."""
        # Generate metadata first
        self.run_command("egg_info")
        # We run install_lib before install_data, because some data hacks
        # pull their data path from the install_lib command.
        log.info("installing library code to %s" % self.bdist_dir)
        instcmd = self.get_finalized_command('install')
        old_root = instcmd.root
        instcmd.root = None
        if self.distribution.has_c_libraries() and not self.skip_build:
            self.run_command('build_clib')
        cmd = self.call_command('install_lib', warn_dir=0)
        instcmd.root = old_root
        all_outputs, ext_outputs = self.get_ext_outputs()
        self.stubs = []
        to_compile = []
        for (p, ext_name) in enumerate(ext_outputs):
            # Write a .py stub loader next to each extension module.
            filename, ext = os.path.splitext(ext_name)
            pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
                                  '.py')
            self.stubs.append(pyfile)
            log.info("creating stub loader for %s" % ext_name)
            if not self.dry_run:
                write_stub(os.path.basename(ext_name), pyfile)
            to_compile.append(pyfile)
            ext_outputs[p] = ext_name.replace(os.sep, '/')
        if to_compile:
            cmd.byte_compile(to_compile)
        if self.distribution.data_files:
            self.do_install_data()
        # Make the EGG-INFO directory
        archive_root = self.bdist_dir
        egg_info = os.path.join(archive_root, 'EGG-INFO')
        self.mkpath(egg_info)
        if self.distribution.scripts:
            script_dir = os.path.join(egg_info, 'scripts')
            log.info("installing scripts to %s" % script_dir)
            self.call_command('install_scripts', install_dir=script_dir,
                              no_ep=1)
        self.copy_metadata_to(egg_info)
        # Record native library names so the runtime knows what to extract.
        native_libs = os.path.join(egg_info, "native_libs.txt")
        if all_outputs:
            log.info("writing %s" % native_libs)
            if not self.dry_run:
                ensure_directory(native_libs)
                libs_file = open(native_libs, 'wt')
                libs_file.write('\n'.join(all_outputs))
                libs_file.write('\n')
                libs_file.close()
        elif os.path.isfile(native_libs):
            log.info("removing %s" % native_libs)
            if not self.dry_run:
                os.unlink(native_libs)
        write_safety_flag(
            os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
        )
        if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
            log.warn(
                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
                "Use the install_requires/extras_require setup() args instead."
            )
        if self.exclude_source_files:
            self.zap_pyfiles()
        # Make the archive
        make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
                     dry_run=self.dry_run, mode=self.gen_header())
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution, 'dist_files', []).append(
            ('bdist_egg', get_python_version(), self.egg_output))
    def zap_pyfiles(self):
        """Delete all .py sources from the build tree (--exclude-source-files)."""
        log.info("Removing .py files from temporary directory")
        for base, dirs, files in walk_egg(self.bdist_dir):
            for name in files:
                if name.endswith('.py'):
                    path = os.path.join(base, name)
                    log.debug("Deleting %s", path)
                    os.unlink(path)
    def zip_safe(self):
        """Return the project's zip-safety, scanning the build tree if unset."""
        safe = getattr(self.distribution, 'zip_safe', None)
        if safe is not None:
            return safe
        log.warn("zip_safe flag not set; analyzing archive contents...")
        return analyze_egg(self.bdist_dir, self.stubs)
    def gen_header(self):
        """Return the ZipFile open mode for the archive.

        If the project declares an 'eggsecutable' entry point, write a
        self-launching shell-script header to the output file first and
        return 'a' so the zip data is appended after it.
        """
        epm = EntryPoint.parse_map(self.distribution.entry_points or '')
        ep = epm.get('setuptools.installation', {}).get('eggsecutable')
        if ep is None:
            return 'w' # not an eggsecutable, do it the usual way.
        if not ep.attrs or ep.extras:
            raise DistutilsSetupError(
                "eggsecutable entry point (%r) cannot have 'extras' "
                "or refer to a module" % (ep,)
            )
        pyver = sys.version[:3]
        pkg = ep.module_name
        full = '.'.join(ep.attrs)
        base = ep.attrs[0]
        basename = os.path.basename(self.egg_output)
        header = (
            "#!/bin/sh\n"
            'if [ `basename $0` = "%(basename)s" ]\n'
            'then exec python%(pyver)s -c "'
            "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
            "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
            '" "$@"\n'
            'else\n'
            '  echo $0 is not the correct name for this egg file.\n'
            '  echo Please rename it back to %(basename)s and try again.\n'
            '  exec false\n'
            'fi\n'
        ) % locals()
        if not self.dry_run:
            mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
            f = open(self.egg_output, 'w')
            f.write(header)
            f.close()
        return 'a'
    def copy_metadata_to(self, target_dir):
        "Copy metadata (egg info) to the target_dir"
        # normalize the path (so that a forward-slash in egg_info will
        # match using startswith below)
        norm_egg_info = os.path.normpath(self.egg_info)
        prefix = os.path.join(norm_egg_info, '')
        for path in self.ei_cmd.filelist.files:
            if path.startswith(prefix):
                target = os.path.join(target_dir, path[len(prefix):])
                ensure_directory(target)
                self.copy_file(path, target)
    def get_ext_outputs(self):
        """Get a list of relative paths to C extensions in the output distro"""
        all_outputs = []
        ext_outputs = []
        paths = {self.bdist_dir: ''}
        for base, dirs, files in os.walk(self.bdist_dir):
            for filename in files:
                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
                    all_outputs.append(paths[base] + filename)
            for filename in dirs:
                # Remember each subdirectory's egg-relative prefix.
                paths[os.path.join(base, filename)] = (paths[base] +
                                                       filename + '/')
        if self.distribution.has_ext_modules():
            build_cmd = self.get_finalized_command('build_ext')
            for ext in build_cmd.extensions:
                if isinstance(ext, Library):
                    continue
                fullname = build_cmd.get_ext_fullname(ext.name)
                filename = build_cmd.get_ext_filename(fullname)
                if not os.path.basename(filename).startswith('dl-'):
                    if os.path.exists(os.path.join(self.bdist_dir, filename)):
                        ext_outputs.append(filename)
        return all_outputs, ext_outputs
# File extensions recognized as native (compiled) code inside an egg.
NATIVE_EXTENSIONS = dict.fromkeys(('.dll', '.so', '.dylib', '.pyd'))
def walk_egg(egg_dir):
    """Like os.walk(egg_dir), but prune the EGG-INFO metadata directory."""
    tree = os.walk(egg_dir)
    top, subdirs, filenames = next(tree)
    try:
        subdirs.remove('EGG-INFO')
    except ValueError:
        pass
    yield top, subdirs, filenames
    for entry in tree:
        yield entry
def analyze_egg(egg_dir, stubs):
    """Determine the egg's zip-safety.

    An explicit zip-safe / not-zip-safe marker in EGG-INFO wins outright;
    otherwise every compiled module in the tree is scanned (stub loaders
    in *stubs* are exempt).  Returns False without scanning on platforms
    where bytecode analysis is impossible.
    """
    # check for existing flag in EGG-INFO
    for flag, marker in safety_flags.items():
        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', marker)):
            return flag
    if not can_scan():
        return False
    verdict = True
    for base, dirs, files in walk_egg(egg_dir):
        for name in files:
            if name.endswith(('.py', '.pyw')):
                continue
            if name.endswith(('.pyc', '.pyo')):
                # always scan, even if we already know we're not safe
                verdict = scan_module(egg_dir, base, name, stubs) and verdict
    return verdict
def write_safety_flag(egg_dir, safe):
    """Create or remove the zip-safety marker files in *egg_dir*.

    ``safe=True`` leaves only 'zip-safe', ``safe=False`` leaves only
    'not-zip-safe', and ``safe=None`` removes both markers.
    """
    for flag_value, marker_name in safety_flags.items():
        marker = os.path.join(egg_dir, marker_name)
        wanted = safe is not None and bool(safe) == flag_value
        if os.path.exists(marker):
            if not wanted:
                os.unlink(marker)
        elif wanted:
            with open(marker, 'wt') as marker_file:
                marker_file.write('\n')
# Mapping of zip-safety verdict to its EGG-INFO marker file name.
safety_flags = {
    True: 'zip-safe',
    False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff.

    Reads the compiled module `name` under `base` (inside `egg_dir`),
    unmarshals its code object, and flags references to __file__/__path__
    or to inspect functions that need real files on disk.  Stub loaders
    listed in `stubs` are always considered safe.  Returns True if the
    module looks zip-safe.
    """
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True # Extension module
    # Derive the dotted module name from the path relative to the egg root.
    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    if sys.version_info < (3, 3):
        skip = 8 # skip magic & date
    else:
        skip = 12 # skip magic & date & file size
    f = open(filename, 'rb')
    f.read(skip)
    code = marshal.load(f)
    f.close()
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        # BUG FIX: a missing comma previously fused 'getfile' and
        # 'getsourcelines' into one bogus entry ('getfilegetsourcelines'),
        # so neither function was ever detected.
        for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    if '__name__' in symbols and '__main__' in symbols and '.' not in module:
        if sys.version[:3] == "2.4": # -m works w/zipfiles in 2.5
            log.warn("%s: top-level module may be 'python -m' script", module)
            safe = False
    return safe
def iter_symbols(code):
    """Yield names and strings used by `code` and its nested code objects"""
    for name in code.co_names:
        yield name
    for const in code.co_consts:
        # String constants may name attributes accessed dynamically.
        if isinstance(const, basestring):
            yield const
        elif isinstance(const, CodeType):
            # Recurse into nested functions/classes.
            for name in iter_symbols(const):
                yield name
def can_scan():
    """Return True if compiled modules can be analyzed on this platform.

    Bytecode scanning requires marshal-readable .pyc files, which Jython
    and IronPython don't provide; on those platforms a warning is logged
    and the function falls through (returning None).
    """
    unsupported = sys.platform.startswith('java') or sys.platform == 'cli'
    if not unsupported:
        # CPython, PyPy, etc.
        return True
    log.warn("Unable to analyze compiled code on this platform.")
    log.warn("Please ask the author to include a 'zip_safe'"
             " setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
# (bdist_egg.call_command sets each of these to bdist_dir by default).
INSTALL_DIRECTORY_ATTRS = [
    'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
                 mode='w'):
    """Zip up everything under *base_dir* into *zip_filename*.

    Entries are archived under their path relative to *base_dir*.
    *compress* selects deflated vs. stored (defaulting to deflated on
    Python >= 2.4), and *mode* is handed to zipfile.ZipFile ('w' to
    create, 'a' to append after an eggsecutable header).  Returns
    *zip_filename*.
    """
    import zipfile
    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
    def add_files(archive, dirname, names):
        # Add each regular file, named relative to base_dir.
        for fname in names:
            full = os.path.normpath(os.path.join(dirname, fname))
            if not os.path.isfile(full):
                continue
            arcname = full[len(base_dir) + 1:]
            if not dry_run:
                archive.write(full, arcname)
            log.debug("adding '%s'" % arcname)
    if compress is None:
        # avoid 2.3 zipimport bug when 64 bits
        compress = (sys.version >= "2.4")
    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    if dry_run:
        for dirname, dirs, files in os.walk(base_dir):
            add_files(None, dirname, files)
    else:
        archive = zipfile.ZipFile(zip_filename, mode, compression=compression)
        for dirname, dirs, files in os.walk(base_dir):
            add_files(archive, dirname, files)
        archive.close()
    return zip_filename
| mit |
listamilton/supermilton.repository | script.module.youtube.dl/lib/youtube_dl/extractor/musicplayon.py | 16 | 2312 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
js_to_json,
mimetype2ext,
)
class MusicPlayOnIE(InfoExtractor):
    """youtube-dl extractor for musicplayon.com video pages."""
    _VALID_URL = r'https?://(?:.+?\.)?musicplayon\.com/play(?:-touch)?\?(?:v|pl=\d+&play)=(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://en.musicplayon.com/play?v=433377',
        'md5': '00cdcdea1726abdf500d1e7fd6dd59bb',
        'info_dict': {
            'id': '433377',
            'ext': 'mp4',
            'title': 'Rick Ross - Interview On Chelsea Lately (2014)',
            'description': 'Rick Ross Interview On Chelsea Lately',
            'duration': 342,
            'uploader': 'ultrafish',
        },
    }, {
        'url': 'http://en.musicplayon.com/play?pl=102&play=442629',
        'only_matching': True,
    }]
    _URL_TEMPLATE = 'http://en.musicplayon.com/play?v=%s'
    def _real_extract(self, url):
        # Canonicalize playlist-style URLs to the plain ?v=<id> form before
        # downloading the page.
        video_id = self._match_id(url)
        url = self._URL_TEMPLATE % video_id
        page = self._download_webpage(url, video_id)
        # Metadata comes from OpenGraph tags and page markup; all optional
        # fields are fetched non-fatally.
        title = self._og_search_title(page)
        description = self._og_search_description(page)
        thumbnail = self._og_search_thumbnail(page)
        duration = self._html_search_meta('video:duration', page, 'duration', fatal=False)
        view_count = self._og_search_property('count', page, fatal=False)
        uploader = self._html_search_regex(
            r'<div>by <a href="[^"]+" class="purple">([^<]+)</a></div>', page, 'uploader', fatal=False)
        # The player's source list is embedded as a JS assignment; normalize
        # it to JSON before parsing.
        sources = self._parse_json(
            self._search_regex(r'setup\[\'_sources\'\]\s*=\s*([^;]+);', page, 'video sources'),
            video_id, transform_source=js_to_json)
        formats = [{
            'url': compat_urlparse.urljoin(url, source['src']),
            'ext': mimetype2ext(source.get('type')),
            'format_note': source.get('data-res'),
        } for source in sources]
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'duration': int_or_none(duration),
            'view_count': int_or_none(view_count),
            'formats': formats,
        }
| gpl-2.0 |
myerssr/volatility | volatility/plugins/overlays/windows/ssdt_vtypes.py | 44 | 6998 | # Volatility
# Copyright (c) 2008-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import volatility.debug as debug
import volatility.obj as obj
# SSDT structures for all x86 profiles *except* Win 2003 Server
# Each vtype entry maps StructName -> [struct size, {member: [offset, type]}].
ssdt_vtypes = {
    '_SERVICE_DESCRIPTOR_TABLE' : [ 0x40, {
    'Descriptors' : [0x0, ['array', 4, ['_SERVICE_DESCRIPTOR_ENTRY']]],
    }],
    '_SERVICE_DESCRIPTOR_ENTRY' : [ 0x10, {
    'KiServiceTable' : [0x0, ['pointer', ['void']]],
    'CounterBaseTable' : [0x4, ['pointer', ['unsigned long']]],
    'ServiceLimit' : [0x8, ['unsigned long']],
    'ArgumentTable' : [0xc, ['pointer', ['unsigned char']]],
    }],
}
# SSDT structures for Win 2003 Server x86
# (2 descriptor slots instead of 4, hence table size 0x20)
ssdt_vtypes_2003 = {
    '_SERVICE_DESCRIPTOR_TABLE' : [ 0x20, {
    'Descriptors' : [0x0, ['array', 2, ['_SERVICE_DESCRIPTOR_ENTRY']]],
    }],
}
# SSDT structures for x64
# (64-bit pointers; each entry is 0x20 bytes)
ssdt_vtypes_64 = {
    '_SERVICE_DESCRIPTOR_TABLE' : [ 0x40, {
    'Descriptors' : [0x0, ['array', 2, ['_SERVICE_DESCRIPTOR_ENTRY']]],
    }],
    '_SERVICE_DESCRIPTOR_ENTRY' : [ 0x20, {
    'KiServiceTable' : [0x0, ['pointer64', ['void']]],
    'CounterBaseTable' : [0x8, ['pointer64', ['unsigned long']]],
    'ServiceLimit' : [0x10, ['unsigned long long']],
    'ArgumentTable' : [0x18, ['pointer64', ['unsigned char']]],
    }],
}
#### Filthy Hack for backwards compatibility
def syscalls_property(x):
    # Emit a deprecation notice, then read from the new storage location;
    # the default is a pair of empty tables when nothing was registered.
    debug.debug("Deprecation warning: Please use profile.additional['syscalls'] over profile.syscalls")
    return x.additional.get('syscalls', [[], []])
class WinSyscallsAttribute(obj.ProfileModification):
    # Applies to every Windows profile.
    conditions = {'os': lambda x: x == 'windows'}
    def modification(self, profile):
        # Filthy hack for backwards compatibility: graft a `syscalls`
        # property onto the profile *class* so old callers keep working.
        profile.__class__.syscalls = property(syscalls_property)
####
class AbstractSyscalls(obj.ProfileModification):
    """Base modification: copy a syscall-table module into the profile."""
    # Overridden by subclasses with the dotted path of the module that
    # defines `syscalls` for one particular Windows version/architecture.
    syscall_module = 'No default'
    def modification(self, profile):
        # NOTE(review): fetches from sys.modules rather than importing, so
        # this assumes the module was imported earlier; sys.modules.get
        # returns None (and the next line raises) otherwise — confirm.
        module = sys.modules.get(self.syscall_module, None)
        profile.additional['syscalls'] = module.syscalls
# One profile modification per Windows version/architecture: each selects
# the pre-generated syscall-table module whose conditions match the
# profile's OS/bitness/version metadata.
class WinXPSyscalls(AbstractSyscalls):
    syscall_module = 'volatility.plugins.overlays.windows.xp_sp2_x86_syscalls'
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '32bit',
                  'major': lambda x : x == 5,
                  'minor': lambda x : x == 1}
class Win64SyscallVTypes(obj.ProfileModification):
    # Must be applied before the generic Windows vtypes.
    before = ['WindowsVTypes']
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '64bit'}
    def modification(self, profile):
        profile.vtypes.update(ssdt_vtypes_64)
class Win2003SyscallVTypes(obj.ProfileModification):
    before = ['WindowsVTypes']
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '32bit',
                  'major': lambda x: x == 5,
                  'minor': lambda x: x == 2}
    def modification(self, profile):
        profile.vtypes.update(ssdt_vtypes_2003)
class Win2003SP0Syscalls(AbstractSyscalls):
    # Win2003SP12Syscalls applies to SP0 as well, so this must be applied second
    before = ['Win2003SP12Syscalls']
    syscall_module = 'volatility.plugins.overlays.windows.win2003_sp0_x86_syscalls'
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '32bit',
                  'major': lambda x: x == 5,
                  'minor': lambda x: x == 2,
                  'build': lambda x: x == 3789}
class Win2003SP12Syscalls(AbstractSyscalls):
    syscall_module = 'volatility.plugins.overlays.windows.win2003_sp12_x86_syscalls'
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '32bit',
                  'major': lambda x : x == 5,
                  'minor': lambda x : x == 2}
class Win2003SP12x64Syscalls(AbstractSyscalls):
    syscall_module = 'volatility.plugins.overlays.windows.win2003_sp12_x64_syscalls'
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '64bit',
                  'major': lambda x : x == 5,
                  'minor': lambda x : x == 2}
class VistaSP0Syscalls(AbstractSyscalls):
    syscall_module = 'volatility.plugins.overlays.windows.vista_sp0_x86_syscalls'
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '32bit',
                  'major': lambda x : x == 6,
                  'minor': lambda x : x == 0,
                  'build': lambda x : x == 6000}
class VistaSP0x64Syscalls(AbstractSyscalls):
    syscall_module = 'volatility.plugins.overlays.windows.vista_sp0_x64_syscalls'
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '64bit',
                  'major': lambda x : x == 6,
                  'minor': lambda x : x == 0,
                  'build': lambda x : x == 6000}
class VistaSP12Syscalls(AbstractSyscalls):
    syscall_module = 'volatility.plugins.overlays.windows.vista_sp12_x86_syscalls'
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '32bit',
                  'major': lambda x : x == 6,
                  'minor': lambda x : x == 0,
                  'build': lambda x : x >= 6001}
class VistaSP12x64Syscalls(AbstractSyscalls):
    syscall_module = 'volatility.plugins.overlays.windows.vista_sp12_x64_syscalls'
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '64bit',
                  'major': lambda x : x == 6,
                  'minor': lambda x : x == 0,
                  'build': lambda x : x >= 6001}
class Win7SP01Syscalls(AbstractSyscalls):
    syscall_module = 'volatility.plugins.overlays.windows.win7_sp01_x86_syscalls'
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '32bit',
                  'major': lambda x : x == 6,
                  'minor': lambda x : x == 1}
class Win7SP01x64Syscalls(AbstractSyscalls):
    syscall_module = 'volatility.plugins.overlays.windows.win7_sp01_x64_syscalls'
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '64bit',
                  'major': lambda x : x == 6,
                  'minor': lambda x : x == 1}
| gpl-2.0 |
hastalafiesta/android_kernel_lge_g3 | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Require exactly one argument: the object file whose unwind info to check.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)
# The readelf binary can be overridden via the READELF environment variable.
readelf = os.getenv("READELF", "readelf")
# Matches "<func>: [0xSTART-0xEND]" header lines in `readelf -u` output.
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches region lines carrying an "rlen=N" region-length count.
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Report a mismatch between the function's slot count (derived from its
    # address range) and the sum of its unwind-region lengths; bump the
    # global error counter.  An unnamed function is shown by address range.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
# Parse `readelf -u` output: each "<func>: [start-end]" line starts a new
# function (ia64 has 3 instruction slots per 16-byte bundle); "rlen=" lines
# accumulate region lengths, which must sum to the function's slot count.
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # Validate the previous function before starting the next one.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Flush the final function, then print a summary.
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
seaotterman/tensorflow | tensorflow/python/kernel_tests/matrix_inverse_op_test.py | 10 | 3692 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class InverseOpTest(test.TestCase):
  """Tests for linalg_ops.matrix_inverse (run on CPU/GPU via test_session)."""
  def _verifyInverse(self, x):
    # Check inv(x) for both float dtypes, with and without adjoint, by
    # verifying that inv(x) @ x equals the identity matrix.
    for np_type in [np.float32, np.float64]:
      for adjoint in False, True:
        y = x.astype(np_type)
        with self.test_session(use_gpu=True):
          # Verify that x^{-1} * x == Identity matrix.
          inv = linalg_ops.matrix_inverse(y, adjoint=adjoint)
          tf_ans = math_ops.matmul(inv, y, adjoint_b=adjoint)
          np_ans = np.identity(y.shape[-1])
          if x.ndim > 2:
            # Batched input: tile the identity to match the batch shape.
            tiling = list(y.shape)
            tiling[-2:] = [1, 1]
            np_ans = np.tile(np_ans, tiling)
          out = tf_ans.eval()
          self.assertAllClose(np_ans, out)
          self.assertShapeEqual(y, tf_ans)
  def testNonsymmetric(self):
    # 2x2 matrices
    matrix1 = np.array([[1., 2.], [3., 4.]])
    matrix2 = np.array([[1., 3.], [3., 5.]])
    self._verifyInverse(matrix1)
    self._verifyInverse(matrix2)
    # A multidimensional batch of 2x2 matrices
    matrix_batch = np.concatenate(
        [np.expand_dims(matrix1, 0), np.expand_dims(matrix2, 0)])
    matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
    self._verifyInverse(matrix_batch)
  def testSymmetricPositiveDefinite(self):
    # 2x2 matrices
    matrix1 = np.array([[2., 1.], [1., 2.]])
    matrix2 = np.array([[3., -1.], [-1., 3.]])
    self._verifyInverse(matrix1)
    self._verifyInverse(matrix2)
    # A multidimensional batch of 2x2 matrices
    matrix_batch = np.concatenate(
        [np.expand_dims(matrix1, 0), np.expand_dims(matrix2, 0)])
    matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
    self._verifyInverse(matrix_batch)
  def testNonSquareMatrix(self):
    # When the inverse of a non-square matrix is attempted we should return
    # an error
    with self.assertRaises(ValueError):
      linalg_ops.matrix_inverse(np.array([[1., 2., 3.], [3., 4., 5.]]))
  def testWrongDimensions(self):
    # The input to the inverse should be at least a 2-dimensional tensor.
    tensor3 = constant_op.constant([1., 2.])
    with self.assertRaises(ValueError):
      linalg_ops.matrix_inverse(tensor3)
  def testNotInvertible(self):
    # The input should be invertible.
    with self.test_session():
      with self.assertRaisesOpError("Input is not invertible."):
        # All rows of the matrix below add to zero.
        tensor3 = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
                                        [0., -1., 1.]])
        linalg_ops.matrix_inverse(tensor3).eval()
if __name__ == "__main__":
  test.main()
| apache-2.0 |
OCA/program | program_multi_menu_budget/program_result_region.py | 1 | 1213 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Savoir-faire Linux (<www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class program_result_region(orm.Model):
    """Extend program.result.region with a configurable top-level menu."""
    _inherit = 'program.result.region'
    _columns = {
        # NOTE(review): presumably the root menu under which this region's
        # program/budget menus are placed — confirm against the module views.
        'top_level_menu_id': fields.many2one('ir.ui.menu', 'Top Level Menu'),
    }
| agpl-3.0 |
TheTimmy/spack | var/spack/repos/builtin/packages/libxprintutil/package.py | 3 | 1793 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libxprintutil(AutotoolsPackage):
    """Xprint application utility routines."""
    homepage = "https://cgit.freedesktop.org/xorg/lib/libXprintUtil/"
    url = "https://www.x.org/archive/individual/lib/libXprintUtil-1.0.1.tar.gz"
    # Release checksum (md5 of the 1.0.1 tarball).
    version('1.0.1', '2f02e812f3e419534ced6fcb5860825f')
    # X11 client libraries this package links against.
    depends_on('libx11')
    depends_on('libxp')
    depends_on('libxt')
    depends_on('libxau')
    # Build-only dependencies: protocol headers and autotools machinery.
    depends_on('printproto', type='build')
    depends_on('pkg-config@0.9.0:', type='build')
    depends_on('util-macros', type='build')
| lgpl-2.1 |
loveyoupeng/rt | modules/web/src/main/native/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py | 379 | 79448 | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2010, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.2.0"
__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces:
#loosen sgmllib's tag-name pattern so '-', '_', '.' and ':' (the
#namespace separator) are accepted after the leading letter.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
#Apply the same loosening to declaration names parsed by markupbase.
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
#Encoding used by the various __str__/renderContents methods whenever
#the caller does not specify one explicitly.
DEFAULT_OUTPUT_ENCODING = "utf-8"
def _match_css_class(str):
"""Build a RE to match the given CSS class."""
return re.compile(r"(^|.*\s)%s($|\s)" % str)
# First, the classes that represent markup elements.
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text).

    Every element carries five pointers into the parse tree:
    parent, previous/next (document order, depth-first) and
    previousSibling/nextSibling (adjacent children of the same parent).
    All navigation and search methods below walk these pointers."""
    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        if self.parent and self.parent.contents:
            # This element follows the parent's existing children:
            # hook up sibling pointers on both sides.
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self
    def replaceWith(self, replaceWith):
        # Replaces this element (in its parent's contents) with the
        # given element, repairing all tree pointers via extract/insert.
        oldParent = self.parent
        myIndex = self.parent.index(self)
        if hasattr(replaceWith, "parent")\
           and replaceWith.parent is self.parent:
            # We're replacing this element with one of its siblings.
            index = replaceWith.parent.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)
    def replaceWithChildren(self):
        # Replaces this element with its own children (i.e. unwraps it).
        myParent = self.parent
        myIndex = self.parent.index(self)
        self.extract()
        # Insert in reverse order so that repeated insertion at the
        # same index restores the original child order.
        reversedChildren = list(self.contents)
        reversedChildren.reverse()
        for child in reversedChildren:
            myParent.insert(myIndex, child)
    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                del self.parent.contents[self.parent.index(self)]
            except ValueError:
                pass
        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next
        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None
        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        # Returns self so extraction can be chained, e.g.
        # tree.find('a').extract().
        return self
    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild
    def insert(self, position, newChild):
        # Inserts newChild into self.contents at the given position and
        # re-threads every affected next/previous/sibling pointer.
        if isinstance(newChild, basestring) \
            and not isinstance(newChild, NavigableString):
            # Plain strings are promoted to NavigableString so they
            # carry navigation pointers.
            newChild = NavigableString(newChild)
        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent is self:
                index = self.index(newChild)
                if index > position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()
        newChild.parent = self
        previousChild = None
        if position == 0:
            # First child: document-order predecessor is this tag itself.
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            # Document-order predecessor is the deepest last descendant
            # of the preceding sibling.
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild
        newChildsLastElement = newChild._lastRecursiveChild()
        if position >= len(self.contents):
            newChild.nextSibling = None
            # Appended at the end: the next element in document order is
            # the next sibling of the nearest ancestor that has one.
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild
        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)
    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)
    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)
    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)
    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x
    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)
    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        l = self.findParents(name, attrs, 1)
        if l:
            r = l[0]
        return r
    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x
    #These methods do the real heavy lifting.
    def _findOne(self, method, name, attrs, text, **kwargs):
        # Runs the given findAll*-style method with limit=1 and returns
        # the single result (or None).
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        if isinstance(name, SoupStrainer):
            strainer = name
        # (Possibly) special case some findAll*(...) searches
        elif text is None and not limit and not attrs and not kwargs:
            # findAll*(True)
            if name is True:
                return [element for element in generator()
                        if isinstance(element, Tag)]
            # findAll*('tag-name')
            elif isinstance(name, basestring):
                return [element for element in generator()
                        if isinstance(element, Tag) and
                        element.name == name]
            else:
                strainer = SoupStrainer(name, attrs, text, **kwargs)
        # Build a SoupStrainer
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        while True:
            try:
                # py2 generator protocol; the generators below end by
                # yielding None, which the 'if i:' test filters out.
                i = g.next()
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results
    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.  Each one yields successive elements
    #along one pointer chain and finally yields the terminating None.
    def nextGenerator(self):
        i = self
        while i is not None:
            i = i.next
            yield i
    def nextSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.nextSibling
            yield i
    def previousGenerator(self):
        i = self
        while i is not None:
            i = i.previous
            yield i
    def previousSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.previousSibling
            yield i
    def parentGenerator(self):
        i = self
        while i is not None:
            i = i.parent
            yield i
    # Utility methods
    def substituteEncoding(self, str, encoding=None):
        # Replaces the %SOUP-ENCODING% placeholder with the actual
        # output encoding (defaults to utf-8).
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)
    def toEncoding(self, s, encoding=None):
        """Encodes an object to a string in some encoding, or to Unicode
        if no encoding is given."""
        if isinstance(s, unicode):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, str):
            if encoding:
                s = s.encode(encoding)
            else:
                s = unicode(s)
        else:
            # Not a string at all: stringify first, then recurse.
            if encoding:
                s = self.toEncoding(str(s), encoding)
            else:
                s = unicode(s)
        return s
class NavigableString(unicode, PageElement):
    """A chunk of text in the parse tree: a real (Python 2) unicode
    string that also carries PageElement's navigation pointers."""
    def __new__(cls, value):
        """Create a new NavigableString.
        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
    def __getnewargs__(self):
        # Pickle support: round-trip through the encoded byte form;
        # __new__ decodes it back using DEFAULT_OUTPUT_ENCODING.
        return (NavigableString.__str__(self),)
    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
    def __unicode__(self):
        # Round-trips through the encoded form so that subclasses'
        # markup wrappers (e.g. CData's "<![CDATA[...]]>") appear in
        # the Unicode rendering too.
        return str(self).decode(DEFAULT_OUTPUT_ENCODING)
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # With an encoding, return an encoded byte string; with
        # encoding=None, return the (unicode) object itself.
        if encoding:
            return self.encode(encoding)
        else:
            return self
class CData(NavigableString):
    """A CDATA section; rendered wrapped in "<![CDATA[...]]>"."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        encoded = NavigableString.__str__(self, encoding)
        return "<![CDATA[" + encoded + "]]>"
class ProcessingInstruction(NavigableString):
    """A processing instruction; rendered wrapped in "<?...?>", with any
    %SOUP-ENCODING% placeholder replaced by the output encoding."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = self
        if "%SOUP-ENCODING%" in text:
            text = self.substituteEncoding(text, encoding)
        return "<?%s?>" % self.toEncoding(text, encoding)
class Comment(NavigableString):
    """A comment; rendered wrapped in "<!--...-->"."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        encoded = NavigableString.__str__(self, encoding)
        return "<!--" + encoded + "-->"
class Declaration(NavigableString):
    """An SGML declaration; rendered wrapped in "<!...>"."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        encoded = NavigableString.__str__(self, encoding)
        return "<!" + encoded + ">"
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i
XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
"quot" : '"',
"amp" : "&",
"lt" : "<",
"gt" : ">" }
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs is None:
attrs = []
elif isinstance(attrs, dict):
attrs = attrs.items()
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
# Convert any HTML, XML, or numeric entities in the attribute values.
convert = lambda(k, val): (k,
re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities,
val))
self.attrs = map(convert, self.attrs)
def getString(self):
if (len(self.contents) == 1
and isinstance(self.contents[0], NavigableString)):
return self.contents[0]
def setString(self, string):
"""Replace the contents of the tag with a string"""
self.clear()
self.append(string)
string = property(getString, setString)
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def clear(self):
"""Extract all children."""
for child in self.contents[:]:
child.extract()
def index(self, element):
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if other is self:
return True
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.__str__(encoding)
def __unicode__(self):
return self.__str__(None)
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
encodedName = self.toEncoding(self.name, encoding)
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isinstance(val, basestring):
if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
val = self.substituteEncoding(val, encoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
if "'" in val:
# TODO: replace with apos when
# appropriate.
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
attrs.append(fmt % (self.toEncoding(key, encoding),
self.toEncoding(val, encoding)))
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % encodedName
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.renderContents(encoding, prettyPrint, indentContents)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (encodedName, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
if len(self.contents) == 0:
return
current = self.contents[0]
while current is not None:
next = current.next
if isinstance(current, Tag):
del current.contents[:]
current.parent = None
current.previous = None
current.previousSibling = None
current.next = None
current.nextSibling = None
current = next
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
encoding. If encoding is None, returns a Unicode string.."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
return self.findAll(text=text, recursive=recursive, limit=limit)
def firstText(self, text=None, recursive=True):
return self.find(text=text, recursive=recursive)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
# Just use the iterator from the contents
return iter(self.contents)
def recursiveChildGenerator(self):
if not len(self.contents):
raise StopIteration
stopNode = self._lastRecursiveChild().next
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text).

    name/attrs/text may each be a string, a regular expression object,
    a list, True, or a callable; _matches() defines the semantics."""
    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        if isinstance(attrs, basestring):
            # A bare string for attrs is shorthand for a CSS class match.
            kwargs['class'] = _match_css_class(attrs)
            attrs = None
        if kwargs:
            # Extra keyword arguments become attribute constraints.
            # The shared {} default is never mutated: copy() first.
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text
    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)
    def searchTag(self, markupName=None, markupAttrs={}):
        # Matches this strainer against a tag, given either as a Tag
        # object (in markupName) or as a (name, attrs) pair.  Returns
        # the matched object, or None.
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            # A Tag supports .get(), so it can serve as its own
            # attribute map below.
            markupAttrs = markup
        callFunctionWithTagData = callable(self.name) \
                                  and not isinstance(markupName, Tag)
        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        # Build the attribute map lazily, only when
                        # there is at least one attribute constraint.
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found
    def search(self, markup):
        # Dispatches on the type of markup: list of elements, Tag, or
        # text.  Returns the matched object, or None.
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, "__iter__") \
                and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, basestring):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found
    def _matches(self, markup, matchAgainst):
        # Core matching rules: True matches any non-None value; a
        # callable is applied directly; otherwise the markup is reduced
        # to a string (a Tag's name) and compared against a regexp,
        # list, or string.
        result = False
        if matchAgainst is True:
            result = markup is not None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isinstance(markup, basestring):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif hasattr(matchAgainst, '__iter__'): # list-like
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): this branch looks unreachable for real
                # dicts (they also have __iter__, caught above), and
                # markup here is a string/None with no has_key --
                # likely dead or defective; confirm before relying on it.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isinstance(markup, basestring):
                # Coerce matchAgainst to the same string type as markup
                # before the equality check below.
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)
            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        """Create an empty result list.

        source: the SoupStrainer whose search produced this set."""
        # Bug fix: the original called list.__init__([]), initializing
        # a throwaway list instead of this instance.
        list.__init__(self)
        self.source = source
# Now, some helper functions.
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps.

    Later arguments override earlier ones for duplicate keys."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # A map: merge its key/value pairs verbatim.
            built.update(portion.items())
        elif hasattr(portion, '__iter__'):
            # A list: every element maps to the default value.
            for key in portion:
                built[key] = default
        else:
            # A scalar: it maps to the default value itself.
            built[portion] = default
    return built
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
    """This class contains the basic parser and search code. It defines
    a parser that knows nothing about tag behavior except for the
    following:
    You can't close a tag without closing all the tags it encloses.
    That is, "<foo><bar></foo>" actually means
    "<foo><bar></bar></foo>".
    [Another possible explanation is "<foo><bar /></foo>", but since
    this class defines no SELF_CLOSING_TAGS, it will never use that
    explanation.]
    This class is useful for parsing XML or made-up markup languages,
    or when BeautifulSoup makes an assumption counter to what you were
    expecting."""
    # Per-class parsing policy; subclasses with real HTML knowledge
    # override these (they are empty here, so every tag nests).
    SELF_CLOSING_TAGS = {}
    NESTABLE_TAGS = {}
    RESET_NESTING_TAGS = {}
    QUOTE_TAGS = {}
    PRESERVE_WHITESPACE_TAGS = []
    # Default regex fixups applied to markup before parsing: insert a
    # space before the "/>" of self-closing tags, and strip whitespace
    # after the "<!" of declarations -- both choke sgmllib otherwise.
    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
                       lambda x: x.group(1) + ' />'),
                      (re.compile('<!\s+([^<>]*)>'),
                       lambda x: '<!' + x.group(1) + '>')
                      ]
    # Name given to the hidden root tag of every parse tree.
    ROOT_TAG_NAME = u'[document]'
    # Accepted values for the convertEntities constructor argument.
    HTML_ENTITIES = "html"
    XML_ENTITIES = "xml"
    XHTML_ENTITIES = "xhtml"
    # TODO: This only exists for backwards-compatibility
    ALL_ENTITIES = XHTML_ENTITIES
    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.
        sgmllib will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        sgmllib, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.
        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.
        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke sgmllib:
        <br/> (No space between name of closing tag and tag close)
        <! --Comment--> (Extraneous whitespace in declaration)
        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False
        # Per-instance additions to the class-level SELF_CLOSING_TAGS.
        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        SGMLParser.__init__(self)
        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            # Parsing happens here, in the constructor.
            self._feed(isHTML=isHTML)
        except StopParsing:
            pass
        self.markup = None                 # The markup can now be GCed
def convert_charref(self, name):
"""This method fixes a bug in Python's SGMLParser."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127 : # ASCII ends at 127, not 255
return
return self.convert_codepoint(n)
    def _feed(self, inDocumentEncoding=None, isHTML=False):
        """Converts the stored markup to Unicode, applies the
        markupMassage regex fixups, and runs the SGML parser over it."""
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            # Byte string: let UnicodeDammit detect the encoding.
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                if not hasattr(self.markupMassage, "__iter__"):
                    # markupMassage=True selects the default fixups.
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()
        SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()
def __getattr__(self, methodName):
    """Route dynamic attribute lookups to either the SGMLParser
    superclass (parser event handlers) or the Tag superclass (tree
    navigation), depending on the method name."""
    #print "__getattr__ called on %s.%s" % (self.__class__, methodName)
    if methodName.startswith(('start_', 'end_', 'do_')):
        # Parser event handlers belong to SGMLParser.
        return SGMLParser.__getattr__(self, methodName)
    if not methodName.startswith('__'):
        # Everything else (e.g. soup.titleTag) is Tag-style navigation.
        return Tag.__getattr__(self, methodName)
    raise AttributeError
def isSelfClosingTag(self, name):
    """Returns true iff the given string is the name of a
    self-closing tag according to this parser.

    Checks both the class-level table and the per-instance table built
    from the selfClosingTags constructor argument."""
    # Idiom fix: `in` instead of the deprecated dict.has_key()
    # (identical behavior on Python 2, and forward-compatible).
    return name in self.SELF_CLOSING_TAGS \
           or name in self.instanceSelfClosingTags
def reset(self):
    # Re-initialize this soup as the (hidden) root Tag of a new tree
    # and clear all parser state.
    Tag.__init__(self, self, self.ROOT_TAG_NAME)
    self.hidden = 1  # the root tag itself is not rendered in output
    SGMLParser.reset(self)
    self.currentData = []   # text chunks not yet flushed by endData()
    self.currentTag = None
    self.tagStack = []
    self.quoteStack = []    # names of currently open QUOTE_TAGS (e.g. script)
    self.pushTag(self)      # the soup object sits at the bottom of the stack
def popTag(self):
    """Remove the most recently opened tag from the stack. The tag
    below it (if any) becomes the current tag, which is returned."""
    self.tagStack.pop()
    #print "Pop", tag.name
    if self.tagStack:
        self.currentTag = self.tagStack[-1]
    return self.currentTag
def pushTag(self, tag):
    """Open a new tag: attach it to the current tag's contents and make
    it the new current tag."""
    #print "Push", tag.name
    parent = self.currentTag
    if parent:
        parent.contents.append(tag)
    self.tagStack.append(tag)
    self.currentTag = tag
def endData(self, containerClass=NavigableString):
    """Flush the text accumulated by handle_data() into the tree as an
    instance of containerClass, wiring up next/previous links."""
    if self.currentData:
        currentData = u''.join(self.currentData)
        # Collapse a pure-whitespace run down to a single '\n' or ' ',
        # unless we are inside a whitespace-preserving tag (e.g. <pre>).
        if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
            not set([tag.name for tag in self.tagStack]).intersection(
                self.PRESERVE_WHITESPACE_TAGS)):
            if '\n' in currentData:
                currentData = '\n'
            else:
                currentData = ' '
        self.currentData = []
        # When parsing only a SoupStrainer's matches, drop top-level
        # text the strainer does not accept.
        if self.parseOnlyThese and len(self.tagStack) <= 1 and \
               (not self.parseOnlyThese.text or \
                not self.parseOnlyThese.search(currentData)):
            return
        o = containerClass(currentData)
        o.setup(self.currentTag, self.previous)
        if self.previous:
            self.previous.next = o
        self.previous = o
        self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
    """Pops the tag stack up to and including the most recent
    instance of the given tag. If inclusivePop is false, pops the tag
    stack up to but *not* including the most recent instance of
    the given tag.

    Returns the last tag actually popped, or None if nothing matched."""
    #print "Popping to %s" % name
    if name == self.ROOT_TAG_NAME:
        # The root is never popped.
        return
    numPops = 0
    mostRecentTag = None
    # Scan from the innermost open tag outwards (index 0 is the root).
    for i in range(len(self.tagStack)-1, 0, -1):
        if name == self.tagStack[i].name:
            numPops = len(self.tagStack)-i
            break
    if not inclusivePop:
        numPops = numPops - 1
    for i in range(0, numPops):
        mostRecentTag = self.popTag()
    return mostRecentTag
def _smartPop(self, name):
    """We need to pop up to the previous tag of this type, unless
    one of this tag's nesting reset triggers comes between this
    tag and the previous tag of this type, OR unless this tag is a
    generic nesting trigger and another generic nesting trigger
    comes between this tag and the previous tag of this type.

    Examples:
     <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
     <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
     <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

     <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
     <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
     <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
    """
    # nestingResetTriggers is None for non-nestable tags, a (possibly
    # empty) list of ancestor tag names for nestable ones.
    nestingResetTriggers = self.NESTABLE_TAGS.get(name)
    isNestable = nestingResetTriggers != None
    isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
    popTo = None
    inclusive = True
    # Walk the open-tag stack from innermost to outermost.
    for i in range(len(self.tagStack)-1, 0, -1):
        p = self.tagStack[i]
        if (not p or p.name == name) and not isNestable:
            #Non-nestable tags get popped to the top or to their
            #last occurance.
            popTo = name
            break
        if (nestingResetTriggers is not None
            and p.name in nestingResetTriggers) \
            or (nestingResetTriggers is None and isResetNesting
                and self.RESET_NESTING_TAGS.has_key(p.name)):
            #If we encounter one of the nesting reset triggers
            #peculiar to this tag, or we encounter another tag
            #that causes nesting to reset, pop up to but not
            #including that tag.
            popTo = p.name
            inclusive = False
            break
        # NOTE(review): this assignment is overwritten at the top of the
        # next iteration (p is re-read from tagStack), so it appears
        # vestigial -- confirm before removing.
        p = p.parent
    if popTo:
        self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
    """Create a Tag for an opening tag and push it onto the stack.

    Returns the new Tag, or None when a SoupStrainer rejects it or the
    tag appears inside a quoted (literal-text) region."""
    #print "Start tag %s: %s" % (name, attrs)
    if self.quoteStack:
        #This is not a real tag.
        #print "<%s> is not real!" % name
        # Inside e.g. <script>: re-serialize the tag as literal text.
        attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
        self.handle_data('<%s%s>' % (name, attrs))
        return
    self.endData()
    if not self.isSelfClosingTag(name) and not selfClosing:
        # Implicitly close tags this one can't nest inside.
        self._smartPop(name)
    if self.parseOnlyThese and len(self.tagStack) <= 1 \
           and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
        return
    tag = Tag(self, name, attrs, self.currentTag, self.previous)
    if self.previous:
        self.previous.next = tag
    self.previous = tag
    self.pushTag(tag)
    if selfClosing or self.isSelfClosingTag(name):
        # Self-closing tags are closed immediately.
        self.popTag()
    if name in self.QUOTE_TAGS:
        #print "Beginning quote (%s)" % name
        # Treat everything up to the matching end tag as literal text.
        self.quoteStack.append(name)
        self.literal = 1
    return tag
def unknown_endtag(self, name):
    """Close the most recent tag with this name, honoring quoted
    (literal-text) regions such as <script>...</script>."""
    #print "End tag %s" % name
    if self.quoteStack and self.quoteStack[-1] != name:
        #This is not a real end tag.
        #print "</%s> is not real!" % name
        self.handle_data('</%s>' % name)
        return
    self.endData()
    self._popToTag(name)
    if self.quoteStack and self.quoteStack[-1] == name:
        # This end tag terminates the current quoted region.
        self.quoteStack.pop()
        self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
    """Accumulate a chunk of character data; endData() flushes it."""
    self.currentData += [data]
def _toStringSubclass(self, text, subclass):
    """Adds a certain piece of text to the tree as a NavigableString
    subclass."""
    # Flush any pending text, add this text on its own, then flush it
    # into an instance of the requested subclass.
    self.endData()
    self.handle_data(text)
    self.endData(subclass)
def handle_pi(self, text):
    """Handle a processing instruction as a ProcessingInstruction
    object, possibly one with a %SOUP-ENCODING% slot into which an
    encoding will be plugged later."""
    if text.startswith("xml"):
        # Replace the declared encoding with a substitution slot that is
        # filled in when the tree is rendered.
        text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
    self._toStringSubclass(text, ProcessingInstruction)
def handle_comment(self, text):
    "Handle comments as Comment objects."
    self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
    "Handle character references as data."
    if self.convertEntities:
        # Convert &#NNN; to the corresponding Unicode character.
        self.handle_data(unichr(int(ref)))
    else:
        # Pass the reference through unchanged.
        self.handle_data('&#%s;' % ref)
def handle_entityref(self, ref):
    """Handle entity references as data, possibly converting known
    HTML and/or XML entity references to the corresponding Unicode
    characters."""
    data = None
    # First try the HTML entity table, then the XML one.
    if self.convertHTMLEntities:
        try:
            data = unichr(name2codepoint[ref])
        except KeyError:
            pass
    if not data and self.convertXMLEntities:
        data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
    if not data and self.convertHTMLEntities and \
        not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
        # TODO: We've got a problem here. We're told this is
        # an entity reference, but it's not an XML entity
        # reference or an HTML entity reference. Nonetheless,
        # the logical thing to do is to pass it through as an
        # unrecognized entity reference.
        #
        # Except: when the input is "&carol;" this function
        # will be called with input "carol". When the input is
        # "AT&T", this function will be called with input
        # "T". We have no way of knowing whether a semicolon
        # was present originally, so we don't know whether
        # this is an unknown entity or just a misplaced
        # ampersand.
        #
        # The more common case is a misplaced ampersand, so I
        # escape the ampersand and omit the trailing semicolon.
        data = "&%s" % ref
    if not data:
        # This case is different from the one above, because we
        # haven't already gone through a supposedly comprehensive
        # mapping of entities to Unicode characters. We might not
        # have gone through any mapping at all. So the chances are
        # very high that this is a real entity, and not a
        # misplaced ampersand.
        data = "&%s;" % ref
    self.handle_data(data)
def handle_decl(self, data):
    "Handle DOCTYPEs and the like as Declaration objects."
    self._toStringSubclass(data, Declaration)
def parse_declaration(self, i):
    """Treat a bogus SGML declaration as raw data. Treat a CDATA
    declaration as a CData object.

    i is the index into self.rawdata where the declaration starts;
    returns the index just past the declaration."""
    j = None
    if self.rawdata[i:i+9] == '<![CDATA[':
        # CDATA section: everything up to ']]>' is literal text.
        k = self.rawdata.find(']]>', i)
        if k == -1:
            # Unterminated CDATA: consume the rest of the document.
            k = len(self.rawdata)
        data = self.rawdata[i+9:k]
        j = k+3
        self._toStringSubclass(data, CData)
    else:
        try:
            j = SGMLParser.parse_declaration(self, i)
        except SGMLParseError:
            # Malformed declaration: swallow the rest as raw data.
            toHandle = self.rawdata[i:]
            self.handle_data(toHandle)
            j = i + len(toHandle)
    return j
class BeautifulSoup(BeautifulStoneSoup):
    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurrence of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurrence
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
        but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        # Default to converting MS smart quotes to HTML entities;
        # the caller may override via smartQuotesTo.
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ('br', 'hr', 'input', 'img', 'meta',
                                     'spacer', 'link', 'frame', 'base', 'col'))

    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    # The contents of these tags are treated as literal text.
    QUOTE_TAGS = {'script': None, 'textarea': None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center')

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }

    NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        # Restart the parse with the declared encoding.
                        self._feed(self.declaredHTMLEncoding)
                        raise StopParsing
                    pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised internally to abort a parse in progress (see
    BeautifulSoup.start_meta, which re-feeds the document once the real
    encoding is discovered)."""
    pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-so-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    # Cleanup: the original tuple listed 'strong' and 'big' twice.
    # buildTagMap keys the entries into a dict, so removing the
    # duplicates leaves the resulting NESTABLE_TAGS map unchanged.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
        ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
         'cite', 'code', 'dfn', 'kbd', 'samp', 'var', 'b')

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)

    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # NOTE(review): buildTagMap's first argument is the *default value*,
    # not a tag name, so this call yields an empty map and 'noscript' is
    # never registered as a nesting-reset tag. This matches upstream
    # Beautiful Soup 3, but looks unintended -- confirm before changing.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""

    def popTag(self):
        """On every pop, promote a single-string child tag into an
        attribute of its parent (unless the parent already has an
        attribute with that name)."""
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            # Idiom fix: `not in` instead of deprecated dict.has_key()
            # (identical behavior on Python 2).
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                tag.name not in parent.attrMap):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }

    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml', isHTML=False):
        # NOTE(review): the mutable default for overrideEncodings is only
        # iterated, never mutated, so it is harmless here.
        self.declaredHTMLEncoding = None
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup, isHTML)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        if markup == '' or isinstance(markup, unicode):
            # Nothing to convert.
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return

        # Try encodings in priority order: explicit overrides, then the
        # declared/sniffed encodings, then chardet, then fallbacks.
        u = None
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break

        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])

        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break

        self.unicode = u
        if not u: self.originalEncoding = None

    def _subMSChar(self, orig):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        sub = self.MS_CHARS.get(orig)
        if isinstance(sub, tuple):
            # Tuple entries are (html-entity-name, hex-codepoint).
            if self.smartQuotesTo == 'xml':
                sub = '&#x%s;' % sub[1]
            else:
                sub = '&%s;' % sub[0]
        return sub

    def _convertFrom(self, proposed):
        """Try to decode self.markup with the proposed encoding.
        Returns the Unicode string on success, None on failure."""
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup

        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub \
                     (lambda(x): self._subMSChar(x.group(1)),
                      markup)

        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''

        # strip Byte Order Mark (if present)
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata

    def _detectEncoding(self, xml_data, isHTML=False):
        """Given a document, tries to detect its XML encoding.

        Returns (possibly re-encoded data, declared encoding,
        BOM/prefix-sniffed encoding)."""
        xml_encoding = sniffed_xml_encoding = None
        try:
            # Sniff the first bytes for a BOM or an encoded '<?' prefix.
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
        except:
            xml_encoding_match = None
        # Look for an explicit encoding declaration (XML PI, or a META
        # charset when parsing HTML).
        xml_encoding_match = re.compile(
            '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
        if not xml_encoding_match and isHTML:
            regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
            xml_encoding_match = regexp.search(xml_data)
        if xml_encoding_match is not None:
            xml_encoding = xml_encoding_match.groups()[0].lower()
            if isHTML:
                self.declaredHTMLEncoding = xml_encoding
            # A BOM overrides a generic multi-byte declaration.
            if sniffed_xml_encoding and \
               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',
                                 'utf16', 'u16')):
                xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding

    def find_codec(self, charset):
        """Map a charset name to a codec Python recognizes, trying a few
        common name variants; falls back to the input unchanged."""
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset

    def _codec(self, charset):
        # Return charset if Python has a codec for it, else None.
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec

    # Lazily-built EBCDIC -> ASCII translation table (see below).
    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        """Translate an EBCDIC byte string to ASCII."""
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
                ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)

    # windows-1252 "smart" characters mapped to (entity name, hex
    # codepoint) pairs, or a literal replacement string.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    # Read HTML from stdin and emit a pretty-printed version.
    import sys
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
| gpl-2.0 |
scalingdata/Impala | bin/start-impala-cluster.py | 6 | 13136 | #!/usr/bin/env python
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Starts up an Impala cluster (ImpalaD + State Store) with the specified number of
# ImpalaD instances. Each ImpalaD runs on a different port allowing this to be run
# on a single machine.
import os
import sys
from time import sleep, time
from optparse import OptionParser
# Options
# Command-line options controlling cluster size, build type, daemon
# arguments, logging, and kill behavior.
parser = OptionParser()
parser.add_option("-s", "--cluster_size", type="int", dest="cluster_size", default=3,
                  help="Size of the cluster (number of impalad instances to start).")
parser.add_option("--build_type", dest="build_type", default='debug',
                  help="Build type to use - debug / release")
parser.add_option("--impalad_args", dest="impalad_args", default="",
                  help="Additional arguments to pass to each Impalad during startup")
parser.add_option("--state_store_args", dest="state_store_args", default="",
                  help="Additional arguments to pass to State Store during startup")
parser.add_option("--catalogd_args", dest="catalogd_args", default="",
                  help="Additional arguments to pass to the Catalog Service at startup")
parser.add_option("--kill", "--kill_only", dest="kill_only", action="store_true",
                  default=False, help="Instead of starting the cluster, just kill all"\
                  " the running impalads and the statestored.")
parser.add_option("--force_kill", dest="force_kill", action="store_true", default=False,
                  help="Force kill impalad and statestore processes.")
parser.add_option("-r", "--restart_impalad_only", dest="restart_impalad_only",
                  action="store_true", default=False,
                  help="Restarts only the impalad processes")
parser.add_option("--in-process", dest="inprocess", action="store_true", default=False,
                  help="Start all Impala backends and state store in a single process.")
parser.add_option("--log_dir", dest="log_dir", default="/tmp",
                  help="Directory to store output logs to.")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
                  help="Prints all output to stderr/stdout.")
parser.add_option("--wait_for_cluster", dest="wait_for_cluster", action="store_true",
                  default=False, help="Wait until the cluster is ready to accept "\
                  "queries before returning.")
parser.add_option("--log_level", type="int", dest="log_level", default=1,
                  help="Set the impalad backend logging level")
parser.add_option("--jvm_args", dest="jvm_args", default="",
                  help="Additional arguments to pass to the JVM(s) during startup.")
options, args = parser.parse_args()

# Paths to the daemon launcher scripts, resolved against IMPALA_HOME.
IMPALA_HOME = os.environ['IMPALA_HOME']
KNOWN_BUILD_TYPES = ['debug', 'release']
IMPALAD_PATH = os.path.join(IMPALA_HOME,
    'bin/start-impalad.sh -build_type=%s' % options.build_type)
STATE_STORE_PATH = os.path.join(IMPALA_HOME,
    'bin/start-statestored.sh -build_type=%s' % options.build_type)
CATALOGD_PATH = os.path.join(IMPALA_HOME,
    'bin/start-catalogd.sh -build_type=%s' % options.build_type)
MINI_IMPALA_CLUSTER_PATH = IMPALAD_PATH + " -in-process"
IMPALA_SHELL = os.path.join(IMPALA_HOME, 'bin/impala-shell.sh')

# Format-string templates for per-instance port/logging/JVM flags.
IMPALAD_PORTS = ("-beeswax_port=%d -hs2_port=%d -be_port=%d "
                 "-state_store_subscriber_port=%d -webserver_port=%d "
                 "-llama_callback_port=%d")
JVM_ARGS = "-jvm_debug_port=%s -jvm_args=%s"
BE_LOGGING_ARGS = "-log_filename=%s -log_dir=%s -v=%s -logbufsecs=5"
CLUSTER_WAIT_TIMEOUT_IN_SECONDS = 240
def exec_impala_process(cmd, args, stderr_log_file_path):
    """Launch an Impala daemon in the background, redirecting its output
    to stderr_log_file_path (or the console in --verbose mode)."""
    if options.verbose:
        # Log straight to the console instead of a file.
        args += ' -logtostderr=1'
        redirect_output = ''
    else:
        redirect_output = "1>%s" % stderr_log_file_path
    os.system('%s %s %s 2>&1 &' % (cmd, args, redirect_output))
def kill_cluster_processes(force=False):
    """Kill every process belonging to a running (mini) Impala cluster.

    force: if True, send SIGKILL (-9) instead of waiting for clean exit.
    """
    # Bug fix: 'force' was previously accepted but never forwarded to
    # kill_matching_processes, so --force_kill had no effect.
    for binary in ('catalogd', 'impalad', 'statestored', 'mini-impala-cluster'):
        kill_matching_processes(binary, force=force)
def kill_matching_processes(binary_name, force=False):
    """Kills all processes with the given binary name"""
    # killall -w waits for the processes to actually die; -9 force-kills.
    signal_flag = " -9" if force else ""
    os.system("killall -w%s %s" % (signal_flag, binary_name))
def start_statestore():
print "Starting State Store logging to %s/statestored.INFO" % options.log_dir
stderr_log_file_path = os.path.join(options.log_dir, "statestore-error.log")
args = "%s %s" % (build_impalad_logging_args(0, "statestored"),
options.state_store_args)
exec_impala_process(STATE_STORE_PATH, args, stderr_log_file_path)
def start_catalogd():
print "Starting Catalog Service logging to %s/catalogd.INFO" % options.log_dir
stderr_log_file_path = os.path.join(options.log_dir, "catalogd-error.log")
args = "%s %s %s" % (build_impalad_logging_args(0, "catalogd"),
options.catalogd_args, build_jvm_args(options.cluster_size))
exec_impala_process(CATALOGD_PATH, args, stderr_log_file_path)
def start_mini_impala_cluster(cluster_size):
    """Launch all Impala backends and the state store as one process."""
    print ("Starting in-process Impala Cluster logging "
           "to %s/mini-impala-cluster.INFO" % options.log_dir)
    error_log = os.path.join(options.log_dir, 'mini-impala-cluster-error.log')
    startup_args = "-num_backends=%s %s" %\
        (cluster_size, build_impalad_logging_args(0, 'mini-impala-cluster'))
    exec_impala_process(MINI_IMPALA_CLUSTER_PATH, startup_args, error_log)
def build_impalad_port_args(instance_num):
    """Return the -*_port startup flags for impalad number instance_num.

    Each instance gets consecutive ports offset from the base ports
    below, so several impalads can coexist on one machine."""
    base_ports = (21000,  # beeswax
                  21050,  # hs2
                  22000,  # backend
                  23000,  # state store subscriber
                  25000,  # webserver
                  28000)  # llama callback
    return IMPALAD_PORTS % tuple(port + instance_num for port in base_ports)
def build_impalad_logging_args(instance_num, service_name):
  """Return the glog-related command line arguments for a daemon.

  instance_num is kept for signature symmetry with the other build_*
  helpers but is not currently used by the log arguments themselves.
  """
  # Fix: removed the unused 'log_file_path' local that was computed and
  # then discarded.
  return BE_LOGGING_ARGS % (service_name, options.log_dir, options.log_level)
def build_jvm_args(instance_num):
  """Return the JVM arguments; each instance gets its own debug port."""
  BASE_JVM_DEBUG_PORT = 30000
  return JVM_ARGS % (BASE_JVM_DEBUG_PORT + instance_num, options.jvm_args)
def start_impalad_instances(cluster_size):
  """Start 'cluster_size' impalad processes, one per instance number.

  The first instance logs to impalad.INFO; subsequent instances log to
  impalad_node<i>.INFO.  '#ID' in options.impalad_args is replaced with
  the instance number so per-instance flags can be expressed.
  """
  # Fix: honour the 'cluster_size' parameter instead of silently reading
  # options.cluster_size.  Existing callers pass options.cluster_size, so
  # behaviour is unchanged for them.
  for i in range(cluster_size):
    if i == 0:
      # The first impalad always logs to impalad.INFO
      service_name = "impalad"
    else:
      service_name = "impalad_node%s" % i
    # Sleep between instance startup: simultaneous starts hurt the minikdc
    # Yes, this is a hack, but it's easier than modifying the minikdc...
    sleep(2)
    args = "%s %s %s %s" %\
        (build_impalad_logging_args(i, service_name), build_jvm_args(i),
         build_impalad_port_args(i), options.impalad_args.replace("#ID", str(i)))
    stderr_log_file_path = os.path.join(options.log_dir, '%s-error.log' % service_name)
    exec_impala_process(IMPALAD_PATH, args, stderr_log_file_path)
def wait_for_impala_process_count(impala_cluster, retries=3):
  """Checks that the desired number of impalad/statestored processes are running.

  Refresh until the number running impalad/statestored processes reaches the
  expected number based on CLUSTER_SIZE, or the retry limit is hit.  Failing
  this, raise a RuntimeError describing every component that is missing.
  """
  for i in range(retries):
    # Re-poll only while something is still missing; each retry waits 2s.
    if len(impala_cluster.impalads) < options.cluster_size or \
        not impala_cluster.statestored or not impala_cluster.catalogd:
      sleep(2)
      impala_cluster.refresh()
  msg = str()
  if len(impala_cluster.impalads) < options.cluster_size:
    impalads_found = len(impala_cluster.impalads)
    msg += "Expected %d impalad(s), only %d found\n" %\
        (options.cluster_size, impalads_found)
  if not impala_cluster.statestored:
    msg += "statestored failed to start.\n"
  if not impala_cluster.catalogd:
    msg += "catalogd failed to start.\n"
  if msg:
    raise RuntimeError, msg
def wait_for_cluster_web(timeout_in_seconds=CLUSTER_WAIT_TIMEOUT_IN_SECONDS):
  """Checks if the cluster is "ready"

  A cluster is deemed "ready" if:
    - All backends are registered with the statestore.
    - Each impalad knows about all other impalads.
  This information is retrieved by querying the statestore debug webpage
  and each individual impalad's metrics webpage.
  """
  impala_cluster = ImpalaCluster()
  # impalad processes may take a while to come up.
  wait_for_impala_process_count(impala_cluster)
  for impalad in impala_cluster.impalads:
    impalad.service.wait_for_num_known_live_backends(options.cluster_size,
        timeout=CLUSTER_WAIT_TIMEOUT_IN_SECONDS, interval=2)
    wait_for_catalog(impalad, timeout_in_seconds=CLUSTER_WAIT_TIMEOUT_IN_SECONDS)
def wait_for_catalog(impalad, timeout_in_seconds):
  """Waits for the impalad catalog to become ready.

  Polls the impalad's metric webpage once a second until 'catalog.ready'
  becomes true or timeout_in_seconds elapses; raises RuntimeError on timeout.
  """
  start_time = time()
  catalog_ready = False
  while (time() - start_time < timeout_in_seconds and not catalog_ready):
    try:
      num_dbs = impalad.service.get_metric_value('catalog.num-databases')
      num_tbls = impalad.service.get_metric_value('catalog.num-tables')
      catalog_ready = impalad.service.get_metric_value('catalog.ready')
      print 'Waiting for Catalog... Status: %s DBs / %s tables (ready=%s)' %\
          (num_dbs, num_tbls, catalog_ready)
    except Exception, e:
      # Metric fetch may fail while the daemon is still starting; keep polling.
      print e
    sleep(1)
  if not catalog_ready:
    raise RuntimeError, 'Catalog was not initialized in expected time period.'
def wait_for_cluster_cmdline(timeout_in_seconds=CLUSTER_WAIT_TIMEOUT_IN_SECONDS):
  """Checks if the cluster is "ready" by executing a simple query in a loop.

  Fallback readiness check for environments where the web-based check cannot
  be used; retries 'select 1' via impala-shell every 2 seconds until it
  succeeds or the timeout elapses (then raises RuntimeError).
  """
  start_time = time()
  while os.system('%s -i localhost:21000 -q "%s"' % (IMPALA_SHELL, 'select 1')) != 0:
    if time() - timeout_in_seconds > start_time:
      raise RuntimeError, 'Cluster did not start within %d seconds' % timeout_in_seconds
    print 'Cluster not yet available. Sleeping...'
    sleep(2)
# Script entry point: validate the requested configuration, kill whatever is
# already running, start the requested processes, then block until the cluster
# reports ready.
if __name__ == "__main__":
  if options.kill_only:
    kill_cluster_processes(force=options.force_kill)
    sys.exit(0)
  if options.build_type not in KNOWN_BUILD_TYPES:
    print 'Invalid build type %s' % options.build_type
    print 'Valid values: %s' % ', '.join(KNOWN_BUILD_TYPES)
    sys.exit(1)
  if options.cluster_size < 0:
    print 'Please specify a cluster size >= 0'
    sys.exit(1)
  # Kill existing cluster processes based on the current configuration.
  if options.restart_impalad_only:
    if options.inprocess:
      print 'Cannot perform individual component restarts using an in-process cluster'
      sys.exit(1)
    kill_matching_processes('impalad', force=options.force_kill)
  else:
    kill_cluster_processes(force=options.force_kill)
  # Prefer the web-based readiness check; it needs the json module.
  try:
    import json
    wait_for_cluster = wait_for_cluster_web
  except ImportError:
    print "json module not found, checking for cluster startup through the command-line"
    wait_for_cluster = wait_for_cluster_cmdline
  # If ImpalaCluster cannot be imported, fall back to the command-line to check
  # whether impalads/statestore are up.
  try:
    from tests.common.impala_cluster import ImpalaCluster
    if options.restart_impalad_only:
      impala_cluster = ImpalaCluster()
      if not impala_cluster.statestored or not impala_cluster.catalogd:
        print 'No running statestored or catalogd detected. Restarting entire cluster.'
        options.restart_impalad_only = False
  except ImportError:
    print 'ImpalaCluster module not found.'
    # TODO: Update this code path to work similar to the ImpalaCluster code path when
    # restarting only impalad processes. Specifically, we should do a full cluster
    # restart if either the statestored or catalogd processes are down, even if
    # restart_only_impalad=True.
    wait_for_cluster = wait_for_cluster_cmdline
  if options.inprocess:
    # The statestore and the impalads start in the same process.
    start_mini_impala_cluster(options.cluster_size)
    wait_for_cluster_cmdline()
  else:
    try:
      if not options.restart_impalad_only:
        start_statestore()
        start_catalogd()
      start_impalad_instances(options.cluster_size)
      wait_for_cluster()
    except Exception, e:
      print 'Error starting cluster: %s' % e
      sys.exit(1)
  print 'Impala Cluster Running with %d nodes.' % options.cluster_size
| apache-2.0 |
ribag/ganeti-experiments | test/py/ganeti.serializer_unittest.py | 2 | 7648 | #!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for unittesting the serializer module"""
import doctest
import unittest
from ganeti import errors
from ganeti import ht
from ganeti import objects
from ganeti import serializer
import testutils
class TestSerializer(testutils.GanetiTestCase):
  """Serializer tests.

  Round-trips a fixed set of sample values (including a PrivateDict) through
  the generic and JSON dump/load helpers, plain and signed.
  """

  _TESTDATA = [
    "test",
    255,
    [1, 2, 3],
    (1, 2, 3),
    {"1": 2,
     "foo": "bar"},
    ["abc", 1, 2, 3, 999,
      {
        "a1": ("Hello", "World"),
        "a2": "This is only a test",
        "a3": None,
        "osparams:": serializer.PrivateDict({
          "foo": 5,
        })
      }
    ]
  ]

  def _TestSerializer(self, dump_fn, load_fn):
    # Wrap dump_fn so that private fields are included in the output;
    # otherwise loading would not round-trip the PrivateDict sample.
    _dump_fn = lambda data: dump_fn(
      data,
      private_encoder=serializer.EncodeWithPrivateFields
    )
    for data in self._TESTDATA:
      # Modernised: failUnless is a deprecated alias of assertTrue.
      self.assertTrue(_dump_fn(data).endswith("\n"))
      self.assertEqualValues(load_fn(_dump_fn(data)), data)

  def testGeneric(self):
    self._TestSerializer(serializer.Dump, serializer.Load)

  def testSignedGeneric(self):
    self._TestSigned(serializer.DumpSigned, serializer.LoadSigned)

  def testJson(self):
    self._TestSerializer(serializer.DumpJson, serializer.LoadJson)

  def testSignedJson(self):
    self._TestSigned(serializer.DumpSignedJson, serializer.LoadSignedJson)

  def _TestSigned(self, dump_fn, load_fn):
    _dump_fn = lambda *args, **kwargs: dump_fn(
      *args,
      private_encoder=serializer.EncodeWithPrivateFields,
      **kwargs
    )
    for data in self._TESTDATA:
      self.assertEqualValues(load_fn(_dump_fn(data, "mykey"), "mykey"),
                             (data, ""))
      self.assertEqualValues(load_fn(_dump_fn(data, "myprivatekey",
                                              salt="mysalt"),
                                     "myprivatekey"),
                             (data, "mysalt"))

      keydict = {
        "mykey_id": "myprivatekey",
      }
      self.assertEqualValues(load_fn(_dump_fn(data, "myprivatekey",
                                              salt="mysalt",
                                              key_selector="mykey_id"),
                                     keydict.get),
                             (data, "mysalt"))
      # Unknown key selector must be rejected.
      self.assertRaises(errors.SignatureError, load_fn,
                        _dump_fn(data, "myprivatekey",
                                 salt="mysalt",
                                 key_selector="mykey_id"),
                        {}.get)

    # Wrong verification key must be rejected.
    self.assertRaises(errors.SignatureError, load_fn,
                      _dump_fn("test", "myprivatekey"),
                      "myotherkey")

    # Unsigned payloads must be rejected.
    self.assertRaises(errors.SignatureError, load_fn,
                      serializer.DumpJson("This is a test"), "mykey")
    self.assertRaises(errors.SignatureError, load_fn,
                      serializer.DumpJson({}), "mykey")

    # Message missing salt and HMAC
    tdata = { "msg": "Foo", }
    self.assertRaises(errors.SignatureError, load_fn,
                      serializer.DumpJson(tdata), "mykey")
class TestLoadAndVerifyJson(unittest.TestCase):
  """Tests for serializer.LoadAndVerifyJson."""

  def testNoJson(self):
    # Invalid JSON input must raise ParseError regardless of the verifier.
    self.assertRaises(errors.ParseError, serializer.LoadAndVerifyJson,
                      "", NotImplemented)
    self.assertRaises(errors.ParseError, serializer.LoadAndVerifyJson,
                      "}", NotImplemented)

  def testVerificationFails(self):
    # A verifier returning False must turn into ParseError.
    self.assertRaises(errors.ParseError, serializer.LoadAndVerifyJson,
                      "{}", lambda _: False)

    # The error message must mention the failing verification function.
    verify_fn = ht.TListOf(ht.TNonEmptyString)
    try:
      serializer.LoadAndVerifyJson("{}", verify_fn)
    except errors.ParseError, err:
      self.assertTrue(str(err).endswith(str(verify_fn)))
    else:
      self.fail("Exception not raised")

  def testSuccess(self):
    self.assertEqual(serializer.LoadAndVerifyJson("{}", ht.TAny), {})
    self.assertEqual(serializer.LoadAndVerifyJson("\"Foo\"", ht.TAny), "Foo")
class TestPrivate(unittest.TestCase):
  """Tests for the Private/PrivateDict value-hiding helpers."""

  def testEquality(self):
    pDict = serializer.PrivateDict()
    pDict["bar"] = "egg"
    nDict = {"bar": "egg"}
    self.assertEqual(pDict, nDict, "PrivateDict-dict equality failure")

  def testPrivateDictUnprivate(self):
    pDict = serializer.PrivateDict()
    pDict["bar"] = "egg"
    uDict = pDict.Unprivate()
    nDict = {"bar": "egg"}
    # Modernised: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(type(uDict), dict,
                     "PrivateDict.Unprivate() did not return a dict")
    self.assertEqual(pDict, uDict, "PrivateDict.Unprivate() equality failure")
    self.assertEqual(nDict, uDict, "PrivateDict.Unprivate() failed to return")

  def testAttributeTransparency(self):
    # Private must proxy attribute access to the wrapped object.
    class Dummy(object):
      pass

    dummy = Dummy()
    dummy.bar = "egg"
    pDummy = serializer.Private(dummy)
    self.assertEqual(pDummy.bar, "egg", "Failed to access attribute of Private")

  def testCallTransparency(self):
    # Private must proxy method calls to the wrapped object.
    foo = serializer.Private("egg")
    self.assertEqual(foo.upper(), "EGG", "Failed to call Private instance")

  def testFillDict(self):
    pDict = serializer.PrivateDict()
    pDict["bar"] = "egg"
    self.assertEqual(pDict, objects.FillDict({}, pDict))

  def testLeak(self):
    # The private value must never appear in any textual representation.
    pDict = serializer.PrivateDict()
    pDict["bar"] = "egg"
    self.assertTrue("egg" not in str(pDict), "Value leaked in str(PrivateDict)")
    self.assertTrue("egg" not in repr(pDict), "Value leak in repr(PrivateDict)")
    self.assertTrue("egg" not in "{0}".format(pDict),
                    "Value leaked in PrivateDict.__format__")
    self.assertTrue("egg" not in serializer.Dump(pDict),
                    "Value leaked in serializer.Dump(PrivateDict)")

  def testProperAccess(self):
    # Explicit accessors must return the very same wrapped object.
    pDict = serializer.PrivateDict()
    pDict["bar"] = "egg"
    self.assertTrue("egg" is pDict["bar"].Get(),
                    "Value not returned by Private.Get()")
    self.assertTrue("egg" is pDict.GetPrivate("bar"),
                    "Value not returned by Private.GetPrivate()")
    self.assertTrue("egg" is pDict.Unprivate()["bar"],
                    "Value not returned by PrivateDict.Unprivate()")
    json = serializer.Dump(pDict,
                           private_encoder=serializer.EncodeWithPrivateFields)
    self.assertTrue("egg" in json)

  def testDictGet(self):
    result = serializer.PrivateDict().GetPrivate("bar", "tar")
    self.assertTrue("tar" is result,
                    "Private.GetPrivate() did not handle the default case")

  def testZeronessPrivate(self):
    # Truthiness must follow the wrapped value.
    self.assertTrue(serializer.Private("foo"),
                    "Private of non-empty string is false")
    self.assertFalse(serializer.Private(""), "Private empty string is true")
class TestCheckDoctests(unittest.TestCase):
  """Runs the doctests embedded in the serializer module."""

  def testCheckSerializer(self):
    results = doctest.testmod(serializer)
    # Modernised: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(results.failed, 0, "Doctest failures detected")
# Standard ganeti test entry point.
if __name__ == "__main__":
  testutils.GanetiTestProgram()
| gpl-2.0 |
avanzosc/avanzosc6.1 | avanzosc_tree_grid_ext/__openerp__.py | 1 | 1850 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2008-2013 AvanzOSC S.L. All Rights Reserved
# Date: 01/07/2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
# OpenERP addon manifest: a single module-level dict read by the addon loader;
# keys follow the standard __openerp__.py schema.
{
    "name": "AvanzOSC - tree_grid extension",
    "version": "1.0",
    "depends": ["tree_grid","sale","purchase","stock","account","avanzosc_calculate_coeficient_udv_automatically"],
    "author": "AvanzOSC S.L.",
    "category": "Generic",
    "description": """
    Este módulo añade la unidad de venta, y cantidad de venta editables en los tree de
    líneas de pedido de compra, y de venta, líneas de factura, y líneas de albaranes.
    """,
    "init_xml": [],
    # Views loaded on module install/update.
    'update_xml': ['sale_order_view_ext.xml',
                   'purchase_order_view_ext.xml',
                   'stock_picking_view_ext.xml',
                   'account_invoice_view_ext.xml',
                   'product_product_view_ext.xml'
                   ],
    'demo_xml': [],
    'installable': True,
    'active': False,
    # 'certificate': 'certificate',
}
} | agpl-3.0 |
chun337163833/firesim | bin/lime.py | 2 | 14585 | #!/usr/bin/env python
"""Utilities for common tasks needed to use lime framework.
"""
import optparse
import subprocess
import logging
import sys
import os.path
import zipfile
import re
import shutil
import fileinput
import mimetypes
from os.path import join, splitext, split, exists
from shutil import copyfile
from datetime import datetime
import base64
import json
if sys.version_info[0]==3:
from urllib.request import urlretrieve
else :
from urllib import urlretrieve
# Repository layout: all paths are resolved relative to the checkout root
# (the parent of the bin/ directory containing this script).
basedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
curdir = os.path.abspath('.')
closure_dir = os.path.join(basedir,'closure')
closure_deps_file = os.path.join(closure_dir,'closure/goog/deps.js')
box2d_dir = os.path.join(basedir,'box2d')
# Downloaded external tools (Closure Compiler, Soy compiler) live here.
extdir = join(basedir,'bin/external')
compiler_path = os.path.join(extdir,'compiler-20130411.jar')
soy_path = os.path.join(extdir,'SoyToJsSrcCompiler.jar')
# Registry of user project directories (one relative path per line).
projects_path = join(basedir,'bin/projects')

# zipfile.extract & os.path.relpath missing in 2.5
if sys.version_info < (2,6):
    print("Error. Python 2.6+ is required")
    sys.exit(1)
def removeDupes(seq):
    """Return the unique right-stripped entries of *seq*.

    Order is not preserved (the result comes from a dict's key view).
    """
    return dict.fromkeys(entry.rstrip() for entry in seq).keys()
def makeProjectPaths(add):
    """Rewrite the projects registry file, optionally adding one entry.

    Drops entries whose directory no longer exists, de-duplicates, and
    writes the result back to projects_path.  'add' may be '' for a pure
    cleanup pass.
    """
    lines = open(projects_path,'r').readlines()
    if len(add):
        lines.append(add)
    # Keep only non-empty entries that still point at an existing directory.
    newlines = filter(lambda x: exists(join(basedir,x.rstrip())) and len(x.rstrip()),lines)
    newlines = removeDupes(newlines)
    f = open(projects_path,'w')
    f.write('\n'.join(newlines))
    f.close()
def rephook(a,b,c):
    """urlretrieve progress hook: rewrite the current line with a percentage.

    a -- number of blocks transferred, b -- block size, c -- total size.
    """
    percent = (100 * a * b) / c
    sys.stdout.write("\r%2d%%" % percent)
    sys.stdout.flush()
def escapeSpace(s):
    """Backslash-escape every space in *s* (for shell command lines)."""
    return "\\ ".join(s.split(" "))
def quoteSpace(s):
    """Replace every space in *s* with "' '" (shell single-quote splice)."""
    return "' '".join(s.split(" "))
def checkDependencies():
    """Ensure all external tools lime needs are present, downloading if not.

    Verifies git, clones the Closure Library and Box2D, and downloads the
    Closure Compiler and Soy template compiler jars into bin/external.
    Exits the process on any failure.
    """
    #Check git
    retcode = subprocess.Popen(subprocess.list2cmdline(["git","--version"]), stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True).wait()
    if retcode!=0:
        logging.error('Lime requires git. Get it from http://git-scm.com/download')
        sys.exit(1)

    #Closure Library
    if not (os.path.exists(closure_dir) and os.path.exists(closure_deps_file)):
        print ('Closure Library not found. Downloading to %s' % closure_dir)
        print ('Please wait...')
        retcode = subprocess.Popen(subprocess.list2cmdline(["git","clone","https://code.google.com/p/closure-library/",closure_dir]),shell=True).wait()
        if(retcode!=0):
            print ('Failed to clone Closure Library via Git. Discontinuing.')
            sys.exit(1)

    #Box2D
    if not os.path.exists(box2d_dir):
        print ('Box2DJS not found. Downloading to %s' % box2d_dir)
        print ('Please wait...')
        retcode = subprocess.Popen(subprocess.list2cmdline(["git","clone","https://github.com/thinkpixellab/pl.git",box2d_dir]),shell=True).wait()
        if(retcode!=0):
            logging.error('Error while downloading Box2D. Discontinuing.')
            sys.exit(1)

    #External tools dir
    if not os.path.exists(extdir):
        os.mkdir(extdir)

    #Closure compiler
    if not os.path.exists(compiler_path):
        zip_path = os.path.join(extdir,'compiler.zip')
        print ('Downloading Closure Compiler: ')
        urlretrieve("http://closure-compiler.googlecode.com/files/compiler-20130411.zip",zip_path,rephook)
        print ('\nUnzipping...')
        zippedFile = zipfile.ZipFile(zip_path)
        zippedFile.extract('compiler.jar',extdir)
        zippedFile.close()
        print ('Cleanup')
        os.unlink(zip_path)
        os.rename(os.path.join(extdir,'compiler.jar'), compiler_path)

    #Closure Templates
    if not os.path.exists(soy_path):
        zip_path = os.path.join(extdir,'soy.zip')
        print ('Downloading Closure Templates(Soy):')
        urlretrieve("http://closure-templates.googlecode.com/files/closure-templates-for-javascript-latest.zip",
            zip_path,rephook)
        print ('\nUnzipping...')
        zippedFile = zipfile.ZipFile(zip_path)
        zippedFile.extract('SoyToJsSrcCompiler.jar',extdir)
        zippedFile.close()
        print ('Cleanup')
        os.unlink(zip_path)

    # Make sure the projects registry exists, then prune stale entries.
    if not os.path.exists(projects_path):
        open(projects_path,'w').close()
    makeProjectPaths('')
def update():
    """Regenerate the Closure deps.js file for all registered projects.

    If run from inside a project directory, that directory is first added
    to the projects registry.  Must be re-run whenever goog.provide() or
    goog.require() statements change.
    """
    reldir = os.path.relpath(curdir,basedir)
    if reldir!='.':
        makeProjectPaths(reldir)

    print ('Updating Closure deps file')
    paths = open(projects_path,'r').readlines()
    paths.append('lime\n')
    paths.append('box2d/src\n')
    opt = ' '.join(map(lambda x: '--root_with_prefix="'+quoteSpace(os.path.join(basedir,x.rstrip()))+'/ ../../../'+x.rstrip()+'/"',paths))
    call = 'python ' + escapeSpace(os.path.join(closure_dir,'closure/bin/build/depswriter.py'))+' --root_with_prefix="'+\
        quoteSpace(closure_dir)+'/ ../../" '+opt+' --output_file="'+closure_deps_file+'"'
    print (call)
    subprocess.call(call,shell=True)
def create(name):
    """Create a new project from the default template.

    Copies lime/templates/default to the given path, substituting
    '__name__' in file names and '{name}' inside file contents with the
    project name, then registers the project and refreshes deps.
    """
    path = os.path.join(curdir,name)
    if exists(path):
        logging.error('Directory already exists: %s',path)
        sys.exit(1)

    name = os.path.basename(path)
    proj = os.path.relpath(path,basedir)
    shutil.copytree(os.path.join(basedir,'lime/templates/default'),path)
    for root, dirs, files in os.walk(path):
        for fname in files:
            newname = fname.replace('__name__',name)
            if fname.find("__name__")!=-1:
                os.rename(os.path.join(path,fname),os.path.join(path,newname))
            # fileinput with inplace=1 redirects stdout into the file.
            for line in fileinput.FileInput(os.path.join(path,newname),inplace=1):
                line = line.replace('{name}',name)
                print(line.rstrip())

    print ('Created %s' % path)
    if proj!='.':
        makeProjectPaths(os.path.relpath(path,basedir))
    update()
def makeSoyJSFile(path,stringbuilder):
    """Compile one .soy template file to <path>.js via SoyToJsSrcCompiler.

    stringbuilder -- when False, pass --codeStyle concat to the compiler.
    Non-.soy paths are silently ignored.
    """
    if path[-4:]=='.soy':
        call = "java -jar "+soy_path+" --cssHandlingScheme goog --shouldProvideRequireSoyNamespaces --outputPathFormat "+path+".js "
        if not stringbuilder:
            call+= "--codeStyle concat "
        call += path;
        print (call)
        subprocess.call(call,shell=True)
def genSoy(path):
    """Convert a file (or every .soy file under a directory) to Soy JS.

    For a single file the behaviour depends on its type: .soy files are
    compiled directly; .json files become a JS asset wrapping the parsed
    data; image/audio/video files are embedded as base64 data URIs; any
    other file is wrapped verbatim (with braces escaped) into a template.
    """
    if not os.path.exists(path):
        logging.error('No such directory %s',path)
        exit(1)

    if os.path.isfile(path):
        mtype = mimetypes.guess_type(path)[0]
        fname = split(path)[1]
        if path[-4:]=='.soy':
            makeSoyJSFile(path,True)
        elif path[-5:]=='.json':
            # Minify the JSON and emit a JS function that re-parses it.
            infile= open(path,'r')
            outfile = open(path+'.js','w')
            outfile.write('goog.provide(\'lime.ASSETS.'+fname+'\');\ngoog.require(\'soy\');\n\n'+ \
                'lime.ASSETS.'+fname+'.data = function(opt_data) { \nreturn JSON.parse("'+ json.dumps(json.loads(infile.read()), separators=(',',':')).replace("\"", "\\\"")+'");\n}')
            infile.close()
            outfile.close()
        elif mtype and ['image','audio','video'].count(mtype.split('/')[0]):
            # NOTE(review): the media file is opened in text mode ('r') and
            # passed to base64.b64encode -- fine on Python 2, but on Python 3
            # this would need 'rb'; confirm which interpreter is targeted.
            infile= open(path,'r')
            outfile = open(path+'.soy','w')
            outfile.write('{namespace lime.ASSETS.'+fname+'}\n\n/**\n * Generated with "bin/lime.py gensoy filepath"\n */\n{template .data}\n{literal}')
            outfile.write('data:'+mtype+';base64,')
            outfile.write(base64.b64encode(infile.read()))
            outfile.write('{/literal}\n{/template}\n')
            infile.close()
            outfile.close()
            makeSoyJSFile(path+'.soy',False)
        else :
            # Escape literal braces via placeholders so Soy's {lb}/{rb}
            # substitution does not clash with braces in the source text.
            outfile = open(path+'.soy','w')
            outfile.write('{namespace lime.ASSETS.'+fname+'}\n\n/**\n * Generated with "bin/lime.py gensoy filepath"\n */\n{template .data}\n')
            for line in fileinput.FileInput(path):
                line = line.replace('{','[[LB_POS]]')
                line = line.replace('}','[[RB_POS]]')
                line = line.replace('[[LB_POS]]','{lb}')
                line = line.replace('[[RB_POS]]','{rb}')
                outfile.write(line);
            outfile.write('\n{/template}\n')
            outfile.close()
            makeSoyJSFile(path+'.soy',False)
    else:
        # Directory: compile every .soy file found anywhere below it.
        for root,dirs,files in os.walk(path):
            for fname in files:
                if fname[-4:]=='.soy':
                    soypath = os.path.join(root,fname)
                    makeSoyJSFile(soypath,False)
    update()
def build(name,options):
    """Compile a project into a single JavaScript file via closurebuilder.

    name -- the goog namespace to use as the compilation entry point.
    options -- parsed optparse options (advanced mode, externs, output
    path, source map, strict mode, defines, preloader generation).
    """
    dir_list = open(projects_path,'r').readlines()
    dir_list.append('lime')
    dir_list.append('box2d/src')
    dir_list.append('closure')
    #dir_list = filter(lambda x: os.path.isdir(os.path.join(basedir,x)) and ['.git','bin','docs'].count(x)==0 ,os.listdir(basedir))
    opt = ' '.join(map(lambda x: '--root="'+os.path.join(basedir,x.rstrip())+'/"',dir_list))
    call = 'python ' + escapeSpace(os.path.join(closure_dir,'closure/bin/build/closurebuilder.py'))+' '+opt+' --namespace="'+name+'" '+\
        '-o compiled -c '+compiler_path;

    if options.advanced:
        call+=" -f --compilation_level=ADVANCED_OPTIMIZATIONS"

    if options.externs_file:
        for i, opt in enumerate(options.externs_file):
            call+=" -f --externs="+opt

    # Normalise the output name to end in .js.
    outname = options.output
    if options.output[-3:] != '.js':
        outname += '.js'

    if options.map_file:
        # Source maps imply pretty-printed output.
        call+=" -f --formatting=PRETTY_PRINT -f --source_map_format=V3 -f --create_source_map="+outname+'.map'
    else:
        call+=" -f --define='goog.DEBUG=false'"

    if options.use_strict:
        call+=" -f --language_in=ECMASCRIPT5_STRICT"

    if options.define:
        for i, opt in enumerate(options.define):
            call+=" -f --define='"+opt+"'"

    if options.output:
        call+=' --output_file="'+outname+'"'
        if not exists(os.path.dirname(outname)):
            os.makedirs(os.path.dirname(outname))

    errhandle = 0
    try:
        subprocess.check_call(call, shell=True);
    except subprocess.CalledProcessError:
        # handle error later (after map/preloader post-processing)
        errhandle = 1
        pass

    if options.map_file:
        map_filename = outname+'.map'
        map_file = open(map_filename, 'r+')
        # make source paths relative in map file
        # NOTE(review): under Python 3 'map(...)' here would leave a map
        # object in data['sources'], which json.dump cannot serialise --
        # presumably this script runs under Python 2; confirm.
        data = json.load(map_file)
        data['sources'] = map(lambda p: os.path.relpath(p, os.path.dirname(map_filename)), data['sources'])
        map_file.close()
        map_file = open(map_filename, 'w')
        json.dump(data, map_file)
        map_file.close()
        # add path to map file
        out_file = open(outname, 'a')
        out_file.write('\n//@ sourceMappingURL=' + os.path.relpath(map_filename, os.path.dirname(outname)))
        out_file.close()

    if options.output and options.preload:
        # Generate the preloader wrapper files next to the compiled output,
        # substituting the project name and start-up callback.
        name = os.path.basename(outname)[:-3]
        target = os.path.dirname(outname)
        source = os.path.join(basedir,'lime/templates/preloader')
        for root, dirs, files in os.walk(source):
            for fname in files:
                from_ = join(root, fname)
                to_ = from_.replace(source, target, 1)
                to_directory = split(to_)[0]
                to_ = to_.replace('__name__',name)
                if not exists(to_directory):
                    os.makedirs(to_directory)
                if not exists(to_):
                    copyfile(from_, to_)
        for root, dirs, files in os.walk(target):
            for fname in files:
                if exists(os.path.join(target,fname)):
                    for line in fileinput.FileInput(os.path.join(target,fname),inplace=1):
                        line = line.replace('{name}',name)
                        line = line.replace('{callback}',options.preload)
                        if fname == name+'.manifest':
                            # Touch the manifest timestamp so browsers refresh the appcache.
                            line = re.sub(r'# Updated on:.*','# Updated on: '+datetime.now().strftime("%Y-%m-%d %H:%M:%S"),line)
                        print(line.rstrip())

    if errhandle == 1:
        exit(1)
def main():
    """The entrypoint for this script.

    Parses the command line, makes sure all dependencies are available,
    then dispatches to the requested sub-command.
    """
    usage = """usage: %prog [command] [options]

Commands:
    init                 Check lime dependecies and setup if needed
    update               Update Closure dependency file. Need to run every time you
                         change goog.provide() or goog.require()
    create [path/name]   Setup new project [name]
    gensoy [path]        Convert all *.soy files under path to *.soy.js files
    build [name]         Compile project to single Javascript file"""

    parser = optparse.OptionParser(usage)
    parser.add_option("-a", "--advanced", dest="advanced", action="store_true",
        help="Build uses ADVANCED_OPTIMIZATIONS mode (encouraged)")
    parser.add_option('-e', '--externs', dest="externs_file", action='append',
        help="File with externs declarations.")
    parser.add_option("-o", "--output", dest="output", action="store", type="string",
        help="Output file for build result")
    parser.add_option("-m", "--map", dest="map_file", action="store_true",
        help="Build result sourcemap for debugging. Also turns on pretty print.")
    parser.add_option("-s", "--use-strict", dest="use_strict", action="store_true",
        help="Use EcmaScript5 strict mode.")
    parser.add_option("-p", "--preload", dest="preload", action="store", type="string",
        help="Generate preloader code with given callback as start point.")
    parser.add_option("-d", "--define", dest="define", action="append",
        help="Define custom variable accessible before build.")

    (options, args) = parser.parse_args()

    # Exactly one command, plus one argument for create/gensoy/build.
    if not (len(args) == 2 or (len(args)==1 and ['init','update'].count(args[0])==1 )) :
        parser.error('incorrect number of arguments')

    checkDependencies()

    if args[0]=='init' or args[0]=='update':
        update()
    elif args[0]=='create':
        create(args[1])
    elif args[0]=='gensoy':
        genSoy(args[1])
    elif args[0]=='build':
        build(args[1],options)
    else:
        logging.error('No such command: %s',args[0])
        exit(1)
# Script entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
bigfatpaulyj/py-airfoil | scons-local-2.2.0/SCons/compat/_scons_collections.py | 14 | 1905 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
collections compatibility module for older (pre-2.4) Python versions
This does not not NOT (repeat, *NOT*) provide complete collections
functionality. It only wraps the portions of collections functionality
used by SCons, in an interface that looks enough like collections for
our purposes.
"""
__revision__ = "src/engine/SCons/compat/_scons_collections.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
# Use exec to hide old names from fixers.
exec("""if True:
from UserDict import UserDict
from UserList import UserList
from UserString import UserString""")
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
lordtangent/arsenalsuite | python/scripts/missing_timesheet_handler.py | 10 | 3347 | #!/usr/bin/python
from PyQt4.QtCore import *
from blur.Stone import *
from blur.Classes import *
import sys, re, blur.email, blur.jabber
# Module-wide state: the Qt application instance (created in init()) and the
# -dry-run flag (when True, no jabber messages or emails are actually sent).
app = None
dryRun = False
def init():
global app
global dryRun
# First Create a Qt Application
app = QCoreApplication(sys.argv)
# Load database config
if sys.platform=='win32':
initConfig("c:\\blur\\resin\\resin.ini")
else:
initConfig("/etc/missing_timesheet_handler.ini", "/var/log/ab/missing_timesheet_handler.log")
config().readFromFile( "/etc/db.ini", False ) #Backup read, in case of no missing_timesheet_handler.ini
classes_loader()
Database.current().connection().reconnect()
if '-dry-run' in sys.argv:
dryRun = True
def checkTimesheetOnDate( user, date ):
    """Return the number of timesheets 'user' has posted for 'date' (0 if none)."""
    tsl = TimeSheet.select( "fkeyemployee=? AND datetime=?", [QVariant(user.key()),QVariant(date)] )
    return tsl.size()
# Cached 'Vacation' asset type record, resolved lazily on first use.
vacationCat = None

# Returns true if vacation timesheets were created for this user/date
def checkVacationMigration( user, date ):
    """If the user has a vacation schedule entry on 'date', create a matching
    8-hour timesheet against the _virtual_project and return True; otherwise
    return False."""
    global vacationCat
    if vacationCat is None:
        vacationCat = AssetType.recordByName( 'Vacation' )
    virtualProject = Project.recordByName( "_virtual_project" );
    vacSchedules = Schedule.select( "fkeyassettype=? AND date=? AND fkeyuser=?", [QVariant(vacationCat.key()), QVariant(date), QVariant(user.key())] )
    if vacSchedules.isEmpty():
        return False
    # Found a vacation schedule entry: materialise it as a timesheet.
    ts = TimeSheet()
    ts.setUser(user)
    ts.setDateTime(QDateTime(date))
    ts.setScheduledHour( 8 )
    ts.setElement( virtualProject )
    ts.setProject( virtualProject )
    ts.commit()
    print "Created Vacation Timesheet from vacation schedule for ", user.fullName(), date.toString()
    return True
def checkTimesheetsForUser( user, firstDate, lastDate ):
    """Return the workdays in [firstDate, lastDate] with no timesheet.

    A day only counts if it is a workday, falls after the user's hire
    date, has no posted timesheet, and could not be auto-filled from a
    vacation schedule entry (checkVacationMigration has the side effect
    of creating that timesheet when it applies).
    """
    missingDates = []
    day = firstDate
    while day <= lastDate:
        # Short-circuit order matters: checkVacationMigration may create a
        # timesheet, so it must run last, only for otherwise-missing days.
        missing = (isWorkday(day)
                   and user.dateOfHire() < day
                   and checkTimesheetOnDate(user, day) == 0
                   and not checkVacationMigration(user, day))
        if missing:
            missingDates.append(day)
        day = day.addDays(1)
    return missingDates
def getUsersToCheck():
    """Return the employees whose timesheets should be audited.

    Excludes disabled/terminated accounts and members of groups that are
    exempt from timesheet tracking (IT, Design, HR, Production,
    No_Timesheets).
    """
    return Employee.select( "WHERE coalesce(disabled,0)=0 and dateoftermination is null and dateofhire is not null and keyelement not in (SELECT fkeyusr from usrgrp WHERE fkeygrp IN(SELECT keygrp FROM grp WHERE grp IN ('IT','Design','HR','Production','No_Timesheets')))" )
    #ret = EmployeeList()
    #for name in ['newellm','duane']:
    #   ret += Employee.recordByUserName(name)
    #return ret
def main():
    """Audit the last month of timesheets and notify offending users.

    For each user with missing timesheets, sends a jabber reminder (unless
    dryRun) and finally emails a summary log to the administrators.
    """
    # Check the last 30 days
    currentDate = QDate.currentDate()
    # Skip the most recent 3 days to give people time to post.
    lastDate = currentDate.addDays(-3)
    firstDate = lastDate.addDays(-31)
    msgLog = ''
    for user in getUsersToCheck():
        if not user.isRecord(): continue
        missingDates = checkTimesheetsForUser( user, firstDate, lastDate )
        if missingDates:
            msg = "missing timesheets on the following dates\n" + '\n'.join([str(date.toString()) for date in missingDates])
            print "User", user.displayName(), "is", msg
            msg = "You have " + msg + "\nPlease open Resin and post your time." #+ "\nresin://viewName=Calendar"
            jid = str(user.name() + '@jabber.blur.com')
            if not dryRun:
                blur.jabber.send('thepipe@jabber.blur.com/Timesheets','thePIpe', jid, msg )
            msgLog += 'Jabber sent to ' + jid + '\n' + msg + '\n\n'
    print msgLog
    if not dryRun:
        blur.email.send( 'thePipe@blur.com', ['newellm@blur.com','duane@blur.com','pat@blur.com'], 'Missing Timesheet Jabber Log', msgLog )
# Script entry point: set up the environment, then run the audit.
if __name__=="__main__":
    init()
    main()
| gpl-2.0 |
lukasgartmair/3Depict_Isosurfaces | docs/manual-latex/python-example.py | 2 | 1189 | #!/usr/bin/python
import sys
import os
#Append the contents of one file to another
def appendFile(sourceFile,targetFile):
try :
fileSrc = open(sourceFile,"rb")
fileTarget = open(targetFile,"ab")
#Extremely inefficient!!
byte = fileSrc.read(1)
while byte != "" :
fileTarget.write(byte)
byte=fileSrc.read(1)
except IOError:
return 1
return 0
def main():
    """Merge every .pos file given on the command line into output.pos.

    Returns 0 on success (or when given no arguments), 1 if appending any
    input file fails.  Non-.pos arguments are skipped with a message.
    """
    argv = sys.argv
    #Name of file that we will dump our results to
    OUTPUT_POSFILE="output.pos"

    #Remove any old files from previous runs
    if os.path.isfile(OUTPUT_POSFILE) :
        os.remove(OUTPUT_POSFILE)

    # do nothing if we have no arguments
    if(len(argv) < 2) :
        return 0;

    #Loop over all our inputs, then for .pos files,
    # create one big file with all data merged
    for i in argv[1:] :
        print "given file :" + i
        fileExt = i[-3:];
        if fileExt == "pos" :
            if appendFile(i,OUTPUT_POSFILE):
                return 1; #Output to file failed, for some reason
            else :
                print "appended file to " + OUTPUT_POSFILE
        else :
            #Filename did not end in .pos, lets ignore it.
            print "File :" + i + " does not appear to be a pos file";
    return 0
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
ufal/lindat-kontext | lib/plugins/default_token_connect/backends/__init__.py | 2 | 5098 | # Copyright (c) 2017 Charles University, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2017 Tomas Machalek <tomas.machalek@gmail.com>
# Copyright (c) 2017 Petr Duda <petrduda@seznam.cz>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import httplib
import urllib
import logging
import sqlite3
from plugins.default_token_connect.backends.cache import cached
from plugins.abstract.token_connect import AbstractBackend, BackendException
class SQLite3Backend(AbstractBackend):
    """Token-connect backend that answers queries from a local SQLite database.

    The configuration dict must provide:
      * ``path``  -- filesystem path of the SQLite database file
      * ``query`` -- parameterized SQL taking (word, lemma) placeholders
    """

    def __init__(self, conf, ident):
        super(SQLite3Backend, self).__init__(ident)
        self._db = sqlite3.connect(conf['path'])
        self._query_tpl = conf['query']

    @cached
    def fetch(self, corpora, token_id, num_tokens, query_args, lang):
        """Run the configured query; return a (data, found) 2-tuple.

        ``data`` is the first column of the first matching row, or an empty
        string when nothing matched.
        """
        row = self._db.cursor().execute(
            self._query_tpl, (query_args['word'], query_args['lemma'])).fetchone()
        if row is None:
            return '', False
        return row[0], True
class HTTPBackend(AbstractBackend):
    """
    The default_token_connect's JSON config file defines a template of an abstract path identifying a resource.
    It can be a URL path, SQL or a filesystem path. Such a template can use values defined in conf.attrs. Structural
    attribute names are accessed like this: struct[attr]. E.g. attrs = ["word", "lemma", "doc.id"] can be used in
    a URL template like this: my_server/my/path?word={word}&lemma={lemma}&docid={doc[id]}.

    There are also some predefined attributes (with lower priority, i.e. you can overwrite then with your attrs spec.):
    - ui_lang
    - corpus
    - corpus2 (= first aligned corpus)
    - token_id (numeric token index specifies an absolute order of the token in corpus)
    - num_tokens (mainly for multi-word kwics)
    """

    def __init__(self, conf, ident):
        super(HTTPBackend, self).__init__(ident)
        # Expected conf keys (per usage below): 'server', 'port', 'ssl',
        # 'path' and 'attrs' (or the deprecated 'posAttrs').
        self._conf = conf

    @staticmethod
    def _is_valid_response(response):
        # Both 2xx (success) and 4xx (client error / not found) are treated as
        # valid, answerable responses; anything else is a hard failure.
        return response and (200 <= response.status < 300 or 400 <= response.status < 500)

    @staticmethod
    def _is_found(response):
        # Only 2xx statuses mean the remote resource actually exists.
        return 200 <= response.status < 300

    def create_connection(self):
        """Open an HTTP or HTTPS connection to the configured server (15s timeout)."""
        if self._conf['ssl']:
            return httplib.HTTPSConnection(
                self._conf['server'], port=self._conf['port'], timeout=15)
        else:
            return httplib.HTTPConnection(
                self._conf['server'], port=self._conf['port'], timeout=15)

    def process_response(self, connection):
        """Read the pending response; return (utf-8 body, found_flag).

        Raises a generic Exception for statuses outside 2xx/4xx.
        """
        response = connection.getresponse()
        if self._is_valid_response(response):
            logging.getLogger(__name__).debug(
                u'HTTP Backend response status: {0}'.format(response.status))
            return response.read().decode('utf-8'), self._is_found(response)
        else:
            raise Exception('Failed to load the data - error {0}'.format(response.status))

    @staticmethod
    def enc_val(s):
        # URL-quote a value; Python 2 unicode is UTF-8-encoded first so
        # urllib.quote receives a byte string.
        if type(s) is unicode:
            return urllib.quote(s.encode('utf-8'))
        return urllib.quote(s)

    def get_required_attrs(self):
        """Return the list of positional/structural attributes the template needs.

        Supports the deprecated 'posAttrs' key with a warning; 'attrs' is the
        current spelling.
        """
        if 'posAttrs' in self._conf:
            logging.getLogger(__name__).warning(
                'You are using a deprecated "conf.posAttr" value; please use "conf.attrs" instead.')
            return self._conf.get('posAttrs', [])
        else:
            return self._conf.get('attrs', [])

    @cached
    def fetch(self, corpora, token_id, num_tokens, query_args, lang):
        """Build the templated query path, issue a GET and return (data, found)."""
        connection = self.create_connection()
        try:
            # Merge the predefined attributes (ui_lang, corpus, corpus2,
            # token_id, num_tokens) with the caller-supplied query_args;
            # nested dicts (struct[attr] values) are URL-quoted one level deep.
            args = dict(
                ui_lang=self.enc_val(lang), corpus=self.enc_val(corpora[0]),
                corpus2=self.enc_val(corpora[1] if len(corpora) > 1 else ''),
                token_id=token_id, num_tokens=num_tokens,
                **dict((k, dict((k2, self.enc_val(v2)) for k2, v2 in v.items()) if type(v) is dict else self.enc_val(v)
                        ) for k, v in query_args.items()))
            logging.getLogger(__name__).debug('HTTP Backend args: {0}'.format(args))
            try:
                query_string = self._conf['path'].format(**args).encode('utf-8', 'replace')
            except KeyError as ex:
                # The template referenced a placeholder we have no value for.
                raise BackendException(u'Failed to build query - value {0} not found'.format(ex))
            connection.request('GET', query_string)
            return self.process_response(connection)
        finally:
            connection.close()
| gpl-2.0 |
tchellomello/home-assistant | tests/components/homekit_controller/test_switch.py | 21 | 4751 | """Basic checks for HomeKitSwitch."""
from aiohomekit.model.characteristics import (
CharacteristicsTypes,
InUseValues,
IsConfiguredValues,
)
from aiohomekit.model.services import ServicesTypes
from tests.components.homekit_controller.common import setup_test_component
def create_switch_service(accessory):
    """Add an OUTLET service whose ON and OUTLET_IN_USE characteristics start False."""
    outlet = accessory.add_service(ServicesTypes.OUTLET)
    for char_type in (CharacteristicsTypes.ON, CharacteristicsTypes.OUTLET_IN_USE):
        char = outlet.add_char(char_type)
        char.value = False
def create_valve_service(accessory):
    """Add a VALVE service: inactive, in use, configured, 99s remaining."""
    valve = accessory.add_service(ServicesTypes.VALVE)
    valve.add_char(CharacteristicsTypes.ACTIVE).value = False
    valve.add_char(CharacteristicsTypes.IN_USE).value = InUseValues.IN_USE
    valve.add_char(CharacteristicsTypes.IS_CONFIGURED).value = IsConfiguredValues.CONFIGURED
    valve.add_char(CharacteristicsTypes.REMAINING_DURATION).value = 99
async def test_switch_change_outlet_state(hass, utcnow):
    """Test that we can turn a HomeKit outlet on and off again."""
    helper = await setup_test_component(hass, create_switch_service)
    on_char = helper.characteristics[("outlet", "on")]
    # Drive the switch through both service calls and check the underlying
    # HomeKit characteristic after each one.
    for service_name, expected in (("turn_on", 1), ("turn_off", 0)):
        await hass.services.async_call(
            "switch", service_name, {"entity_id": "switch.testdevice"}, blocking=True
        )
        assert on_char.value == expected
async def test_switch_read_outlet_state(hass, utcnow):
    """Test that we can read the state of a HomeKit outlet accessory."""
    helper = await setup_test_component(hass, create_switch_service)
    # Initial state is that the switch is off and the outlet isn't in use
    switch_1 = await helper.poll_and_get_state()
    assert switch_1.state == "off"
    assert switch_1.attributes["outlet_in_use"] is False
    # Simulate that someone switched on the device in the real world not via HA
    helper.characteristics[("outlet", "on")].set_value(True)
    switch_1 = await helper.poll_and_get_state()
    assert switch_1.state == "on"
    assert switch_1.attributes["outlet_in_use"] is False
    # Simulate that device switched off in the real world not via HA
    helper.characteristics[("outlet", "on")].set_value(False)
    switch_1 = await helper.poll_and_get_state()
    assert switch_1.state == "off"
    # Simulate that someone plugged something into the device
    # NOTE(review): this assigns .value directly rather than calling
    # set_value() as above -- presumably a raw state poke; confirm intent.
    helper.characteristics[("outlet", "outlet-in-use")].value = True
    switch_1 = await helper.poll_and_get_state()
    assert switch_1.state == "off"
    assert switch_1.attributes["outlet_in_use"] is True
async def test_valve_change_active_state(hass, utcnow):
    """Test that we can turn a valve on and off again."""
    helper = await setup_test_component(hass, create_valve_service)
    active_char = helper.characteristics[("valve", "active")]
    # Toggle via the switch services and verify the ACTIVE characteristic.
    for service_name, expected in (("turn_on", 1), ("turn_off", 0)):
        await hass.services.async_call(
            "switch", service_name, {"entity_id": "switch.testdevice"}, blocking=True
        )
        assert active_char.value == expected
async def test_valve_read_state(hass, utcnow):
    """Test that we can read the state of a valve accessory."""
    helper = await setup_test_component(hass, create_valve_service)
    # Initial state is that the switch is off and the outlet isn't in use
    switch_1 = await helper.poll_and_get_state()
    assert switch_1.state == "off"
    assert switch_1.attributes["in_use"] is True
    assert switch_1.attributes["is_configured"] is True
    assert switch_1.attributes["remaining_duration"] == 99
    # Simulate that someone switched on the device in the real world not via HA
    helper.characteristics[("valve", "active")].set_value(True)
    switch_1 = await helper.poll_and_get_state()
    assert switch_1.state == "on"
    # Simulate that someone configured the device in the real world not via HA
    # (direct .value assignment mirrors an out-of-band characteristic change)
    helper.characteristics[
        ("valve", "is-configured")
    ].value = IsConfiguredValues.NOT_CONFIGURED
    switch_1 = await helper.poll_and_get_state()
    assert switch_1.attributes["is_configured"] is False
    # Simulate that someone using the device in the real world not via HA
    helper.characteristics[("valve", "in-use")].value = InUseValues.NOT_IN_USE
    switch_1 = await helper.poll_and_get_state()
    assert switch_1.attributes["in_use"] is False
| apache-2.0 |
CodingCat/mxnet | python/mxnet/gluon/model_zoo/vision/resnet.py | 3 | 19913 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""ResNets, implemented in Gluon."""
from __future__ import division
__all__ = ['ResNetV1', 'ResNetV2',
'BasicBlockV1', 'BasicBlockV2',
'BottleneckV1', 'BottleneckV2',
'resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
'get_resnet']
import os
from ....context import cpu
from ...block import HybridBlock
from ... import nn
# Helpers
def _conv3x3(channels, stride, in_channels):
    """Return a bias-free 3x3 Conv2D with padding 1 (ResNet building block)."""
    conv_settings = dict(kernel_size=3, strides=stride, padding=1,
                         use_bias=False, in_channels=in_channels)
    return nn.Conv2D(channels, **conv_settings)
# Blocks
class BasicBlockV1(HybridBlock):
    r"""BasicBlock V1 from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.
    This is used for ResNet V1 for 18, 34 layers.

    Parameters
    ----------
    channels : int
        Number of output channels.
    stride : int
        Stride size.
    downsample : bool, default False
        Whether to downsample the input.
    in_channels : int, default 0
        Number of input channels. Default is 0, to infer from the graph.
    """
    def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
        super(BasicBlockV1, self).__init__(**kwargs)
        # Main path: conv-BN-relu-conv-BN (post-activation, v1 layout).
        self.body = nn.HybridSequential(prefix='')
        self.body.add(_conv3x3(channels, stride, in_channels))
        self.body.add(nn.BatchNorm())
        self.body.add(nn.Activation('relu'))
        self.body.add(_conv3x3(channels, 1, channels))
        self.body.add(nn.BatchNorm())
        if downsample:
            # Strided 1x1 conv + BN reshapes the shortcut to match the body.
            self.downsample = nn.HybridSequential(prefix='')
            self.downsample.add(nn.Conv2D(channels, kernel_size=1, strides=stride,
                                          use_bias=False, in_channels=in_channels))
            self.downsample.add(nn.BatchNorm())
        else:
            self.downsample = None

    def hybrid_forward(self, F, x):
        """Forward: relu(body(x) + shortcut(x)); ReLU comes after the add (v1)."""
        residual = x
        x = self.body(x)
        if self.downsample:
            residual = self.downsample(residual)
        x = F.Activation(residual+x, act_type='relu')
        return x
class BottleneckV1(HybridBlock):
    r"""Bottleneck V1 from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.
    This is used for ResNet V1 for 50, 101, 152 layers.

    Parameters
    ----------
    channels : int
        Number of output channels.
    stride : int
        Stride size.
    downsample : bool, default False
        Whether to downsample the input.
    in_channels : int, default 0
        Number of input channels. Default is 0, to infer from the graph.
    """
    def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
        super(BottleneckV1, self).__init__(**kwargs)
        # 1x1 reduce -> 3x3 -> 1x1 expand, each followed by BN (+relu).
        # NOTE(review): the stride sits on the first 1x1 conv (original
        # ResNet v1 placement), and these 1x1 convs keep their default bias
        # unlike the bias-free convs elsewhere -- cannot be changed without
        # invalidating pretrained parameter files.
        self.body = nn.HybridSequential(prefix='')
        self.body.add(nn.Conv2D(channels//4, kernel_size=1, strides=stride))
        self.body.add(nn.BatchNorm())
        self.body.add(nn.Activation('relu'))
        self.body.add(_conv3x3(channels//4, 1, channels//4))
        self.body.add(nn.BatchNorm())
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(channels, kernel_size=1, strides=1))
        self.body.add(nn.BatchNorm())
        if downsample:
            # Strided 1x1 conv + BN reshapes the shortcut to match the body.
            self.downsample = nn.HybridSequential(prefix='')
            self.downsample.add(nn.Conv2D(channels, kernel_size=1, strides=stride,
                                          use_bias=False, in_channels=in_channels))
            self.downsample.add(nn.BatchNorm())
        else:
            self.downsample = None

    def hybrid_forward(self, F, x):
        """Forward: relu(body(x) + shortcut(x)); ReLU comes after the add (v1)."""
        residual = x
        x = self.body(x)
        if self.downsample:
            residual = self.downsample(residual)
        x = F.Activation(x + residual, act_type='relu')
        return x
class BasicBlockV2(HybridBlock):
    r"""BasicBlock V2 from
    `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.
    This is used for ResNet V2 for 18, 34 layers.

    Parameters
    ----------
    channels : int
        Number of output channels.
    stride : int
        Stride size.
    downsample : bool, default False
        Whether to downsample the input.
    in_channels : int, default 0
        Number of input channels. Default is 0, to infer from the graph.
    """
    def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
        super(BasicBlockV2, self).__init__(**kwargs)
        # Pre-activation layout: BN-relu precedes each conv (v2 ordering).
        self.bn1 = nn.BatchNorm()
        self.conv1 = _conv3x3(channels, stride, in_channels)
        self.bn2 = nn.BatchNorm()
        self.conv2 = _conv3x3(channels, 1, channels)
        if downsample:
            self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False,
                                        in_channels=in_channels)
        else:
            self.downsample = None

    def hybrid_forward(self, F, x):
        """Forward: conv path on the pre-activated input, plus identity shortcut.

        Note the shortcut is taken from the BN+relu output (post bn1), not the
        raw input -- this matches the reference pre-activation design.
        """
        residual = x
        x = self.bn1(x)
        x = F.Activation(x, act_type='relu')
        if self.downsample:
            residual = self.downsample(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = F.Activation(x, act_type='relu')
        x = self.conv2(x)
        return x + residual
class BottleneckV2(HybridBlock):
    r"""Bottleneck V2 from
    `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.
    This is used for ResNet V2 for 50, 101, 152 layers.

    Parameters
    ----------
    channels : int
        Number of output channels.
    stride : int
        Stride size.
    downsample : bool, default False
        Whether to downsample the input.
    in_channels : int, default 0
        Number of input channels. Default is 0, to infer from the graph.
    """
    def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
        super(BottleneckV2, self).__init__(**kwargs)
        # Pre-activation bottleneck: BN-relu before each of the
        # 1x1 reduce / 3x3 (strided) / 1x1 expand convolutions.
        self.bn1 = nn.BatchNorm()
        self.conv1 = nn.Conv2D(channels//4, kernel_size=1, strides=1, use_bias=False)
        self.bn2 = nn.BatchNorm()
        self.conv2 = _conv3x3(channels//4, stride, channels//4)
        self.bn3 = nn.BatchNorm()
        self.conv3 = nn.Conv2D(channels, kernel_size=1, strides=1, use_bias=False)
        if downsample:
            self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False,
                                        in_channels=in_channels)
        else:
            self.downsample = None

    def hybrid_forward(self, F, x):
        """Forward: bottleneck conv path on the pre-activated input + shortcut.

        As in BasicBlockV2, the shortcut branches off after bn1+relu.
        """
        residual = x
        x = self.bn1(x)
        x = F.Activation(x, act_type='relu')
        if self.downsample:
            residual = self.downsample(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = F.Activation(x, act_type='relu')
        x = self.conv2(x)
        x = self.bn3(x)
        x = F.Activation(x, act_type='relu')
        x = self.conv3(x)
        return x + residual
# Nets
class ResNetV1(HybridBlock):
    r"""ResNet V1 model from
    `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.

    Parameters
    ----------
    block : HybridBlock
        Class for the residual block. Options are BasicBlockV1, BottleneckV1.
    layers : list of int
        Numbers of layers in each block
    channels : list of int
        Numbers of channels in each block. Length should be one larger than layers list.
    classes : int, default 1000
        Number of classification classes.
    thumbnail : bool, default False
        Enable thumbnail.
    """
    def __init__(self, block, layers, channels, classes=1000, thumbnail=False, **kwargs):
        super(ResNetV1, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            if thumbnail:
                # Small-image variant (e.g. CIFAR): single 3x3 stem, no pooling.
                self.features.add(_conv3x3(channels[0], 1, 0))
            else:
                # ImageNet stem: 7x7/2 conv, BN, relu, 3x3/2 max-pool.
                self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
                self.features.add(nn.BatchNorm())
                self.features.add(nn.Activation('relu'))
                self.features.add(nn.MaxPool2D(3, 2, 1))
            for i, num_layer in enumerate(layers):
                # First stage keeps resolution; later stages downsample by 2.
                stride = 1 if i == 0 else 2
                self.features.add(self._make_layer(block, num_layer, channels[i+1],
                                                   stride, i+1, in_channels=channels[i]))
            self.features.add(nn.GlobalAvgPool2D())
            self.output = nn.Dense(classes, in_units=channels[-1])

    def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0):
        """Build one stage: a strided (possibly downsampling) block followed by
        ``layers - 1`` stride-1 blocks."""
        layer = nn.HybridSequential(prefix='stage%d_'%stage_index)
        with layer.name_scope():
            # Downsample the shortcut only when channel counts differ.
            layer.add(block(channels, stride, channels != in_channels, in_channels=in_channels,
                            prefix=''))
            for _ in range(layers-1):
                layer.add(block(channels, 1, False, in_channels=channels, prefix=''))
        return layer

    def hybrid_forward(self, F, x):
        """Forward: feature extractor followed by the classification head."""
        x = self.features(x)
        x = self.output(x)
        return x
class ResNetV2(HybridBlock):
    r"""ResNet V2 model from
    `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.

    Parameters
    ----------
    block : HybridBlock
        Class for the residual block. Options are BasicBlockV1, BottleneckV1.
    layers : list of int
        Numbers of layers in each block
    channels : list of int
        Numbers of channels in each block. Length should be one larger than layers list.
    classes : int, default 1000
        Number of classification classes.
    thumbnail : bool, default False
        Enable thumbnail.
    """
    def __init__(self, block, layers, channels, classes=1000, thumbnail=False, **kwargs):
        super(ResNetV2, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            # Input normalization only -- no learned scale/shift.
            self.features.add(nn.BatchNorm(scale=False, center=False))
            if thumbnail:
                # Small-image variant: single 3x3 stem, no pooling.
                self.features.add(_conv3x3(channels[0], 1, 0))
            else:
                # ImageNet stem: 7x7/2 conv, BN, relu, 3x3/2 max-pool.
                self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
                self.features.add(nn.BatchNorm())
                self.features.add(nn.Activation('relu'))
                self.features.add(nn.MaxPool2D(3, 2, 1))
            in_channels = channels[0]
            for i, num_layer in enumerate(layers):
                # First stage keeps resolution; later stages downsample by 2.
                stride = 1 if i == 0 else 2
                self.features.add(self._make_layer(block, num_layer, channels[i+1],
                                                   stride, i+1, in_channels=in_channels))
                in_channels = channels[i+1]
            # Final BN+relu because v2 blocks end without an activation.
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.GlobalAvgPool2D())
            self.features.add(nn.Flatten())
            self.output = nn.Dense(classes, in_units=in_channels)

    def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0):
        """Build one stage: a strided (possibly downsampling) block followed by
        ``layers - 1`` stride-1 blocks."""
        layer = nn.HybridSequential(prefix='stage%d_'%stage_index)
        with layer.name_scope():
            # Downsample the shortcut only when channel counts differ.
            layer.add(block(channels, stride, channels != in_channels, in_channels=in_channels,
                            prefix=''))
            for _ in range(layers-1):
                layer.add(block(channels, 1, False, in_channels=channels, prefix=''))
        return layer

    def hybrid_forward(self, F, x):
        """Forward: feature extractor followed by the classification head."""
        x = self.features(x)
        x = self.output(x)
        return x
# Specification
# Specification: depth -> (block type, residual units per stage, channel
# widths per stage; the first channel entry is the stem width).
resnet_spec = {18: ('basic_block', [2, 2, 2, 2], [64, 64, 128, 256, 512]),
               34: ('basic_block', [3, 4, 6, 3], [64, 64, 128, 256, 512]),
               50: ('bottle_neck', [3, 4, 6, 3], [64, 256, 512, 1024, 2048]),
               101: ('bottle_neck', [3, 4, 23, 3], [64, 256, 512, 1024, 2048]),
               152: ('bottle_neck', [3, 8, 36, 3], [64, 256, 512, 1024, 2048])}

# Indexed by (version - 1) in get_resnet to pick the V1 or V2 variants.
resnet_net_versions = [ResNetV1, ResNetV2]
resnet_block_versions = [{'basic_block': BasicBlockV1, 'bottle_neck': BottleneckV1},
                         {'basic_block': BasicBlockV2, 'bottle_neck': BottleneckV2}]
# Constructor
def get_resnet(version, num_layers, pretrained=False, ctx=cpu(),
               root=os.path.join('~', '.mxnet', 'models'), **kwargs):
    r"""ResNet V1 model from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.
    ResNet V2 model from `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.

    Parameters
    ----------
    version : int
        Version of ResNet. Options are 1, 2.
    num_layers : int
        Numbers of layers. Options are 18, 34, 50, 101, 152.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Validate the requested depth and version against the spec tables.
    assert num_layers in resnet_spec, \
        "Invalid number of layers: %d. Options are %s"%(
            num_layers, str(resnet_spec.keys()))
    block_type, layers, channels = resnet_spec[num_layers]
    assert version >= 1 and version <= 2, \
        "Invalid resnet version: %d. Options are 1 and 2."%version
    # Resolve the concrete network class and residual-block class.
    resnet_class = resnet_net_versions[version-1]
    block_class = resnet_block_versions[version-1][block_type]
    net = resnet_class(block_class, layers, channels, **kwargs)
    if pretrained:
        # Download (if needed) and load the published weights into the net.
        from ..model_store import get_model_file
        net.load_params(get_model_file('resnet%d_v%d'%(num_layers, version),
                                       root=root), ctx=ctx)
    return net
# Convenience constructors: thin wrappers over get_resnet for each published
# depth/version combination. All keyword arguments (pretrained, ctx, root,
# classes, thumbnail, ...) are forwarded unchanged.

def resnet18_v1(**kwargs):
    r"""ResNet-18 V1 model from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(1, 18, **kwargs)

def resnet34_v1(**kwargs):
    r"""ResNet-34 V1 model from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(1, 34, **kwargs)

def resnet50_v1(**kwargs):
    r"""ResNet-50 V1 model from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(1, 50, **kwargs)

def resnet101_v1(**kwargs):
    r"""ResNet-101 V1 model from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(1, 101, **kwargs)

def resnet152_v1(**kwargs):
    r"""ResNet-152 V1 model from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(1, 152, **kwargs)

def resnet18_v2(**kwargs):
    r"""ResNet-18 V2 model from `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(2, 18, **kwargs)

def resnet34_v2(**kwargs):
    r"""ResNet-34 V2 model from `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(2, 34, **kwargs)

def resnet50_v2(**kwargs):
    r"""ResNet-50 V2 model from `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(2, 50, **kwargs)

def resnet101_v2(**kwargs):
    r"""ResNet-101 V2 model from `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(2, 101, **kwargs)

def resnet152_v2(**kwargs):
    r"""ResNet-152 V2 model from `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(2, 152, **kwargs)
| apache-2.0 |
sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/keystoneauth1/loading/session.py | 3 | 10154 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
try:
from oslo_config import cfg
except ImportError:
cfg = None
from keystoneauth1 import _utils as utils
from keystoneauth1.loading import base
from keystoneauth1 import session
__all__ = ['register_argparse_arguments',
'load_from_argparse_arguments',
'register_conf_options',
'load_from_conf_options',
'get_conf_options']
def _positive_non_zero_float(argument_value):
if argument_value is None:
return None
try:
value = float(argument_value)
except ValueError:
msg = "%s must be a float" % argument_value
raise argparse.ArgumentTypeError(msg)
if value <= 0:
msg = "%s must be greater than 0" % argument_value
raise argparse.ArgumentTypeError(msg)
return value
class Session(base.BaseLoader):
    """Loader that builds :class:`keystoneauth1.session.Session` objects from
    CLI arguments or oslo.config options."""

    @property
    def plugin_class(self):
        # The class this loader instantiates.
        return session.Session

    def get_options(self):
        # Session has no plugin-style options of its own; TLS/timeout options
        # are handled specially by the argparse/oslo.config helpers below.
        return []

    @utils.positional(1)
    def load_from_options(self,
                          insecure=False,
                          verify=None,
                          cacert=None,
                          cert=None,
                          key=None,
                          **kwargs):
        """Create a session with individual certificate parameters.

        Some parameters used to create a session don't lend themselves to be
        loaded from config/CLI etc. Create a session by converting those
        parameters into session __init__ parameters.
        """
        if verify is None:
            # Derive requests-style `verify`: False when insecure, otherwise
            # the CA bundle path if given, else plain certificate checking.
            if insecure:
                verify = False
            else:
                verify = cacert or True
        if cert and key:
            # passing cert and key together is deprecated in favour of the
            # requests lib form of having the cert and key as a tuple
            cert = (cert, key)
        return super(Session, self).load_from_options(verify=verify,
                                                      cert=cert,
                                                      **kwargs)

    def register_argparse_arguments(self, parser):
        """Register the TLS/timeout CLI arguments on *parser*."""
        parser.add_argument('--insecure',
                            default=False,
                            action='store_true',
                            help='Explicitly allow client to perform '
                                 '"insecure" TLS (https) requests. The '
                                 'server\'s certificate will not be verified '
                                 'against any certificate authorities. This '
                                 'option should be used with caution.')
        parser.add_argument('--os-cacert',
                            metavar='<ca-certificate>',
                            default=os.environ.get('OS_CACERT'),
                            help='Specify a CA bundle file to use in '
                                 'verifying a TLS (https) server certificate. '
                                 'Defaults to env[OS_CACERT].')
        parser.add_argument('--os-cert',
                            metavar='<certificate>',
                            default=os.environ.get('OS_CERT'),
                            help='Defaults to env[OS_CERT].')
        parser.add_argument('--os-key',
                            metavar='<key>',
                            default=os.environ.get('OS_KEY'),
                            help='Defaults to env[OS_KEY].')
        parser.add_argument('--timeout',
                            default=600,
                            type=_positive_non_zero_float,
                            metavar='<seconds>',
                            help='Set request timeout (in seconds).')

    def load_from_argparse_arguments(self, namespace, **kwargs):
        """Create a session from a parsed argparse namespace.

        Explicit keyword arguments take precedence over namespace values.
        """
        kwargs.setdefault('insecure', namespace.insecure)
        kwargs.setdefault('cacert', namespace.os_cacert)
        kwargs.setdefault('cert', namespace.os_cert)
        kwargs.setdefault('key', namespace.os_key)
        kwargs.setdefault('timeout', namespace.timeout)
        return self.load_from_options(**kwargs)

    def get_conf_options(self, deprecated_opts=None):
        """Get the oslo_config options that are needed for a
        :py:class:`.Session`.

        These may be useful without being registered for config file generation
        or to manipulate the options before registering them yourself.

        The options that are set are:
            :cafile: The certificate authority filename.
            :certfile: The client certificate file to present.
            :keyfile: The key for the client certificate.
            :insecure: Whether to ignore SSL verification.
            :timeout: The max time to wait for HTTP connections.

        :param dict deprecated_opts: Deprecated options that should be included
             in the definition of new options. This should be a dict from the
             name of the new option to a list of oslo.DeprecatedOpts that
             correspond to the new option. (optional)

             For example, to support the ``ca_file`` option pointing to the new
             ``cafile`` option name::

                 old_opt = oslo_cfg.DeprecatedOpt('ca_file', 'old_group')
                 deprecated_opts={'cafile': [old_opt]}

        :returns: A list of oslo_config options.
        """
        # oslo.config is an optional dependency; fail with a clear message if
        # the caller asks for conf options without having it installed.
        if not cfg:
            raise ImportError("oslo.config is not an automatic dependency of "
                              "keystoneauth. If you wish to use oslo.config "
                              "you need to import it into your application's "
                              "requirements file. ")
        if deprecated_opts is None:
            deprecated_opts = {}
        return [cfg.StrOpt('cafile',
                           deprecated_opts=deprecated_opts.get('cafile'),
                           help='PEM encoded Certificate Authority to use '
                                'when verifying HTTPs connections.'),
                cfg.StrOpt('certfile',
                           deprecated_opts=deprecated_opts.get('certfile'),
                           help='PEM encoded client certificate cert file'),
                cfg.StrOpt('keyfile',
                           deprecated_opts=deprecated_opts.get('keyfile'),
                           help='PEM encoded client certificate key file'),
                cfg.BoolOpt('insecure',
                            default=False,
                            deprecated_opts=deprecated_opts.get('insecure'),
                            help='Verify HTTPS connections.'),
                cfg.IntOpt('timeout',
                           deprecated_opts=deprecated_opts.get('timeout'),
                           help='Timeout value for http requests'),
                ]

    def register_conf_options(self, conf, group, deprecated_opts=None):
        """Register the oslo_config options that are needed for a session.

        The options that are set are:
            :cafile: The certificate authority filename.
            :certfile: The client certificate file to present.
            :keyfile: The key for the client certificate.
            :insecure: Whether to ignore SSL verification.
            :timeout: The max time to wait for HTTP connections.

        :param oslo_config.Cfg conf: config object to register with.
        :param string group: The ini group to register options in.
        :param dict deprecated_opts: Deprecated options that should be included
             in the definition of new options. This should be a dict from the
             name of the new option to a list of oslo.DeprecatedOpts that
             correspond to the new option. (optional)

             For example, to support the ``ca_file`` option pointing to the new
             ``cafile`` option name::

                 old_opt = oslo_cfg.DeprecatedOpt('ca_file', 'old_group')
                 deprecated_opts={'cafile': [old_opt]}

        :returns: The list of options that was registered.
        """
        opts = self.get_conf_options(deprecated_opts=deprecated_opts)
        conf.register_group(cfg.OptGroup(group))
        conf.register_opts(opts, group=group)
        return opts

    def load_from_conf_options(self, conf, group, **kwargs):
        """Create a session object from an oslo_config object.

        The options must have been previously registered with
        register_conf_options.

        :param oslo_config.Cfg conf: config object to register with.
        :param string group: The ini group to register options in.
        :param dict kwargs: Additional parameters to pass to session
            construction.
        :returns: A new session object.
        :rtype: :py:class:`.Session`
        """
        c = conf[group]
        # Config values act as defaults; explicit kwargs win.
        kwargs.setdefault('insecure', c.insecure)
        kwargs.setdefault('cacert', c.cafile)
        kwargs.setdefault('cert', c.certfile)
        kwargs.setdefault('key', c.keyfile)
        kwargs.setdefault('timeout', c.timeout)
        return self.load_from_options(**kwargs)
def register_argparse_arguments(*args, **kwargs):
    """Module-level shortcut for Session().register_argparse_arguments()."""
    session = Session()
    return session.register_argparse_arguments(*args, **kwargs)
def load_from_argparse_arguments(*args, **kwargs):
    """Module-level shortcut for Session().load_from_argparse_arguments()."""
    session = Session()
    return session.load_from_argparse_arguments(*args, **kwargs)
def register_conf_options(*args, **kwargs):
    """Module-level shortcut for Session().register_conf_options()."""
    session = Session()
    return session.register_conf_options(*args, **kwargs)
def load_from_conf_options(*args, **kwargs):
    """Module-level shortcut for Session().load_from_conf_options()."""
    session = Session()
    return session.load_from_conf_options(*args, **kwargs)
def get_conf_options(*args, **kwargs):
    """Module-level shortcut for Session().get_conf_options()."""
    session = Session()
    return session.get_conf_options(*args, **kwargs)
| mit |
ibank/node-gyp | gyp/pylib/gyp/msvs_emulation.py | 73 | 31885 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
import gyp.MSVSVersion
# Matches a double quote together with any run of backslashes immediately
# preceding it; used to implement CommandLineToArgvW's escaping rules below.
windows_quoter_regex = re.compile(r'(\\*)"')

def QuoteForRspFile(arg):
  """Quote a command line argument so that it appears as one argument when
  processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
  Windows programs)."""
  # CommandLineToArgvW requires 2n+1 backslashes before a literal quote when
  # n backslashes precede it, producing n backslashes + the quote; so each
  # backslash run is doubled and one extra backslash is inserted ahead of the
  # quote.  See http://goo.gl/cuFbX and http://goo.gl/dhPnp (the shell itself
  # does nothing on Windows; programs parse their own command line).
  escaped = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
  # Double %'s so cmd.exe passes them through literally rather than treating
  # them as batch positional arguments or environment references.  These
  # strings go into rsp files, so no shell (^) escaping is needed.  Wrapping
  # the whole thing in quotes keeps whitespace from splitting the argument.
  return '"%s"' % escaped.replace('%', '%%')
def EncodeRspFileList(args):
  """Process a list of arguments using QuoteCmdExeArgument."""
  # The first argument is assumed to be the command; quoting it would break
  # built-ins like 'echo'.  For 'call ../x.bat' only the script path portion
  # is normpath'd, otherwise the whole string would be misinterpreted as a
  # path.
  if not args:
    return ''
  first = args[0]
  if first.startswith('call '):
    verb, script = first.split(' ', 1)
    program = verb + ' ' + os.path.normpath(script)
  else:
    program = os.path.normpath(first)
  quoted_rest = ' '.join(QuoteForRspFile(a) for a in args[1:])
  return program + ' ' + quoted_rest
def _GenericRetrieve(root, default, path):
  """Given a list of dictionary keys |path| and a tree of dicts |root|, find
  value at path, or return |default| if any of the path doesn't exist."""
  node = root
  for key in path:
    if not node:
      return default
    node = node.get(key)
  # A falsy leaf (or falsy root when the path is empty) also yields the
  # default, matching the recursive formulation this replaces.
  return node if node else default
def _AddPrefix(element, prefix):
  """Add |prefix| to |element| or each subelement if element is iterable."""
  if element is None:
    return element
  # Strings are deliberately not treated as iterables here: a str gets the
  # prefix once, not per character.
  if isinstance(element, (list, tuple)):
    return [prefix + item for item in element]
  return prefix + element
def _DoRemapping(element, map):
  """If |element| then remap it through |map|. If |element| is iterable then
  each item will be remapped. Any elements not found will be removed."""
  if map is None or element is None:
    return element
  # A dict is used via .get so that missing keys map to None and are then
  # filtered out; anything callable is used for the remap directly.
  mapper = map if callable(map) else map.get
  if isinstance(element, (list, tuple)):
    return filter(None, [mapper(item) for item in element])
  return mapper(element)
def _AppendOrReturn(append, element):
  """If |append| is None, simply return |element|. If |append| is not None,
  then add |element| to it, adding each item in |element| if it's a list or
  tuple."""
  if append is None or element is None:
    return element
  # Mutate |append| in place; nothing is returned in this branch.
  if isinstance(element, (list, tuple)):
    append.extend(element)
  else:
    append.append(element)
def _FindDirectXInstallation():
  """Try to find an installation location for the DirectX SDK. Check for the
  standard environment variable, and if that doesn't exist, try to find
  via the registry. May return None if not found in either location."""
  # Return previously calculated value, if there is one; the result is cached
  # as an attribute on the function itself.
  if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
    return _FindDirectXInstallation.dxsdk_dir
  dxsdk_dir = os.environ.get('DXSDK_DIR')
  if not dxsdk_dir:
    # Setup params to pass to and attempt to launch reg.exe.
    cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # NOTE(review): this module targets Python 2 — under Python 3
    # communicate() yields bytes and the 'InstallPath' substring test would
    # need decoding first.
    for line in p.communicate()[0].splitlines():
      if 'InstallPath' in line:
        # Registry output looks like '    InstallPath    REG_SZ    <path>';
        # assumes the path itself contains no spaces — TODO confirm.
        dxsdk_dir = line.split(' ')[3] + "\\"
  # Cache return value (including None, so the lookup only runs once).
  _FindDirectXInstallation.dxsdk_dir = dxsdk_dir
  return dxsdk_dir
class MsvsSettings(object):
  """A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correpond to the VS2008 IDE DOM. This
  class helps map those settings to command line options."""

  def __init__(self, spec, generator_flags):
    # |spec| is the gyp target dict; |generator_flags| selects e.g. the VS
    # version to emulate.
    self.spec = spec
    self.vs_version = GetVSVersion(generator_flags)
    self.dxsdk_dir = _FindDirectXInstallation()

    # Try to find an installation location for the Windows DDK by checking
    # the WDK_DIR environment variable, may be None.
    self.wdk_dir = os.environ.get('WDK_DIR')

    # Flatten each supported per-configuration field into a dict keyed by
    # configuration name, with a type-appropriate empty default.
    supported_fields = [
        ('msvs_configuration_attributes', dict),
        ('msvs_settings', dict),
        ('msvs_system_include_dirs', list),
        ('msvs_disabled_warnings', list),
        ('msvs_precompiled_header', str),
        ('msvs_precompiled_source', str),
        ('msvs_target_platform', str),
        ]
    configs = spec['configurations']
    for field, default in supported_fields:
      setattr(self, field, {})
      for configname, config in configs.iteritems():
        getattr(self, field)[configname] = config.get(field, default())

    self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])

  def GetVSMacroEnv(self, base_to_build=None, config=None):
    """Get a dict of variables mapping internal VS macro names to their gyp
    equivalents."""
    target_platform = self.GetTargetPlatform(config)
    # VS spells the 32-bit platform 'Win32' in $(PlatformName).
    target_platform = {'x86': 'Win32'}.get(target_platform, target_platform)
    replacements = {
        '$(VSInstallDir)': self.vs_version.Path(),
        '$(VCInstallDir)': os.path.join(self.vs_version.Path(), 'VC') + '\\',
        '$(OutDir)\\': base_to_build + '\\' if base_to_build else '',
        '$(IntDir)': '$!INTERMEDIATE_DIR',
        '$(InputPath)': '${source}',
        '$(InputName)': '${root}',
        '$(ProjectName)': self.spec['target_name'],
        '$(PlatformName)': target_platform,
        '$(ProjectDir)\\': '',
    }
    # Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
    # set. This happens when the SDK is sync'd via src-internal, rather than
    # by typical end-user installation of the SDK. If it's not set, we don't
    # want to leave the unexpanded variable in the path, so simply strip it.
    replacements['$(DXSDK_DIR)'] = self.dxsdk_dir if self.dxsdk_dir else ''
    replacements['$(WDK_DIR)'] = self.wdk_dir if self.wdk_dir else ''
    return replacements

  def ConvertVSMacros(self, s, base_to_build=None, config=None):
    """Convert from VS macro names to something equivalent."""
    env = self.GetVSMacroEnv(base_to_build, config=config)
    return ExpandMacros(s, env)

  def AdjustLibraries(self, libraries):
    """Strip -l from library if it's specified with that."""
    return [lib[2:] if lib.startswith('-l') else lib for lib in libraries]

  def _GetAndMunge(self, field, path, default, prefix, append, map):
    """Retrieve a value from |field| at |path| or return |default|. If
    |append| is specified, and the item is found, it will be appended to that
    object instead of returned. If |map| is specified, results will be
    remapped through |map| before being returned or appended."""
    result = _GenericRetrieve(field, default, path)
    result = _DoRemapping(result, map)
    result = _AddPrefix(result, prefix)
    return _AppendOrReturn(append, result)

  class _GetWrapper(object):
    # Callable helper that curries |field| (a settings dict) and |base_path|
    # (a tool name), so a tool's settings can be fetched as
    # wrapper('SettingName', map=..., prefix=...).
    def __init__(self, parent, field, base_path, append=None):
      self.parent = parent
      self.field = field
      self.base_path = [base_path]
      self.append = append
    def __call__(self, name, map=None, prefix='', default=None):
      return self.parent._GetAndMunge(self.field, self.base_path + [name],
          default=default, prefix=prefix, append=self.append, map=map)

  def GetTargetPlatform(self, config):
    """Return the target platform ('x86' or e.g. 'x64') for |config|,
    defaulting to 'x86' when none is named."""
    target_platform = self.msvs_target_platform.get(config, '')
    if not target_platform:
      target_platform = 'Win32'
    return {'Win32': 'x86'}.get(target_platform, target_platform)

  def _RealConfig(self, config):
    # Settings for x64 targets live under a '<config>_x64' key; append the
    # suffix so lookups hit the right variant.
    target_platform = self.GetTargetPlatform(config)
    if target_platform == 'x64' and not config.endswith('_x64'):
      config += '_x64'
    return config

  def _Setting(self, path, config,
               default=None, prefix='', append=None, map=None):
    """_GetAndMunge for msvs_settings."""
    config = self._RealConfig(config)
    return self._GetAndMunge(
        self.msvs_settings[config], path, default, prefix, append, map)

  def _ConfigAttrib(self, path, config,
                    default=None, prefix='', append=None, map=None):
    """_GetAndMunge for msvs_configuration_attributes."""
    config = self._RealConfig(config)
    return self._GetAndMunge(
        self.msvs_configuration_attributes[config],
        path, default, prefix, append, map)

  def AdjustIncludeDirs(self, include_dirs, config):
    """Updates include_dirs to expand VS specific paths, and adds the system
    include dirs used for platform SDK and similar."""
    config = self._RealConfig(config)
    includes = include_dirs + self.msvs_system_include_dirs[config]
    includes.extend(self._Setting(
        ('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
    return [self.ConvertVSMacros(p, config=config) for p in includes]

  def GetComputedDefines(self, config):
    """Returns the set of defines that are injected to the defines list based
    on other VS settings."""
    config = self._RealConfig(config)
    defines = []
    # CharacterSet '1' is Unicode, '2' is MBCS (per the corresponding
    # defines below).
    if self._ConfigAttrib(['CharacterSet'], config) == '1':
      defines.extend(('_UNICODE', 'UNICODE'))
    if self._ConfigAttrib(['CharacterSet'], config) == '2':
      defines.append('_MBCS')
    defines.extend(self._Setting(
        ('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
    return defines

  def GetOutputName(self, config, expand_special):
    """Gets the explicitly overridden output name for a target or returns None
    if it's not overridden."""
    config = self._RealConfig(config)
    type = self.spec['type']
    root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
    # TODO(scottmg): Handle OutputDirectory without OutputFile.
    output_file = self._Setting((root, 'OutputFile'), config)
    if output_file:
      output_file = expand_special(self.ConvertVSMacros(
          output_file, config=config))
    return output_file

  def GetPDBName(self, config, expand_special):
    """Gets the explicitly overridden pdb name for a target or returns None
    if it's not overridden."""
    config = self._RealConfig(config)
    output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
    if output_file:
      output_file = expand_special(self.ConvertVSMacros(
          output_file, config=config))
    return output_file

  def GetCflags(self, config):
    """Returns the flags that need to be added to .c and .cc compilations."""
    config = self._RealConfig(config)
    cflags = []
    cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
    cl = self._GetWrapper(self, self.msvs_settings[config],
                          'VCCLCompilerTool', append=cflags)
    # Each call maps a VS IDE enum/bool value onto the equivalent cl.exe
    # switch; unknown values are dropped by the remapping helper.
    cl('Optimization',
       map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O')
    cl('InlineFunctionExpansion', prefix='/Ob')
    cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
    cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
    cl('WholeProgramOptimization', map={'true': '/GL'})
    cl('WarningLevel', prefix='/W')
    cl('WarnAsError', map={'true': '/WX'})
    cl('DebugInformationFormat',
        map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
    cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
    cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
    cl('MinimalRebuild', map={'true': '/Gm'})
    cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
    cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
    cl('RuntimeLibrary',
       map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
    cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
    cl('EnablePREfast', map={'true': '/analyze'})
    cl('AdditionalOptions', prefix='')
    # ninja handles parallelism by itself, don't have the compiler do it too.
    cflags = filter(lambda x: not x.startswith('/MP'), cflags)
    return cflags

  def GetPrecompiledHeader(self, config, gyp_to_build_path):
    """Returns an object that handles the generation of precompiled header
    build steps."""
    config = self._RealConfig(config)
    # NOTE: _PchHelper is defined elsewhere in this module.
    return _PchHelper(self, config, gyp_to_build_path)

  def _GetPchFlags(self, config, extension):
    """Get the flags to be added to the cflags for precompiled header support.
    """
    config = self._RealConfig(config)
    # The PCH is only built once by a particular source file. Usage of PCH must
    # only be for the same language (i.e. C vs. C++), so only include the pch
    # flags when the language matches.
    if self.msvs_precompiled_header[config]:
      source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
      if _LanguageMatchesForPch(source_ext, extension):
        pch = os.path.split(self.msvs_precompiled_header[config])[1]
        return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
    return []

  def GetCflagsC(self, config):
    """Returns the flags that need to be added to .c compilations."""
    config = self._RealConfig(config)
    return self._GetPchFlags(config, '.c')

  def GetCflagsCC(self, config):
    """Returns the flags that need to be added to .cc compilations."""
    config = self._RealConfig(config)
    return ['/TP'] + self._GetPchFlags(config, '.cc')

  def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
    """Get and normalize the list of paths in AdditionalLibraryDirectories
    setting."""
    config = self._RealConfig(config)
    libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
                             config, default=[])
    libpaths = [os.path.normpath(
        gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
        for p in libpaths]
    return ['/LIBPATH:"' + p + '"' for p in libpaths]

  def GetLibFlags(self, config, gyp_to_build_path):
    """Returns the flags that need to be added to lib commands."""
    config = self._RealConfig(config)
    libflags = []
    lib = self._GetWrapper(self, self.msvs_settings[config],
                           'VCLibrarianTool', append=libflags)
    libflags.extend(self._GetAdditionalLibraryDirectories(
        'VCLibrarianTool', config, gyp_to_build_path))
    lib('AdditionalOptions')
    return libflags

  def _GetDefFileAsLdflags(self, spec, ldflags, gyp_to_build_path):
    """.def files get implicitly converted to a ModuleDefinitionFile for the
    linker in the VS generator. Emulate that behaviour here."""
    def_file = ''
    if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
      def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
      if len(def_files) == 1:
        ldflags.append('/DEF:"%s"' % gyp_to_build_path(def_files[0]))
      elif len(def_files) > 1:
        raise Exception("Multiple .def files")

  def GetLdflags(self, config, gyp_to_build_path, expand_special,
                 manifest_base_name, is_executable):
    """Returns the flags that need to be added to link commands, and the
    manifest files."""
    config = self._RealConfig(config)
    ldflags = []
    ld = self._GetWrapper(self, self.msvs_settings[config],
                          'VCLinkerTool', append=ldflags)
    self._GetDefFileAsLdflags(self.spec, ldflags, gyp_to_build_path)
    ld('GenerateDebugInformation', map={'true': '/DEBUG'})
    ld('TargetMachine', map={'1': 'X86', '17': 'X64'}, prefix='/MACHINE:')
    ldflags.extend(self._GetAdditionalLibraryDirectories(
        'VCLinkerTool', config, gyp_to_build_path))
    ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
    out = self.GetOutputName(config, expand_special)
    if out:
      ldflags.append('/OUT:' + out)
    pdb = self.GetPDBName(config, expand_special)
    if pdb:
      ldflags.append('/PDB:' + pdb)
    ld('AdditionalOptions', prefix='')
    ld('SubSystem', map={'1': 'CONSOLE', '2': 'WINDOWS'}, prefix='/SUBSYSTEM:')
    ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
    ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
    ld('RandomizedBaseAddress',
        map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
    ld('DataExecutionPrevention',
        map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
    ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
    ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
    ld('LinkTimeCodeGeneration', map={'1': '/LTCG'})
    ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
    ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
    ld('EntryPointSymbol', prefix='/ENTRY:')
    ld('Profile', map={ 'true': '/PROFILE'})
    # TODO(scottmg): This should sort of be somewhere else (not really a flag).
    ld('AdditionalDependencies', prefix='')
    # TODO(scottmg): These too.
    ldflags.extend(('kernel32.lib', 'user32.lib', 'gdi32.lib', 'winspool.lib',
        'comdlg32.lib', 'advapi32.lib', 'shell32.lib', 'ole32.lib',
        'oleaut32.lib', 'uuid.lib', 'odbc32.lib', 'DelayImp.lib'))

    # If the base address is not specifically controlled, DYNAMICBASE should
    # be on by default.
    base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
                        ldflags)
    if not base_flags:
      ldflags.append('/DYNAMICBASE')

    # If the NXCOMPAT flag has not been specified, default to on. Despite the
    # documentation that says this only defaults to on when the subsystem is
    # Vista or greater (which applies to the linker), the IDE defaults it on
    # unless it's explicitly off.
    if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
      ldflags.append('/NXCOMPAT')

    have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
    manifest_flags, intermediate_manifest_file = self._GetLdManifestFlags(
        config, manifest_base_name, is_executable and not have_def_file)
    ldflags.extend(manifest_flags)
    manifest_files = self._GetAdditionalManifestFiles(config, gyp_to_build_path)
    manifest_files.append(intermediate_manifest_file)
    return ldflags, manifest_files

  def _GetLdManifestFlags(self, config, name, allow_isolation):
    """Returns the set of flags that need to be added to the link to generate
    a default manifest, as well as the name of the generated file."""
    # Add manifest flags that mirror the defaults in VS. Chromium dev builds
    # do not currently use any non-default settings, but we could parse
    # VCManifestTool blocks if Chromium or other projects need them in the
    # future. Of particular note, we do not yet support EmbedManifest because
    # it complicates incremental linking.
    output_name = name + '.intermediate.manifest'
    flags = [
      '/MANIFEST',
      '/ManifestFile:' + output_name,
      '''/MANIFESTUAC:"level='asInvoker' uiAccess='false'"'''
    ]
    if allow_isolation:
      flags.append('/ALLOWISOLATION')
    return flags, output_name

  def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
    """Gets additional manifest files that are added to the default one
    generated by the linker."""
    files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
                          default=[])
    if (self._Setting(
        ('VCManifestTool', 'EmbedManifest'), config, default='') == 'true'):
      print 'gyp/msvs_emulation.py: "EmbedManifest: true" not yet supported.'
    # The setting may be a single ';'-separated string rather than a list.
    if isinstance(files, str):
      files = files.split(';')
    return [os.path.normpath(
        gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
        for f in files]

  def IsUseLibraryDependencyInputs(self, config):
    """Returns whether the target should be linked via Use Library Dependency
    Inputs (using component .objs of a given .lib)."""
    config = self._RealConfig(config)
    uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
    return uldi == 'true'

  def GetRcflags(self, config, gyp_to_ninja_path):
    """Returns the flags that need to be added to invocations of the resource
    compiler."""
    config = self._RealConfig(config)
    rcflags = []
    rc = self._GetWrapper(self, self.msvs_settings[config],
        'VCResourceCompilerTool', append=rcflags)
    rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
    rcflags.append('/I' + gyp_to_ninja_path('.'))
    rc('PreprocessorDefinitions', prefix='/d')
    # /l arg must be in hex without leading '0x'
    rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
    return rcflags

  def BuildCygwinBashCommandLine(self, args, path_to_base):
    """Build a command line that runs args via cygwin bash. We assume that all
    incoming paths are in Windows normpath'd form, so they need to be
    converted to posix style for the part of the command line that's passed to
    bash. We also have to do some Visual Studio macro emulation here because
    various rules use magic VS names for things. Also note that rules that
    contain ninja variables cannot be fixed here (for example ${source}), so
    the outer generator needs to make sure that the paths that are written out
    are in posix style, if the command line will be used here."""
    cygwin_dir = os.path.normpath(
        os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
    cd = ('cd %s' % path_to_base).replace('\\', '/')
    args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
    # Single-quote each argument for bash, escaping embedded single quotes.
    args = ["'%s'" % a.replace("'", "'\\''") for a in args]
    bash_cmd = ' '.join(args)
    cmd = (
        'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
        'bash -c "%s ; %s"' % (cd, bash_cmd))
    return cmd

  def IsRuleRunUnderCygwin(self, rule):
    """Determine if an action should be run under cygwin. If the variable is
    unset, or set to 1 we use cygwin."""
    return int(rule.get('msvs_cygwin_shell',
                        self.spec.get('msvs_cygwin_shell', 1))) != 0

  def HasExplicitIdlRules(self, spec):
    """Determine if there's an explicit rule for idl files. When there isn't we
    need to generate implicit rules to build MIDL .idl files."""
    for rule in spec.get('rules', []):
      if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
        return True
    return False

  def GetIdlBuildData(self, source, config):
    """Determine the implicit outputs for an idl file. Returns output
    directory, outputs, and variables and flags that are required."""
    config = self._RealConfig(config)
    midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
    def midl(name, default=None):
      # Fetch a VCMIDLTool setting and expand any VS macros it contains.
      return self.ConvertVSMacros(midl_get(name, default=default),
                                  config=config)
    tlb = midl('TypeLibraryName', default='${root}.tlb')
    header = midl('HeaderFileName', default='${root}.h')
    dlldata = midl('DLLDataFileName', default='dlldata.c')
    iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
    proxy = midl('ProxyFileName', default='${root}_p.c')
    # Note that .tlb is not included in the outputs as it is not always
    # generated depending on the content of the input idl file.
    outdir = midl('OutputDirectory', default='')
    output = [header, dlldata, iid, proxy]
    variables = [('tlb', tlb),
                 ('h', header),
                 ('dlldata', dlldata),
                 ('iid', iid),
                 ('proxy', proxy)]
    # TODO(scottmg): Are there configuration settings to set these flags?
    flags = ['/char', 'signed', '/env', 'win32', '/Oicf']
    return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
  # True when both extensions belong to the same language family (C or C++),
  # since a PCH can only be shared within one language.
  c_family = ('.c',)
  cplusplus_family = ('.cc', '.cxx', '.cpp')
  if source_ext in c_family:
    return pch_source_ext in c_family
  if source_ext in cplusplus_family:
    return pch_source_ext in cplusplus_family
  return False
class PrecompiledHeader(object):
  """Helper to generate dependencies and build rules to handle generation of
  precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
  """
  def __init__(self, settings, config, gyp_to_build_path):
    self.settings = settings
    self.config = config
    self.gyp_to_build_path = gyp_to_build_path

  def _PchHeader(self):
    """Get the header that will appear in an #include line for all source
    files."""
    header_path = self.settings.msvs_precompiled_header[self.config]
    return os.path.split(header_path)[1]

  def _PchSource(self):
    """Get the source file that is built once to compile the pch data."""
    source_path = self.settings.msvs_precompiled_source[self.config]
    return self.gyp_to_build_path(source_path)

  def _PchOutput(self):
    """Get the name of the output of the compiled pch data."""
    return '${pchprefix}.%s.pch' % self._PchHeader()

  def GetObjDependencies(self, sources, objs):
    """Given a list of sources files and the corresponding object files,
    returns a list of the pch files that should be depended upon. The
    additional wrapping in the return value is for interface compatability
    with make.py on Mac, and xcode_emulation.py."""
    if not self._PchHeader():
      return []
    pch_source = self._PchSource()
    assert pch_source
    pch_ext = os.path.splitext(pch_source)[1]
    # Only depend on the pch when at least one source shares its language.
    if any(_LanguageMatchesForPch(os.path.splitext(s)[1], pch_ext)
           for s in sources):
      return [(None, None, self._PchOutput())]
    return []

  def GetPchBuildCommands(self):
    """Returns [(path_to_pch, language_flag, language, header)].
    |path_to_gch| and |header| are relative to the build directory."""
    header = self._PchHeader()
    source = self._PchSource()
    if not (source and header):
      return []
    lang = 'c' if os.path.splitext(source)[1] == '.c' else 'cc'
    return [(self._PchOutput(), '/Yc' + header, lang, source)]
# Cached VisualStudioVersion object; populated lazily by GetVSVersion().
vs_version = None
def GetVSVersion(generator_flags):
  """Return (and cache in module-global |vs_version|) the Visual Studio
  version selected by the 'msvs_version' generator flag ('auto' default)."""
  global vs_version
  if not vs_version:
    vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
        generator_flags.get('msvs_version', 'auto'))
  return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
  """Return the environment setup script command line for the selected VS.

  NOTE(review): |arch| is accepted but currently unused by this helper.
  """
  return GetVSVersion(generator_flags).SetupScript()
def ExpandMacros(string, expansions):
  """Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
  for the canonical way to retrieve a suitable dict."""
  # Fast path: nothing that looks like a macro, return unchanged.
  if '$' not in string:
    return string
  for old, new in expansions.iteritems():
    # Replacements must themselves be fully expanded already.
    assert '$(' not in new, new
    string = string.replace(old, new)
  return string
def _ExtractImportantEnvironment(output_of_set):
  """Extracts environment variables required for the toolchain to run from
  a textual dump output by the cmd.exe 'set' command."""
  # Lower-case regex prefixes matched against each 'NAME=value' line.
  envvars_to_save = (
      'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
      'include',
      'lib',
      'libpath',
      'path',
      'pathext',
      'systemroot',
      'temp',
      'tmp',
      )
  env = {}
  for line in output_of_set.splitlines():
    for envvar in envvars_to_save:
      if not re.match(envvar + '=', line.lower()):
        continue
      var, _, setting = line.partition('=')
      if envvar == 'path':
        # Our own rules (for running gyp-win-tool) and other actions in
        # Chromium rely on python being in the path. Add the path to this
        # python here so that if it's not in the path when ninja is run
        # later, python will still be found.
        setting = os.path.dirname(sys.executable) + os.pathsep + setting
      env[var.upper()] = setting
      break
  for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
    if required not in env:
      raise Exception('Environment variable "%s" '
                      'required to be set to valid path' % required)
  return env
def _FormatAsEnvironmentBlock(envvar_dict):
  """Format as an 'environment block' directly suitable for CreateProcess.
  Briefly this is a list of key=value\0, terminated by an additional \0. See
  CreateProcess documentation for more details."""
  nul = '\0'
  pieces = ['%s=%s%s' % (key, value, nul)
            for key, value in envvar_dict.iteritems()]
  pieces.append(nul)
  return ''.join(pieces)
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags, open_out):
  """It's not sufficient to have the absolute path to the compiler, linker,
  etc. on Windows, as those tools rely on .dlls being in the PATH. We also
  need to support both x86 and x64 compilers within the same build (to support
  msvs_target_platform hackery). Different architectures require a different
  compiler binary, and different supporting environment variables (INCLUDE,
  LIB, LIBPATH). So, we extract the environment here, wrap all invocations
  of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
  sets up the environment, and then we do not prefix the compiler with
  an absolute path, instead preferring something like "cl.exe" in the rule
  which will then run whichever the environment setup has put in the path."""
  vs = GetVSVersion(generator_flags)
  for arch in ('x86', 'x64'):
    # Run the vcvars setup script followed by 'set' so the resulting
    # environment can be captured from stdout.
    args = vs.SetupScript(arch)
    args.extend(('&&', 'set'))
    popen = subprocess.Popen(
        args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    variables, _ = popen.communicate()
    env = _ExtractImportantEnvironment(variables)
    env_block = _FormatAsEnvironmentBlock(env)
    # Written in binary mode: the block is consumed raw by CreateProcess.
    f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
    f.write(env_block)
    f.close()
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
  """Emulate behavior of msvs_error_on_missing_sources present in the msvs
  generator: Check that all regular source files, i.e. not created at run time,
  exist on disk. Missing files cause needless recompilation when building via
  VS, and we want this check to match for people/bots that build using ninja,
  so they're not surprised when the VS build fails."""
  if not int(generator_flags.get('msvs_error_on_missing_sources', 0)):
    return
  # Sources containing '$' are generated at build time; skip them.
  real_sources = filter(lambda s: '$' not in s, sources)
  on_disk = [os.path.join(build_dir, gyp_to_ninja(s)) for s in real_sources]
  missing = filter(lambda p: not os.path.exists(p), on_disk)
  if missing:
    # They'll look like out\Release\..\..\stuff\things.cc, so normalize the
    # path for a slightly less crazy looking output.
    cleaned_up = [os.path.normpath(p) for p in missing]
    raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
| mit |
xjsender/haoide | requests/cookies.py | 177 | 17387 | # -*- coding: utf-8 -*-
"""
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import calendar
import collections
from .compat import cookielib, urlparse, urlunparse, Morsel
try:
import threading
# grr, pyflakes: this fixes "redefinition of unused 'threading'"
threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
    """Wraps a `requests.Request` to mimic a `urllib2.Request`.

    The code in `cookielib.CookieJar` expects this interface in order to
    correctly manage cookie policies, i.e., determine whether a cookie can be
    set, given the domains of the request and the cookie.

    The original request object is read-only. The client is responsible for
    collecting the new headers via `get_new_headers()` and interpreting them
    appropriately. You probably want `get_cookie_header`, defined below.
    """

    def __init__(self, request):
        self._r = request
        self._new_headers = {}
        self.type = urlparse(self._r.url).scheme

    def get_type(self):
        return self.type

    def get_host(self):
        return urlparse(self._r.url).netloc

    def get_origin_req_host(self):
        return self.get_host()

    def get_full_url(self):
        # Honor an explicitly-set Host header by rebuilding the URL around it;
        # otherwise just hand back the request's own URL.
        host = self._r.headers.get('Host')
        if not host:
            return self._r.url
        parsed = urlparse(self._r.url)
        return urlunparse([parsed.scheme, host, parsed.path, parsed.params,
                           parsed.query, parsed.fragment])

    def is_unverifiable(self):
        return True

    def has_header(self, name):
        return name in self._r.headers or name in self._new_headers

    def get_header(self, name, default=None):
        if name in self._r.headers:
            return self._r.headers[name]
        return self._new_headers.get(name, default)

    def add_header(self, key, val):
        """cookielib has no legitimate use for this method; add it back if you find one."""
        raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")

    def add_unredirected_header(self, name, value):
        self._new_headers[name] = value

    def get_new_headers(self):
        return self._new_headers

    @property
    def unverifiable(self):
        return self.is_unverifiable()

    @property
    def origin_req_host(self):
        return self.get_origin_req_host()

    @property
    def host(self):
        return self.get_host()
class MockResponse(object):
    """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.

    ...what? Basically, expose the parsed HTTP headers from the server response
    the way `cookielib` expects to see them.
    """

    def __init__(self, headers):
        """Make a MockResponse for `cookielib` to read.

        :param headers: a httplib.HTTPMessage or analogous carrying the headers
        """
        self._headers = headers

    def info(self):
        # urllib.addinfourl interface: expose the header object itself.
        return self._headers

    def getheaders(self, name):
        # BUG FIX: the original called self._headers.getheaders(name) but
        # discarded the result, so every caller received None. Return it.
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
    """Extract the cookies from the response into a CookieJar.

    :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
    :param request: our own requests.Request object
    :param response: urllib3.HTTPResponse object
    """
    if not (hasattr(response, '_original_response') and
            response._original_response):
        # Nothing to extract without the wrapped low-level response
        # (e.g. a synthetic response not produced by urllib3).
        return
    # the _original_response field is the wrapped httplib.HTTPResponse object,
    req = MockRequest(request)
    # pull out the HTTPMessage with the headers and put it in the mock:
    res = MockResponse(response._original_response.msg)
    jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
    """Produce an appropriate Cookie header string to be sent with `request`, or None."""
    r = MockRequest(request)
    # add_cookie_header() writes into the mock's new-header dict rather than
    # the real request; read the computed Cookie value back out of it.
    jar.add_cookie_header(r)
    return r.get_new_headers().get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
    """Unsets a cookie by name, by default over all domains and paths.

    Wraps CookieJar.clear(), is O(n).
    """
    # Collect the matching (domain, path, name) triples first; clearing
    # while iterating the jar would mutate it mid-iteration.
    matches = [
        (c.domain, c.path, c.name)
        for c in cookiejar
        if c.name == name
        and (domain is None or c.domain == domain)
        and (path is None or c.path == path)
    ]
    for cookie_domain, cookie_path, cookie_name in matches:
        cookiejar.clear(cookie_domain, cookie_path, cookie_name)
class CookieConflictError(RuntimeError):
    """There are two cookies that meet the criteria specified in the cookie jar.

    Use .get and .set and include domain and path args in order to be more specific.
    """
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
    """Compatibility class; is a cookielib.CookieJar, but exposes a dict
    interface.

    This is the CookieJar we create by default for requests and sessions that
    don't specify one, since some clients may expect response.cookies and
    session.cookies to support dict operations.

    Requests does not use the dict interface internally; it's just for
    compatibility with external client code. All requests code should work
    out of the box with externally provided instances of ``CookieJar``, e.g.
    ``LWPCookieJar`` and ``FileCookieJar``.

    Unlike a regular CookieJar, this class is pickleable.

    .. warning:: dictionary operations that are normally O(1) may be O(n).
    """

    def get(self, name, default=None, domain=None, path=None):
        """Dict-like get() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains.

        .. warning:: operation is O(n), not O(1)."""
        try:
            return self._find_no_duplicates(name, domain, path)
        except KeyError:
            return default

    def set(self, name, value, **kwargs):
        """Dict-like set() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains."""
        # support client code that unsets cookies by assignment of a None value:
        if value is None:
            remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
            return
        if isinstance(value, Morsel):
            c = morsel_to_cookie(value)
        else:
            c = create_cookie(name, value, **kwargs)
        self.set_cookie(c)
        return c

    def iterkeys(self):
        """Dict-like iterkeys() that returns an iterator of names of cookies
        from the jar. See itervalues() and iteritems()."""
        for cookie in iter(self):
            yield cookie.name

    def keys(self):
        """Dict-like keys() that returns a list of names of cookies from the
        jar. See values() and items()."""
        return list(self.iterkeys())

    def itervalues(self):
        """Dict-like itervalues() that returns an iterator of values of cookies
        from the jar. See iterkeys() and iteritems()."""
        for cookie in iter(self):
            yield cookie.value

    def values(self):
        """Dict-like values() that returns a list of values of cookies from the
        jar. See keys() and items()."""
        return list(self.itervalues())

    def iteritems(self):
        """Dict-like iteritems() that returns an iterator of name-value tuples
        from the jar. See iterkeys() and itervalues()."""
        for cookie in iter(self):
            yield cookie.name, cookie.value

    def items(self):
        """Dict-like items() that returns a list of name-value tuples from the
        jar. See keys() and values(). Allows client-code to call
        ``dict(RequestsCookieJar)`` and get a vanilla python dict of key value
        pairs."""
        return list(self.iteritems())

    def list_domains(self):
        """Utility method to list all the domains in the jar."""
        domains = []
        for cookie in iter(self):
            if cookie.domain not in domains:
                domains.append(cookie.domain)
        return domains

    def list_paths(self):
        """Utility method to list all the paths in the jar."""
        paths = []
        for cookie in iter(self):
            if cookie.path not in paths:
                paths.append(cookie.path)
        return paths

    def multiple_domains(self):
        """Returns True if there are multiple domains in the jar.
        Returns False otherwise."""
        domains = []
        for cookie in iter(self):
            if cookie.domain is not None and cookie.domain in domains:
                return True
            domains.append(cookie.domain)
        return False  # there is only one domain in jar

    def get_dict(self, domain=None, path=None):
        """Takes as an argument an optional domain and path and returns a plain
        old Python dict of name-value pairs of cookies that meet the
        requirements."""
        dictionary = {}
        for cookie in iter(self):
            if (domain is None or cookie.domain == domain) and (path is None
                                                or cookie.path == path):
                dictionary[cookie.name] = cookie.value
        return dictionary

    def __getitem__(self, name):
        """Dict-like __getitem__() for compatibility with client code. Throws
        exception if there are more than one cookie with name. In that case,
        use the more explicit get() method instead.

        .. warning:: operation is O(n), not O(1)."""
        return self._find_no_duplicates(name)

    def __setitem__(self, name, value):
        """Dict-like __setitem__ for compatibility with client code. Throws
        exception if there is already a cookie of that name in the jar. In that
        case, use the more explicit set() method instead."""
        self.set(name, value)

    def __delitem__(self, name):
        """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
        ``remove_cookie_by_name()``."""
        remove_cookie_by_name(self, name)

    def set_cookie(self, cookie, *args, **kwargs):
        # Servers may send values wrapped in (escaped) double quotes; strip
        # the escaped quotes before storing.
        if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
            cookie.value = cookie.value.replace('\\"', '')
        return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)

    def update(self, other):
        """Updates this jar with cookies from another CookieJar or dict-like"""
        if isinstance(other, cookielib.CookieJar):
            for cookie in other:
                self.set_cookie(copy.copy(cookie))
        else:
            super(RequestsCookieJar, self).update(other)

    def _find(self, name, domain=None, path=None):
        """Requests uses this method internally to get cookie values. Takes as
        args name and optional domain and path. Returns a cookie.value. If
        there are conflicting cookies, _find arbitrarily chooses one. See
        _find_no_duplicates if you want an exception thrown if there are
        conflicting cookies."""
        for cookie in iter(self):
            if cookie.name == name:
                if domain is None or cookie.domain == domain:
                    if path is None or cookie.path == path:
                        return cookie.value
        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))

    def _find_no_duplicates(self, name, domain=None, path=None):
        """Both ``__get_item__`` and ``get`` call this function: it's never
        used elsewhere in Requests. Takes as args name and optional domain and
        path. Returns a cookie.value. Throws KeyError if cookie is not found
        and CookieConflictError if there are multiple cookies that match name
        and optionally domain and path."""
        toReturn = None
        for cookie in iter(self):
            if cookie.name == name:
                if domain is None or cookie.domain == domain:
                    if path is None or cookie.path == path:
                        if toReturn is not None:  # if there are multiple cookies that meet passed in criteria
                            raise CookieConflictError('There are multiple cookies with name, %r' % (name))
                        toReturn = cookie.value  # we will eventually return this as long as no cookie conflict
        # BUG FIX: compare against None rather than truthiness so a cookie
        # whose value is falsy (e.g. the empty string '') is still returned
        # instead of incorrectly raising KeyError.
        if toReturn is not None:
            return toReturn
        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))

    def __getstate__(self):
        """Unlike a normal CookieJar, this class is pickleable."""
        state = self.__dict__.copy()
        # remove the unpickleable RLock object
        state.pop('_cookies_lock')
        return state

    def __setstate__(self, state):
        """Unlike a normal CookieJar, this class is pickleable."""
        self.__dict__.update(state)
        if '_cookies_lock' not in self.__dict__:
            self._cookies_lock = threading.RLock()

    def copy(self):
        """Return a copy of this RequestsCookieJar."""
        new_cj = RequestsCookieJar()
        new_cj.update(self)
        return new_cj
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
# We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
    """Make a cookie from underspecified parameters.

    By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie").
    """
    spec = {
        'version': 0,
        'name': name,
        'value': value,
        'port': None,
        'domain': '',
        'path': '/',
        'secure': False,
        'expires': None,
        'discard': True,
        'comment': None,
        'comment_url': None,
        'rest': {'HttpOnly': None},
        'rfc2109': False,
    }
    # Reject keyword arguments that cookielib.Cookie does not understand.
    unknown = set(kwargs) - set(spec)
    if unknown:
        err = 'create_cookie() got unexpected keyword arguments: %s'
        raise TypeError(err % list(unknown))
    spec.update(kwargs)
    # cookielib.Cookie also wants the *_specified bookkeeping flags, derived
    # from whichever values the caller ended up providing.
    spec['port_specified'] = bool(spec['port'])
    spec['domain_specified'] = bool(spec['domain'])
    spec['domain_initial_dot'] = spec['domain'].startswith('.')
    spec['path_specified'] = bool(spec['path'])
    return cookielib.Cookie(**spec)
def morsel_to_cookie(morsel):
    """Convert a Morsel object into a Cookie containing the one k/v pair."""
    expires = None
    if morsel['max-age']:
        # Max-Age is relative (seconds); convert to an absolute epoch time.
        try:
            expires = int(time.time() + int(morsel['max-age']))
        except ValueError:
            raise TypeError('max-age: %s must be integer' % morsel['max-age'])
    elif morsel['expires']:
        # Expires is an absolute HTTP date, e.g. 'Thu, 01-Jan-1970 00:00:00 GMT';
        # parse it as UTC via timegm().
        time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
        expires = calendar.timegm(
            time.strptime(morsel['expires'], time_template)
        )
    return create_cookie(
        comment=morsel['comment'],
        comment_url=bool(morsel['comment']),
        discard=False,
        domain=morsel['domain'],
        expires=expires,
        name=morsel.key,
        path=morsel['path'],
        port=None,
        rest={'HttpOnly': morsel['httponly']},
        rfc2109=False,
        secure=bool(morsel['secure']),
        value=morsel.value,
        version=morsel['version'] or 0,
    )
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
    """Returns a CookieJar from a key/value dictionary.

    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :param cookiejar: (optional) A cookiejar to add the cookies to.
    :param overwrite: (optional) If False, will not replace cookies
        already in the jar with new ones.
    """
    if cookiejar is None:
        cookiejar = RequestsCookieJar()
    if cookie_dict is not None:
        # Snapshot existing names once so the overwrite=False check is not
        # affected by cookies added during this loop.
        names_from_jar = [cookie.name for cookie in cookiejar]
        for name in cookie_dict:
            if overwrite or (name not in names_from_jar):
                cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
    return cookiejar
def merge_cookies(cookiejar, cookies):
    """Add cookies to cookiejar and returns a merged CookieJar.

    :param cookiejar: CookieJar object to add the cookies to.
    :param cookies: Dictionary or CookieJar object to be added.
    """
    if not isinstance(cookiejar, cookielib.CookieJar):
        raise ValueError('You can only merge into CookieJar')
    if isinstance(cookies, dict):
        # Dict input: existing cookies win (overwrite=False).
        cookiejar = cookiejar_from_dict(
            cookies, cookiejar=cookiejar, overwrite=False)
    elif isinstance(cookies, cookielib.CookieJar):
        try:
            # RequestsCookieJar has update(); plain CookieJar does not.
            cookiejar.update(cookies)
        except AttributeError:
            for cookie_in_jar in cookies:
                cookiejar.set_cookie(cookie_in_jar)
    return cookiejar
| mit |
rcwoolley/device-cloud-python | device_cloud/osal.py | 1 | 3073 | '''
Copyright (c) 2016-2017 Wind River Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
OR CONDITIONS OF ANY KIND, either express or implied.
'''
"""
Operating System Abstraction Layer (OSAL). This module provides abstractions of
functions that are different on different operating systems.
"""
import os
import platform
import subprocess
import sys
# Constants
NOT_SUPPORTED = -20
EXECUTION_FAILURE = -21
BAD_PARAMETER = -22
# Setup platform info statics
WIN32 = sys.platform.startswith('win32')
LINUX = sys.platform.startswith('linux')
MACOS = sys.platform.startswith('darwin')
POSIX = LINUX or MACOS
OTHER = not POSIX and not WIN32
# Define Functions
def execl(*args):
    """
    Replaces the current process with a new instance of the specified
    executable. This function will only return if there is an issue starting the
    new instance, in which case it will return false. Otherwise, it will not
    return.
    """
    retval = EXECUTION_FAILURE
    if POSIX:
        # On success execvp() never returns (the process image is replaced);
        # on failure it raises OSError, so retval is effectively unreachable
        # on this branch.
        os.execvp(args[0], args)
    elif WIN32:
        # NOTE(review): on Windows the target is re-launched through the
        # current Python interpreter; args[0] is presumably a script path
        # rather than a binary - confirm with callers.
        os.execvp(sys.executable, args)
    else:
        retval = NOT_SUPPORTED
    return retval
def os_kernel():
    """
    Get the operating system's kernel version
    """
    # Linux reports the kernel via release(); Windows and macOS via version().
    if LINUX:
        return platform.release()
    if WIN32 or MACOS:
        return platform.version()
    return "Unknown"
def os_name():
    """
    Get the operating system name
    """
    name = "Unknown"
    if LINUX:
        # NOTE: platform.linux_distribution() was deprecated in 3.5 and
        # removed in Python 3.8; this branch breaks there.
        distro = platform.linux_distribution()
        # "uname -o" prints the OS name (e.g. "GNU/Linux"); [:-1] strips
        # the trailing newline.
        plat = subprocess.check_output(["uname", "-o"])[:-1].decode()
        name = "{} ({})".format(distro[0], plat)
    elif WIN32:
        name = platform.system()
    elif MACOS:
        name = "macOS"
    return name
def os_version():
    """
    Get the operating system version
    """
    ver = "Unknown"
    if LINUX:
        # NOTE: platform.linux_distribution() was removed in Python 3.8;
        # this branch breaks there.
        distro = platform.linux_distribution()
        # e.g. "16.04-xenial": distribution version plus codename.
        ver = "{}-{}".format(distro[1], distro[2])
    elif WIN32:
        ver = platform.release()
    elif MACOS:
        ver = platform.mac_ver()[0]
    return ver
def system_reboot(delay=0, force=True):
    """
    Reboot the system.

    Thin wrapper around system_shutdown() with reboot=True; see that
    function for parameter semantics.
    """
    return system_shutdown(delay=delay, reboot=True, force=force)
def system_shutdown(delay=0, reboot=False, force=True):
    """
    Run the system shutdown command. Can be used to reboot the system.

    delay is expressed in minutes (the Windows branch converts it to the
    seconds its shutdown command expects). The force flag only has an
    effect on Windows ("/f"); the POSIX branch ignores it.
    """
    command = "shutdown "
    if POSIX:
        command += "-r " if reboot else "-h "
        # "now" for an immediate shutdown, otherwise "+N" minutes from now.
        command += "now " if delay == 0 else "+{} ".format(delay)
    elif WIN32:
        command += "/r " if reboot else "/s "
        # Windows shutdown takes the delay in seconds.
        command += "/t {} ".format(delay*60)
        command += "/f" if force else ""
    else:
        return NOT_SUPPORTED
    return os.system(command)
| apache-2.0 |
agry/NGECore2 | scripts/mobiles/tatooine/shinn_guard.py | 2 | 1336 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    # Build and register the mobile (NPC) template for the Tatooine
    # "shinn guard" creature with the core spawn service.
    mobileTemplate = MobileTemplate()
    mobileTemplate.setCreatureName('tatooine_shinn_guard')
    mobileTemplate.setLevel(8)
    mobileTemplate.setDifficulty(Difficulty.NORMAL)
    # Spawn 4-8 units away from the spawn point.
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(False)
    mobileTemplate.setScale(1)
    mobileTemplate.setSocialGroup("shinn mugger")
    mobileTemplate.setAssistRange(5)
    mobileTemplate.setStalker(False)
    mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
    # Visual appearance template(s) for this NPC.
    templates = Vector()
    templates.add('object/mobile/shared_dressed_mugger.iff')
    mobileTemplate.setTemplates(templates)
    # Weapon loadout: CDEF carbine, 'energy' damage type. NOTE(review):
    # the 1.0 and 15 arguments are presumably damage/range bounds -
    # confirm against the WeaponTemplate constructor.
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_cdef.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)
    # No special attacks; only the default ranged shot.
    attacks = Vector()
    mobileTemplate.setDefaultAttack('rangedShot')
    mobileTemplate.setAttacks(attacks)
    core.spawnService.addMobileTemplate('shinn_guard', mobileTemplate)
    return
hztony/robotframework-selenium2library | test/unit/locators/test_elementfinder.py | 28 | 15184 | import unittest
import os
from Selenium2Library.locators import ElementFinder
from mockito import *
class ElementFinderTests(unittest.TestCase):
def test_find_with_invalid_prefix(self):
finder = ElementFinder()
browser = mock()
try:
self.assertRaises(ValueError, finder.find, browser, "something=test1")
except ValueError as e:
self.assertEqual(e.message, "Element locator with prefix 'something' is not supported")
def test_find_with_null_browser(self):
finder = ElementFinder()
self.assertRaises(AssertionError,
finder.find, None, "id=test1")
def test_find_with_null_locator(self):
finder = ElementFinder()
browser = mock()
self.assertRaises(AssertionError,
finder.find, browser, None)
def test_find_with_empty_locator(self):
finder = ElementFinder()
browser = mock()
self.assertRaises(AssertionError,
finder.find, browser, "")
def test_find_with_no_tag(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1")
verify(browser).find_elements_by_xpath("//*[(@id='test1' or @name='test1')]")
def test_find_with_tag(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='div')
verify(browser).find_elements_by_xpath("//div[(@id='test1' or @name='test1')]")
def test_find_with_locator_with_apos(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test '1'")
verify(browser).find_elements_by_xpath("//*[(@id=\"test '1'\" or @name=\"test '1'\")]")
def test_find_with_locator_with_quote(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test \"1\"")
verify(browser).find_elements_by_xpath("//*[(@id='test \"1\"' or @name='test \"1\"')]")
def test_find_with_locator_with_quote_and_apos(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test \"1\" and '2'")
verify(browser).find_elements_by_xpath(
"//*[(@id=concat('test \"1\" and ', \"'\", '2', \"'\", '') or @name=concat('test \"1\" and ', \"'\", '2', \"'\", ''))]")
def test_find_with_a(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='a')
verify(browser).find_elements_by_xpath(
"//a[(@id='test1' or @name='test1' or @href='test1' or normalize-space(descendant-or-self::text())='test1' or @href='http://localhost/test1')]")
def test_find_with_link_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='link')
verify(browser).find_elements_by_xpath(
"//a[(@id='test1' or @name='test1' or @href='test1' or normalize-space(descendant-or-self::text())='test1' or @href='http://localhost/test1')]")
def test_find_with_img(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='img')
verify(browser).find_elements_by_xpath(
"//img[(@id='test1' or @name='test1' or @src='test1' or @alt='test1' or @src='http://localhost/test1')]")
def test_find_with_image_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='image')
verify(browser).find_elements_by_xpath(
"//img[(@id='test1' or @name='test1' or @src='test1' or @alt='test1' or @src='http://localhost/test1')]")
def test_find_with_input(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='input')
verify(browser).find_elements_by_xpath(
"//input[(@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_radio_button_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='radio button')
verify(browser).find_elements_by_xpath(
"//input[@type='radio' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_checkbox_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='checkbox')
verify(browser).find_elements_by_xpath(
"//input[@type='checkbox' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_file_upload_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='file upload')
verify(browser).find_elements_by_xpath(
"//input[@type='file' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_text_field_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='text field')
verify(browser).find_elements_by_xpath(
"//input[@type='text' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_button(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='button')
verify(browser).find_elements_by_xpath(
"//button[(@id='test1' or @name='test1' or @value='test1' or normalize-space(descendant-or-self::text())='test1')]")
def test_find_with_select(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='select')
verify(browser).find_elements_by_xpath(
"//select[(@id='test1' or @name='test1')]")
def test_find_with_list_synonym(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='list')
verify(browser).find_elements_by_xpath(
"//select[(@id='test1' or @name='test1')]")
def test_find_with_implicit_xpath(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_xpath("//*[(@test='1')]").thenReturn(elements)
result = finder.find(browser, "//*[(@test='1')]")
self.assertEqual(result, elements)
result = finder.find(browser, "//*[(@test='1')]", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_identifier(self):
finder = ElementFinder()
browser = mock()
id_elements = self._make_mock_elements('div', 'a')
name_elements = self._make_mock_elements('span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(list(id_elements)).thenReturn(list(id_elements))
when(browser).find_elements_by_name("test1").thenReturn(list(name_elements)).thenReturn(list(name_elements))
all_elements = list(id_elements)
all_elements.extend(name_elements)
result = finder.find(browser, "identifier=test1")
self.assertEqual(result, all_elements)
result = finder.find(browser, "identifier=test1", tag='a')
self.assertEqual(result, [id_elements[1], name_elements[1]])
def test_find_by_id(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "id=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "id=test1", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_name(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_name("test1").thenReturn(elements)
result = finder.find(browser, "name=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "name=test1", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_xpath(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_xpath("//*[(@test='1')]").thenReturn(elements)
result = finder.find(browser, "xpath=//*[(@test='1')]")
self.assertEqual(result, elements)
result = finder.find(browser, "xpath=//*[(@test='1')]", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_dom(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).execute_script("return document.getElementsByTagName('a');").thenReturn(
[elements[1], elements[3]])
result = finder.find(browser, "dom=document.getElementsByTagName('a')")
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_link_text(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_link_text("my link").thenReturn(elements)
result = finder.find(browser, "link=my link")
self.assertEqual(result, elements)
result = finder.find(browser, "link=my link", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_css_selector(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_css_selector("#test1").thenReturn(elements)
result = finder.find(browser, "css=#test1")
self.assertEqual(result, elements)
result = finder.find(browser, "css=#test1", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_tag_name(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_tag_name("div").thenReturn(elements)
result = finder.find(browser, "tag=div")
self.assertEqual(result, elements)
result = finder.find(browser, "tag=div", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_with_sloppy_prefix(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "ID=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "iD=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "id=test1")
self.assertEqual(result, elements)
result = finder.find(browser, " id =test1")
self.assertEqual(result, elements)
def test_find_with_sloppy_criteria(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "id= test1 ")
self.assertEqual(result, elements)
def test_find_by_id_with_synonym_and_constraints(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'input', 'span', 'input', 'a', 'input', 'div', 'input')
elements[1].set_attribute('type', 'radio')
elements[3].set_attribute('type', 'checkbox')
elements[5].set_attribute('type', 'text')
elements[7].set_attribute('type', 'file')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "id=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "id=test1", tag='input')
self.assertEqual(result, [elements[1], elements[3], elements[5], elements[7]])
result = finder.find(browser, "id=test1", tag='radio button')
self.assertEqual(result, [elements[1]])
result = finder.find(browser, "id=test1", tag='checkbox')
self.assertEqual(result, [elements[3]])
result = finder.find(browser, "id=test1", tag='text field')
self.assertEqual(result, [elements[5]])
result = finder.find(browser, "id=test1", tag='file upload')
self.assertEqual(result, [elements[7]])
def test_find_returns_bad_values(self):
finder = ElementFinder()
browser = mock()
# selenium.webdriver.ie.webdriver.WebDriver sometimes returns these
for bad_value in (None, {'': None}):
for func_name in ('find_elements_by_id', 'find_elements_by_name',
'find_elements_by_xpath', 'find_elements_by_link_text',
'find_elements_by_css_selector', 'find_elements_by_tag_name'):
when_find_func = getattr(when(browser), func_name)
when_find_func(any()).thenReturn(bad_value)
for locator in ("identifier=it", "id=it", "name=it", "xpath=//div",
"link=it", "css=div.it", "tag=div", "default"):
result = finder.find(browser, locator)
self.assertEqual(result, [])
result = finder.find(browser, locator, tag='div')
self.assertEqual(result, [])
    def _make_mock_elements(self, *tags):
        # Build one mock WebElement per requested tag name, in order.
        elements = []
        for tag in tags:
            element = self._make_mock_element(tag)
            elements.append(element)
        return elements
    def _make_mock_element(self, tag):
        # Build a mockito mock resembling a WebElement of the given tag,
        # with get/set attribute helpers backed by a plain dict so tests
        # can seed attributes like 'type'.
        element = mock()
        element.tag_name = tag
        element.attributes = {}

        def set_attribute(name, value):
            element.attributes[name] = value
        element.set_attribute = set_attribute

        def get_attribute(name):
            return element.attributes[name]
        element.get_attribute = get_attribute

        return element
| apache-2.0 |
dnarvaez/virtualenv-bootstrap | bootstrap.py | 1 | 4429 | #!/usr/bin/env python3
# Copyright 2013 Daniel Narvaez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is from https://github.com/dnarvaez/virtualenv-bootstrap
import hashlib
import json
import os
import shutil
import subprocess
import sys
import tarfile
import urllib.request
base_dir = os.path.dirname(os.path.abspath(__file__))
environ_namespace = "TEST"
start_message = "Installing virtualenv"
end_message = "\n"
packages = ["osourcer"]
submodules = []
virtualenv_version = "1.8.4"
virtualenv_dir = "sandbox"
cache_dir = "cache"
run_module = "osourcer.tool"
etag = "1"
def get_cache_dir():
    """Absolute path of the download cache directory (cache_dir under base_dir)."""
    return os.path.join(base_dir, cache_dir)
def get_virtualenv_dir():
    """Absolute path of the sandbox virtualenv directory."""
    return os.path.join(base_dir, virtualenv_dir)
def get_stamp_path():
    """Path of the JSON stamp file recording the etag and submodule hash."""
    return get_virtualenv_dir() + ".stamp"
def get_bin_path(name):
    """Path of executable *name* inside the virtualenv's bin directory."""
    return os.path.join(get_virtualenv_dir(), "bin", name)
def create_virtualenv():
    """Download virtualenv into the cache (if needed) and create the sandbox.

    The tarball for the pinned virtualenv_version is fetched from PyPI only
    when the extracted source directory is not already in the cache.
    """
    source_dir = os.path.join(get_cache_dir(),
                              "virtualenv-%s" % virtualenv_version)
    if not os.path.exists(source_dir):
        url = "https://pypi.python.org/packages/source/v/" \
              "virtualenv/virtualenv-%s.tar.gz" % virtualenv_version
        f = urllib.request.urlopen(url)
        # Stream the response straight into the tar extractor.
        with tarfile.open(fileobj=f, mode="r:gz") as tar:
            tar.extractall(get_cache_dir())
    # -q keeps virtualenv quiet; raises CalledProcessError on failure.
    subprocess.check_call(["python3",
                           os.path.join(source_dir, "virtualenv.py"),
                           "-q", get_virtualenv_dir()])
def get_submodule_dirs():
    """Absolute paths of all configured git submodules."""
    return [os.path.join(base_dir, submodule) for submodule in submodules]
def install_packages():
    """pip-install the configured packages plus every submodule into the env."""
    args = [get_bin_path("pip"), "-q", "install"]
    args.extend(packages)
    args.extend(get_submodule_dirs())
    subprocess.check_call(args)
def upgrade_submodules():
    """Reinstall the submodules in place without touching dependencies."""
    command = [get_bin_path("pip"), "-q", "install", "--no-deps", "--upgrade"]
    command += get_submodule_dirs()
    subprocess.check_call(command)
def compute_submodules_hash():
    """Return a SHA-256 hex digest fingerprinting the submodule trees.

    The digest covers every file path together with its mtime, so any
    file addition, removal or modification changes the result.
    """
    parts = []
    for submodule in submodules:
        for root, _dirs, files in os.walk(os.path.join(base_dir, submodule)):
            for name in files:
                path = os.path.join(root, name)
                mtime = os.lstat(path).st_mtime
                # Collect pieces and join once at the end; repeated string
                # concatenation in the loop was quadratic.
                parts.append("%s %s\n" % (mtime, path))
    data = "".join(parts)
    return hashlib.sha256(data.encode("utf-8")).hexdigest()
def check_stamp():
    """Compare the on-disk stamp with the current etag/submodules state.

    Returns:
        (etag_changed, submodules_changed) booleans.  Both are True when
        the stamp file is missing, unreadable or malformed, which forces
        a full rebuild.
    """
    try:
        with open(get_stamp_path()) as f:
            stamp = json.load(f)
    except (IOError, ValueError):
        return True, True
    if not isinstance(stamp, dict):
        # Valid JSON that is not an object (e.g. a list) previously escaped
        # the handler and crashed with AttributeError; treat it as stale.
        return True, True
    # .get() instead of [] so a stamp missing a key forces a rebuild
    # rather than raising KeyError.
    return (stamp.get("etag") != etag,
            stamp.get("submodules_hash") != compute_submodules_hash())
def write_stamp():
    """Persist the current etag and submodules fingerprint to disk."""
    with open(get_stamp_path(), "w") as f:
        json.dump({"etag": etag,
                   "submodules_hash": compute_submodules_hash()}, f)
def update_submodules():
    """Run ``git submodule update --init`` for every configured submodule.

    Skipped when the <namespace>_UPDATE_SUBMODULES environment variable is
    set to anything other than "yes" (the default).
    """
    flag = os.environ.get(environ_namespace + "_UPDATE_SUBMODULES", "yes")
    if flag != "yes":
        return
    os.chdir(base_dir)
    for name in submodules:
        subprocess.check_call(
            ["git", "submodule", "update", "--init", name])
def main():
    """Build or refresh the sandbox as needed, then exec the target module."""
    os.environ["PIP_DOWNLOAD_CACHE"] = get_cache_dir()
    os.environ[environ_namespace + "_BASE_DIR"] = base_dir
    os.environ[environ_namespace + "_VIRTUALENV"] = get_virtualenv_dir()
    etag_changed, submodules_changed = check_stamp()
    if etag_changed:
        # The etag changed: rebuild the whole sandbox from scratch.
        print(start_message)
        update_submodules()
        try:
            shutil.rmtree(get_virtualenv_dir())
        except OSError:
            # The sandbox may not exist yet; nothing to remove.
            pass
        create_virtualenv()
        install_packages()
        write_stamp()
        print(end_message)
    elif submodules_changed:
        # Only the submodules changed: an in-place upgrade is enough.
        upgrade_submodules()
        write_stamp()
    command = [get_bin_path("python3"), "-m", run_module]
    command.extend(sys.argv[1:])
    # Replace the current process with the sandboxed interpreter.
    os.execl(command[0], *command)
if __name__ == "__main__":
main()
| apache-2.0 |
shawnsi/ansible-modules-core | cloud/digital_ocean/digital_ocean.py | 23 | 14677 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean
short_description: Create/delete a droplet/SSH_key in DigitalOcean
description:
- Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key.
version_added: "1.3"
author: "Vincent Viallet (@zbal)"
options:
command:
description:
- Which target you want to operate on.
default: droplet
choices: ['droplet', 'ssh']
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'active', 'absent', 'deleted']
api_token:
description:
- DigitalOcean api token.
version_added: "1.9.5"
id:
description:
- Numeric, the droplet id you want to operate on.
name:
description:
- String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key.
unique_name:
description:
- Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence.
version_added: "1.4"
default: "no"
choices: [ "yes", "no" ]
size_id:
description:
- This is the slug of the size you would like the droplet created with.
image_id:
description:
- This is the slug of the image you would like the droplet created with.
region_id:
description:
- This is the slug of the region you would like your server to be created in.
ssh_key_ids:
description:
    - Optional, array of SSH key (numeric) IDs that you would like to be added to the server.
virtio:
description:
- "Bool, turn on virtio driver in droplet for improved network and storage I/O."
version_added: "1.4"
default: "yes"
choices: [ "yes", "no" ]
private_networking:
description:
- "Bool, add an additional, private network interface to droplet for inter-droplet communication."
version_added: "1.4"
default: "no"
choices: [ "yes", "no" ]
backups_enabled:
description:
- Optional, Boolean, enables backups for your droplet.
version_added: "1.6"
default: "no"
choices: [ "yes", "no" ]
user_data:
description:
- opaque blob of data which is made available to the droplet
version_added: "2.0"
required: false
default: None
wait:
description:
- Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned.
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token.
- As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token).
- If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired.
Upgrade Ansible or, if unable to, try downloading the latest version of this module from github and putting it into a 'library' directory.
requirements:
- "python >= 2.6"
- dopy
'''
EXAMPLES = '''
# Ensure a SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = True
- digital_ocean:
state: present
command: ssh
name: my_ssh_key
ssh_pub_key: 'ssh-rsa AAAA...'
api_token: XXX
# Create a new Droplet
# Will return the droplet details including the droplet id (used for idempotence)
- digital_ocean:
state: present
command: droplet
name: mydroplet
api_token: XXX
size_id: 2gb
region_id: ams2
image_id: fedora-19-x64
wait_timeout: 500
register: my_droplet
- debug: msg="ID is {{ my_droplet.droplet.id }}"
- debug: msg="IP is {{ my_droplet.droplet.ip_address }}"
# Ensure a droplet is present
# If droplet id already exist, will return the droplet details and changed = False
# If no droplet matches the id, a new droplet will be created and the droplet details (including the new id) are returned, changed = True.
- digital_ocean:
state: present
command: droplet
id: 123
name: mydroplet
api_token: XXX
size_id: 2gb
region_id: ams2
image_id: fedora-19-x64
wait_timeout: 500
# Create a droplet with ssh key
# The ssh key id can be passed as argument at the creation of a droplet (see ssh_key_ids).
# Several keys can be added to ssh_key_ids as id1,id2,id3
# The keys are used to connect as root to the droplet.
- digital_ocean:
state: present
ssh_key_ids: 123,456
name: mydroplet
api_token: XXX
size_id: 2gb
region_id: ams2
image_id: fedora-19-x64
'''
import os
import time
from distutils.version import LooseVersion
HAS_DOPY = True
try:
import dopy
from dopy.manager import DoError, DoManager
if LooseVersion(dopy.__version__) < LooseVersion('0.3.2'):
HAS_DOPY = False
except ImportError:
HAS_DOPY = False
class TimeoutError(DoError):
    # Raised when a droplet fails to reach the desired state within
    # wait_timeout seconds; carries the droplet id so callers can report
    # which droplet timed out (see main(), which passes it to fail_json).
    def __init__(self, msg, id):
        super(TimeoutError, self).__init__(msg)
        self.id = id
class JsonfyMixIn(object):
    """Mix-in exposing an object's attributes as a JSON-serializable dict."""

    def to_json(self):
        # The instance __dict__ already maps attribute names to values,
        # which is exactly the shape Ansible expects in exit_json().
        return self.__dict__
class Droplet(JsonfyMixIn):
    # Wrapper around a DigitalOcean droplet.  All instances share one
    # DoManager, installed class-wide by setup().
    manager = None
    def __init__(self, droplet_json):
        # Droplets start in the API's 'new' state until a refresh reports
        # otherwise; the JSON payload's keys become instance attributes.
        self.status = 'new'
        self.__dict__.update(droplet_json)
    def is_powered_on(self):
        # 'active' is the API's running state.
        return self.status == 'active'
    def update_attr(self, attrs=None):
        # Refresh attributes either from a supplied payload or by
        # re-fetching the droplet from the API.
        # NOTE(review): dict.iteritems() is Python-2-only; .items() would
        # work on both 2 and 3.
        if attrs:
            for k, v in attrs.iteritems():
                setattr(self, k, v)
        else:
            json = self.manager.show_droplet(self.id)
            # Only adopt the fresh payload once an IP has been assigned,
            # so a half-provisioned droplet does not clobber known state.
            if json['ip_address']:
                self.update_attr(json)
    def power_on(self):
        assert self.status == 'off', 'Can only power on a closed one.'
        json = self.manager.power_on_droplet(self.id)
        self.update_attr(json)
    def ensure_powered_on(self, wait=True, wait_timeout=300):
        # Bring the droplet to the 'active' state, optionally polling the
        # API (every <=20s) until it is running or wait_timeout expires.
        # Raises TimeoutError on timeout or if no IP was ever assigned.
        if self.is_powered_on():
            return
        if self.status == 'off': # powered off
            self.power_on()
        if wait:
            end_time = time.time() + wait_timeout
            while time.time() < end_time:
                time.sleep(min(20, end_time - time.time()))
                self.update_attr()
                if self.is_powered_on():
                    if not self.ip_address:
                        raise TimeoutError('No ip is found.', self.id)
                    return
            raise TimeoutError('Wait for droplet running timeout', self.id)
    def destroy(self):
        # scrub_data=True asks DigitalOcean to wipe the disk on delete.
        return self.manager.destroy_droplet(self.id, scrub_data=True)
    @classmethod
    def setup(cls, api_token):
        # Bind the class to a v2 API manager for the given token.
        cls.manager = DoManager(None, api_token, api_version=2)
    @classmethod
    def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False, user_data=None):
        # Create a new droplet and return its wrapper.  The booleans are
        # lower-cased strings because the v2 API expects "true"/"false".
        private_networking_lower = str(private_networking).lower()
        backups_enabled_lower = str(backups_enabled).lower()
        json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking_lower, backups_enabled_lower,user_data)
        droplet = cls(json)
        return droplet
    @classmethod
    def find(cls, id=None, name=None):
        # Locate a droplet by id (preferred, guaranteed unique) or by
        # hostname; returns False when nothing matches.
        if not id and not name:
            return False
        droplets = cls.list_all()
        # Check first by id.  digital ocean requires that it be unique
        for droplet in droplets:
            if droplet.id == id:
                return droplet
        # Failing that, check by hostname.
        for droplet in droplets:
            if droplet.name == name:
                return droplet
        return False
    @classmethod
    def list_all(cls):
        # Every active droplet on the account, wrapped as Droplet objects.
        json = cls.manager.all_active_droplets()
        return map(cls, json)
class SSH(JsonfyMixIn):
    """Wrapper around a DigitalOcean SSH key, backed by a shared DoManager."""
    manager = None

    def __init__(self, ssh_key_json):
        self.__dict__.update(ssh_key_json)
    # Refreshing from an API payload is the same operation as construction.
    update_attr = __init__

    def destroy(self):
        """Delete this key from the account; always returns True."""
        self.manager.destroy_ssh_key(self.id)
        return True

    @classmethod
    def setup(cls, api_token):
        """Bind the class to a v2 API manager for the given token."""
        cls.manager = DoManager(None, api_token, api_version=2)

    @classmethod
    def find(cls, name):
        """Return the key whose name matches *name*, or False when absent."""
        if not name:
            return False
        for candidate in cls.list_all():
            if candidate.name == name:
                return candidate
        return False

    @classmethod
    def list_all(cls):
        """Every SSH key on the account, wrapped as SSH objects."""
        return map(cls, cls.manager.all_ssh_keys())

    @classmethod
    def add(cls, name, key_pub):
        """Register a new public key and return its wrapper."""
        return cls(cls.manager.new_ssh_key(name, key_pub))
def core(module):
    # Main dispatch: create/delete droplets or SSH keys according to the
    # module's 'command' and 'state' parameters.  Every path terminates in
    # module.exit_json()/fail_json(), which do not return.
    def getkeyordie(k):
        # Fetch a required parameter, aborting the module run if unset.
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v
    try:
        # NOTE(review): if DO_API_TOKEN is unset the subscript raises
        # KeyError before DO_API_KEY is ever consulted, so the documented
        # fallback to DO_API_KEY only works when api_token/DO_API_TOKEN
        # are present-but-falsy.  os.environ.get() would fix this.
        api_token = module.params['api_token'] or os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY']
    except KeyError, e:
        module.fail_json(msg='Unable to load %s' % e.message)
    changed = True
    command = module.params['command']
    state = module.params['state']
    if command == 'droplet':
        Droplet.setup(api_token)
        if state in ('active', 'present'):
            # First, try to find a droplet by id.
            droplet = Droplet.find(id=module.params['id'])
            # If we couldn't find the droplet and the user is allowing unique
            # hostnames, then check to see if a droplet with the specified
            # hostname already exists.
            if not droplet and module.params['unique_name']:
                droplet = Droplet.find(name=getkeyordie('name'))
            # If both of those attempts failed, then create a new droplet.
            if not droplet:
                droplet = Droplet.add(
                    name=getkeyordie('name'),
                    size_id=getkeyordie('size_id'),
                    image_id=getkeyordie('image_id'),
                    region_id=getkeyordie('region_id'),
                    ssh_key_ids=module.params['ssh_key_ids'],
                    virtio=module.params['virtio'],
                    private_networking=module.params['private_networking'],
                    backups_enabled=module.params['backups_enabled'],
                    user_data=module.params.get('user_data'),
                )
            # An already-running droplet means nothing changed; a found-but-
            # stopped droplet is powered on below and counts as a change.
            if droplet.is_powered_on():
                changed = False
            droplet.ensure_powered_on(
                wait=getkeyordie('wait'),
                wait_timeout=getkeyordie('wait_timeout')
            )
            module.exit_json(changed=changed, droplet=droplet.to_json())
        elif state in ('absent', 'deleted'):
            # First, try to find a droplet by id.
            droplet = Droplet.find(module.params['id'])
            # If we couldn't find the droplet and the user is allowing unique
            # hostnames, then check to see if a droplet with the specified
            # hostname already exists.
            if not droplet and module.params['unique_name']:
                droplet = Droplet.find(name=getkeyordie('name'))
            if not droplet:
                module.exit_json(changed=False, msg='The droplet is not found.')
            # NOTE(review): event_json is unused; destroy() is called only
            # for its side effect.
            event_json = droplet.destroy()
            module.exit_json(changed=True)
    elif command == 'ssh':
        SSH.setup(api_token)
        name = getkeyordie('name')
        if state in ('active', 'present'):
            # Idempotent: an existing key of the same name is reused.
            key = SSH.find(name)
            if key:
                module.exit_json(changed=False, ssh_key=key.to_json())
            key = SSH.add(name, getkeyordie('ssh_pub_key'))
            module.exit_json(changed=True, ssh_key=key.to_json())
        elif state in ('absent', 'deleted'):
            key = SSH.find(name)
            if not key:
                module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
            key.destroy()
            module.exit_json(changed=True)
def main():
    # Ansible entry point: declare the argument spec, validate the
    # environment, and delegate the real work to core().
    module = AnsibleModule(
        argument_spec = dict(
            command = dict(choices=['droplet', 'ssh'], default='droplet'),
            state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'),
            api_token = dict(aliases=['API_TOKEN'], no_log=True),
            name = dict(type='str'),
            size_id = dict(),
            image_id = dict(),
            region_id = dict(),
            ssh_key_ids = dict(type='list'),
            virtio = dict(type='bool', default='yes'),
            private_networking = dict(type='bool', default='no'),
            backups_enabled = dict(type='bool', default='no'),
            id = dict(aliases=['droplet_id'], type='int'),
            unique_name = dict(type='bool', default='no'),
            user_data = dict(default=None),
            wait = dict(type='bool', default=True),
            wait_timeout = dict(default=300, type='int'),
            ssh_pub_key = dict(type='str'),
        ),
        # Droplet creation needs all three of these together.
        required_together = (
            ['size_id', 'image_id', 'region_id'],
        ),
        # Droplet-shaped options make no sense for the 'ssh' command.
        mutually_exclusive = (
            ['size_id', 'ssh_pub_key'],
            ['image_id', 'ssh_pub_key'],
            ['region_id', 'ssh_pub_key'],
        ),
        required_one_of = (
            ['id', 'name'],
        ),
    )
    if not HAS_DOPY:
        module.fail_json(msg='dopy >= 0.3.2 required for this module')
    try:
        core(module)
    except TimeoutError, e:
        # Include the droplet id so callers can identify the stuck droplet.
        module.fail_json(msg=str(e), id=e.id)
    except (DoError, Exception), e:
        # NOTE(review): catching Exception here makes this a catch-all;
        # any unexpected error is reported as a module failure.
        module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
karwa/swift | utils/gyb_syntax_support/ExprNodes.py | 5 | 19975 | from Child import Child
from Node import Node # noqa: I201
EXPR_NODES = [
# An inout expression.
# &x
Node('InOutExpr', kind='Expr',
children=[
Child('Ampersand', kind='PrefixAmpersandToken'),
Child('Expression', kind='Expr'),
]),
# A #column expression.
Node('PoundColumnExpr', kind='Expr',
children=[
Child('PoundColumn', kind='PoundColumnToken'),
]),
Node('TupleExprElementList', kind='SyntaxCollection',
element='TupleExprElement'),
Node('ArrayElementList', kind='SyntaxCollection',
element='ArrayElement'),
Node('DictionaryElementList', kind='SyntaxCollection',
element='DictionaryElement'),
Node('StringLiteralSegments', kind='SyntaxCollection',
element='Syntax', element_name='Segment',
element_choices=['StringSegment', 'ExpressionSegment']),
# The try operator.
# try foo()
# try? foo()
# try! foo()
Node('TryExpr', kind='Expr',
children=[
Child('TryKeyword', kind='TryToken'),
Child('QuestionOrExclamationMark', kind='Token',
is_optional=True,
token_choices=[
'PostfixQuestionMarkToken',
'ExclamationMarkToken',
]),
Child('Expression', kind='Expr'),
]),
# declname-arguments -> '(' declname-argument-list ')'
# declname-argument-list -> declname-argument*
# declname-argument -> identifier ':'
Node('DeclNameArgument', kind='Syntax',
children=[
Child('Name', kind='Token'),
Child('Colon', kind='ColonToken'),
]),
Node('DeclNameArgumentList', kind='SyntaxCollection',
element='DeclNameArgument'),
Node('DeclNameArguments', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('Arguments', kind='DeclNameArgumentList',
collection_element_name='Argument'),
Child('RightParen', kind='RightParenToken'),
]),
# An identifier expression.
Node('IdentifierExpr', kind='Expr',
children=[
Child('Identifier', kind='Token',
token_choices=[
'IdentifierToken',
'SelfToken',
'CapitalSelfToken',
'DollarIdentifierToken',
'SpacedBinaryOperatorToken',
]),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True),
]),
    # A 'super' expression.
Node('SuperRefExpr', kind='Expr',
children=[
Child('SuperKeyword', kind='SuperToken'),
]),
# A nil expression.
Node('NilLiteralExpr', kind='Expr',
children=[
Child('NilKeyword', kind='NilToken'),
]),
# A _ expression.
Node('DiscardAssignmentExpr', kind='Expr',
children=[
Child('Wildcard', kind='WildcardToken'),
]),
# An = expression.
Node('AssignmentExpr', kind='Expr',
children=[
Child('AssignToken', kind='EqualToken'),
]),
# A flat list of expressions before sequence folding, e.g. 1 + 2 + 3.
Node('SequenceExpr', kind='Expr',
children=[
Child('Elements', kind='ExprList',
collection_element_name='Element'),
]),
Node('ExprList', kind='SyntaxCollection',
element='Expr',
element_name='Expression',
description='''
A list of expressions connected by operators. This list is contained
by a `SequenceExprSyntax`.
'''),
# A #line expression.
Node('PoundLineExpr', kind='Expr',
children=[
Child('PoundLine', kind='PoundLineToken'),
]),
# A #file expression.
Node('PoundFileExpr', kind='Expr',
children=[
Child('PoundFile', kind='PoundFileToken'),
]),
# A #filePath expression.
Node('PoundFilePathExpr', kind='Expr',
children=[
Child('PoundFilePath', kind='PoundFilePathToken'),
]),
# A #function expression.
Node('PoundFunctionExpr', kind='Expr',
children=[
Child('PoundFunction', kind='PoundFunctionToken'),
]),
# A #dsohandle expression.
Node('PoundDsohandleExpr', kind='Expr',
children=[
Child('PoundDsohandle', kind='PoundDsohandleToken'),
]),
# symbolic-reference-expression -> identifier generic-argument-clause?
Node('SymbolicReferenceExpr', kind='Expr',
children=[
Child('Identifier', kind='IdentifierToken'),
Child('GenericArgumentClause', kind='GenericArgumentClause',
is_optional=True),
]),
# A prefix operator expression.
# -x
# !true
Node('PrefixOperatorExpr', kind='Expr',
children=[
Child('OperatorToken', kind='PrefixOperatorToken',
is_optional=True),
Child('PostfixExpression', kind='Expr'),
]),
# An operator like + or -.
# NOTE: This appears only in SequenceExpr.
Node('BinaryOperatorExpr', kind='Expr',
children=[
Child('OperatorToken', kind='BinaryOperatorToken'),
]),
# arrow-expr -> 'throws'? '->'
# NOTE: This appears only in SequenceExpr.
Node('ArrowExpr', kind='Expr',
children=[
Child('ThrowsToken', kind='ThrowsToken',
is_optional=True),
Child('ArrowToken', kind='ArrowToken'),
]),
# A floating-point literal
# 4.0
# -3.9
# +4e20
Node('FloatLiteralExpr', kind='Expr',
children=[
Child('FloatingDigits', kind='FloatingLiteralToken'),
]),
Node('TupleExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('ElementList', kind='TupleExprElementList',
collection_element_name='Element'),
Child('RightParen', kind='RightParenToken'),
]),
# Array literal, e.g. [1, 2, 3]
Node('ArrayExpr', kind='Expr',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Elements', kind='ArrayElementList',
collection_element_name='Element'),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
# Dictionary literal, e.g. [1:1, 2:2, 3:3]
Node('DictionaryExpr', kind='Expr',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Content', kind='Syntax',
node_choices=[
Child('Colon', kind='ColonToken'),
Child('Elements', kind='DictionaryElementList'),
]),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
# An element inside a tuple element list
Node('TupleExprElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Label', kind='Token',
is_optional=True,
token_choices=[
'IdentifierToken',
'WildcardToken'
]),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Expression', kind='Expr'),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# element inside an array expression: expression ','?
Node('ArrayElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Expression', kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
    # element inside a dictionary expression: key ':' value ','?
Node('DictionaryElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('KeyExpression', kind='Expr'),
Child('Colon', kind='ColonToken'),
Child('ValueExpression', kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# An integer literal.
# 3
# +3_400
# +0x4f
Node('IntegerLiteralExpr', kind='Expr',
children=[
Child('Digits', kind='IntegerLiteralToken'),
]),
# true or false
Node('BooleanLiteralExpr', kind='Expr',
children=[
Child("BooleanLiteral", kind='Token',
token_choices=[
'TrueToken',
'FalseToken',
])
]),
# a ? 1 : 0
Node('TernaryExpr', kind='Expr',
children=[
Child("ConditionExpression", kind='Expr'),
Child("QuestionMark", kind='InfixQuestionMarkToken'),
Child("FirstChoice", kind='Expr'),
Child("ColonMark", kind='ColonToken'),
Child("SecondChoice", kind='Expr')
]),
# expr?.name
Node('MemberAccessExpr', kind='Expr',
children=[
# The base needs to be optional to parse expressions in key paths
# like \.a
Child("Base", kind='Expr', is_optional=True),
Child("Dot", kind='Token',
token_choices=[
'PeriodToken', 'PrefixPeriodToken'
]),
# Name could be 'self'
Child("Name", kind='Token'),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True),
]),
# is TypeName
Node('IsExpr', kind='Expr',
children=[
Child("IsTok", kind='IsToken'),
Child("TypeName", kind='Type')
]),
# as TypeName
Node('AsExpr', kind='Expr',
children=[
Child("AsTok", kind='AsToken'),
Child("QuestionOrExclamationMark", kind='Token',
is_optional=True,
token_choices=[
'PostfixQuestionMarkToken',
'ExclamationMarkToken',
]),
Child("TypeName", kind='Type')
]),
# Type
Node('TypeExpr', kind='Expr',
children=[
Child('Type', kind='Type'),
]),
Node('ClosureCaptureItem', kind='Syntax',
traits=['WithTrailingComma'],
children=[
# FIXME: Add a 'CaptureSpecifier' node kind for `Specifier`.
Child("Specifier", kind='TokenList',
collection_element_name='SpecifierToken', is_optional=True),
Child("Name", kind='IdentifierToken', is_optional=True),
Child('AssignToken', kind='EqualToken', is_optional=True),
Child("Expression", kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
Node('ClosureCaptureItemList', kind='SyntaxCollection',
element='ClosureCaptureItem'),
Node('ClosureCaptureSignature', kind='Syntax',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Items', kind='ClosureCaptureItemList',
collection_element_name='Item', is_optional=True),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
Node('ClosureParam', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Name', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken',
]),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# a, b, c
Node('ClosureParamList', kind='SyntaxCollection', element='ClosureParam'),
Node('ClosureSignature', kind='Syntax',
children=[
Child('Capture', kind='ClosureCaptureSignature',
is_optional=True),
Child('Input', kind='Syntax', is_optional=True,
node_choices=[
Child('SimpleInput', kind='ClosureParamList'),
Child('Input', kind='ParameterClause'),
]),
Child('ThrowsTok', kind='ThrowsToken', is_optional=True),
Child('Output', kind='ReturnClause', is_optional=True),
Child('InTok', kind='InToken'),
]),
Node('ClosureExpr', kind='Expr',
traits=['Braced', 'WithStatements'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('Signature', kind='ClosureSignature', is_optional=True),
Child('Statements', kind='CodeBlockItemList',
collection_element_name='Statement'),
Child('RightBrace', kind='RightBraceToken'),
]),
# unresolved-pattern-expr -> pattern
Node('UnresolvedPatternExpr', kind='Expr',
children=[
Child('Pattern', kind='Pattern'),
]),
# call-expr -> expr '(' call-argument-list ')' closure-expr?
# | expr closure-expr
Node('FunctionCallExpr', kind='Expr',
children=[
Child('CalledExpression', kind='Expr'),
Child('LeftParen', kind='LeftParenToken',
is_optional=True),
Child('ArgumentList', kind='TupleExprElementList',
collection_element_name='Argument'),
Child('RightParen', kind='RightParenToken',
is_optional=True),
Child('TrailingClosure', kind='ClosureExpr',
is_optional=True),
]),
# subscript-expr -> expr '[' call-argument-list ']' closure-expr?
Node('SubscriptExpr', kind='Expr',
children=[
Child('CalledExpression', kind='Expr'),
Child('LeftBracket', kind='LeftSquareBracketToken'),
Child('ArgumentList', kind='TupleExprElementList',
collection_element_name='Argument'),
Child('RightBracket', kind='RightSquareBracketToken'),
Child('TrailingClosure', kind='ClosureExpr',
is_optional=True),
]),
# optional-chaining-expr -> expr '?'
Node('OptionalChainingExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('QuestionMark', kind='PostfixQuestionMarkToken'),
]),
# forced-value-expr -> expr '!'
Node('ForcedValueExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('ExclamationMark', kind='ExclamationMarkToken'),
]),
# postfix-unary-expr -> expr postfix-operator
Node('PostfixUnaryExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('OperatorToken', kind='PostfixOperatorToken'),
]),
# specialize-expr -> expr generic-argument-clause?
Node('SpecializeExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('GenericArgumentClause', kind='GenericArgumentClause'),
]),
# string literal segment in a string interpolation expression.
Node('StringSegment', kind='Syntax',
children=[
Child('Content', kind='StringSegmentToken'),
]),
# expression segment in a string interpolation expression.
Node('ExpressionSegment', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('Backslash', kind='BackslashToken'),
Child('Delimiter', kind='RawStringDelimiterToken',
is_optional=True),
Child('LeftParen', kind='LeftParenToken',
classification='StringInterpolationAnchor',
force_classification=True),
Child('Expressions', kind='TupleExprElementList',
collection_element_name='Expression'),
Child('RightParen', kind='StringInterpolationAnchorToken'),
]),
# e.g. "abc \(foo()) def"
Node('StringLiteralExpr', kind='Expr',
children=[
Child('OpenDelimiter', kind='RawStringDelimiterToken',
is_optional=True),
Child('OpenQuote', kind='Token',
token_choices=[
'StringQuoteToken',
'MultilineStringQuoteToken',
]),
Child('Segments', kind='StringLiteralSegments',
collection_element_name='Segment'),
Child('CloseQuote', kind='Token',
token_choices=[
'StringQuoteToken',
'MultilineStringQuoteToken',
]),
Child('CloseDelimiter', kind='RawStringDelimiterToken',
is_optional=True),
]),
# e.g. "\a.b[2].a"
Node('KeyPathExpr', kind='Expr',
children=[
Child('Backslash', kind='BackslashToken'),
Child('RootExpr', kind='Expr', is_optional=True,
node_choices=[
Child('IdentifierExpr', kind='IdentifierExpr'),
Child('SpecializeExpr', kind='SpecializeExpr')
]),
Child('Expression', kind='Expr'),
]),
# The period in the key path serves as the base on which the
# right-hand-side of the key path is evaluated
Node('KeyPathBaseExpr', kind='Expr',
children=[
Child('Period', kind='PeriodToken'),
]),
# e.g. "a." or "a"
Node('ObjcNamePiece', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('Dot', kind='PeriodToken', is_optional=True),
]),
# e.g. "a.b.c"
Node('ObjcName', kind='SyntaxCollection', element='ObjcNamePiece'),
# e.g. "#keyPath(a.b.c)"
Node('ObjcKeyPathExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('KeyPath', kind='PoundKeyPathToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Name', kind='ObjcName',
collection_element_name='NamePiece'),
Child('RightParen', kind='RightParenToken'),
]),
# e.g. "#selector(getter:Foo.bar)"
Node('ObjcSelectorExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('PoundSelector', kind='PoundSelectorToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Kind', kind='ContextualKeywordToken',
text_choices=['getter', 'setter'],
is_optional=True),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Name', kind='Expr'),
Child('RightParen', kind='RightParenToken'),
]),
# <#content#>
Node('EditorPlaceholderExpr', kind='Expr',
children=[
Child('Identifier', kind='IdentifierToken'),
]),
# #fileLiteral(a, b, c)
Node('ObjectLiteralExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('Identifier', kind='Token',
token_choices=[
'PoundColorLiteralToken',
'PoundFileLiteralToken',
'PoundImageLiteralToken',
]),
Child('LeftParen', kind='LeftParenToken'),
Child('Arguments', kind='TupleExprElementList',
collection_element_name='Argument'),
Child('RightParen', kind='RightParenToken'),
]),
]
| apache-2.0 |
creimers/django-shop | shop/tests/api.py | 16 | 2260 | from shop.models.ordermodel import OrderExtraInfo, Order
from django.test.testcases import TestCase
from django.contrib.auth.models import User
from shop.tests.util import Mock
from shop.shop_api import ShopAPI
from decimal import Decimal
class ShopApiTestCase(TestCase):
    """Exercise the public ShopAPI helpers against a minimal saved order."""

    def setUp(self):
        # A user and a mock request are created for parity with the other
        # shop tests, although only the order is exercised below.
        self.user = User.objects.create(username="test",
                                        email="test@example.com")
        self.request = Mock()
        setattr(self.request, 'user', None)
        order = Order()
        order.order_subtotal = Decimal('10.95')
        order.order_total = Decimal('10.95')
        order.shipping_cost = Decimal('0')
        order.shipping_address_text = 'shipping address example'
        order.billing_address_text = 'billing address example'
        order.save()
        self.order = order

    def test_add_extra_info(self):
        ShopAPI().add_extra_info(self.order, 'test')
        # The call must have attached an OrderExtraInfo row to the order.
        extra = OrderExtraInfo.objects.get(order=self.order)
        self.assertEqual(extra.text, 'test')

    def test_is_order_paid(self):
        api = ShopAPI()
        # The deprecated spelling must keep working alongside the new one.
        self.assertEqual(api.is_order_payed(self.order), False)
        self.assertEqual(api.is_order_paid(self.order), False)

    def test_is_order_complete(self):
        self.assertEqual(ShopAPI().is_order_completed(self.order), False)

    def test_get_order_total(self):
        self.assertEqual(ShopAPI().get_order_total(self.order),
                         Decimal('10.95'))

    def test_get_order_subtotal(self):
        self.assertEqual(ShopAPI().get_order_subtotal(self.order),
                         Decimal('10.95'))

    def test_get_order_short_name(self):
        self.assertEqual(ShopAPI().get_order_short_name(self.order),
                         '1-10.95')

    def test_get_order_unique_id(self):
        self.assertEqual(ShopAPI().get_order_unique_id(self.order), 1)

    def test_get_order_for_id(self):
        self.assertEqual(ShopAPI().get_order_for_id(1), self.order)
| bsd-3-clause |
omapzoom/platform-external-chromium | net/tools/testserver/asn1der.py | 67 | 1324 | #!/usr/bin/python2.5
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper module for ASN.1/DER encoding."""
import binascii
import struct
# Universal class tag numbers as defined by ASN.1.
INTEGER = 2
BIT_STRING = 3
NULL = 5
OBJECT_IDENTIFIER = 6
SEQUENCE = 0x30


def Data(tag, data):
  """Generic type-length-value encoder.

  Args:
    tag: the tag.
    data: the data for the given tag.
  Returns:
    encoded TLV value.
  """
  if len(data) == 0:
    return struct.pack(">BB", tag, 0)
  assert len(data) <= 0xffff
  # Non-empty payloads always use the two-byte long-form length (0x82).
  return struct.pack(">BBH", tag, 0x82, len(data)) + data


def Integer(value):
  """Encodes an integer.

  Args:
    value: the long value.
  Returns:
    encoded TLV value.
  """
  hex_digits = '%x' % value
  # A leading "00" octet keeps the value non-negative; one extra pad
  # nibble makes the hex string an even length when needed.
  padded = '00' + '0' * (len(hex_digits) % 2) + hex_digits
  return Data(INTEGER, binascii.unhexlify(padded))


def Bitstring(value):
  """Encodes a bit string.

  Args:
    value: a string holding the binary data.
  Returns:
    encoded TLV value.
  """
  # The leading zero byte records that no bits of the final byte are unused.
  return Data(BIT_STRING, '\x00' + value)


def Sequence(values):
  """Encodes a sequence of other values.

  Args:
    values: the list of values, must be strings holding already encoded data.
  Returns:
    encoded TLV value.
  """
  return Data(SEQUENCE, ''.join(values))
| bsd-3-clause |
petecummings/django | tests/check_framework/test_templates.py | 288 | 1403 | from copy import deepcopy
from django.core.checks.templates import E001
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckTemplateSettingsAppDirsTest(SimpleTestCase):
    # A TEMPLATES configuration that combines APP_DIRS=True with explicit
    # 'loaders' -- exactly the conflicting combination that the E001 system
    # check is supposed to reject.
    TEMPLATES_APP_DIRS_AND_LOADERS = [
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'APP_DIRS': True,
            'OPTIONS': {
                'loaders': ['django.template.loaders.filesystem.Loader'],
            },
        },
    ]

    @property
    def func(self):
        # The check function under test, imported at access time (inside the
        # property) rather than at module import time.
        from django.core.checks.templates import check_setting_app_dirs_loaders
        return check_setting_app_dirs_loaders

    @override_settings(TEMPLATES=TEMPLATES_APP_DIRS_AND_LOADERS)
    def test_app_dirs_and_loaders(self):
        """
        Error if template loaders are specified and APP_DIRS is True.
        """
        self.assertEqual(self.func(None), [E001])

    def test_app_dirs_removed(self):
        # Dropping APP_DIRS resolves the conflict: no check errors expected.
        TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
        del TEMPLATES[0]['APP_DIRS']
        with self.settings(TEMPLATES=TEMPLATES):
            self.assertEqual(self.func(None), [])

    def test_loaders_removed(self):
        # Dropping the explicit loaders likewise resolves the conflict.
        TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
        del TEMPLATES[0]['OPTIONS']['loaders']
        with self.settings(TEMPLATES=TEMPLATES):
            self.assertEqual(self.func(None), [])
| bsd-3-clause |
tutumcloud/azure-sdk-for-python | tests/test_managementcertificatemanagementservice.py | 5 | 6981 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import unittest
from azure.servicemanagement import ServiceManagementService
from util import (
AzureTestCase,
credentials,
getUniqueName,
set_service_options,
)
# Static self-signed certificate fixture shared by all tests below: the
# base64 public key, the full base64 certificate body, and its SHA-1
# thumbprint.  (Name note: 'THUMBRINT' is a typo for 'THUMBPRINT', kept
# unchanged because the name is referenced elsewhere in this module.)
MANAGEMENT_CERT_PUBLICKEY = 'MIIBCgKCAQEAsjULNM53WPLkht1rbrDob/e4hZTHzj/hlLoBt2X3cNRc6dOPsMucxbMdchbCqAFa5RIaJvF5NDKqZuUSwq6bttD71twzy9bQ03EySOcRBad1VyqAZQ8DL8nUGSnXIUh+tpz4fDGM5f3Ly9NX8zfGqG3sT635rrFlUp3meJC+secCCwTLOOcIs3KQmuB+pMB5Y9rPhoxcekFfpq1pKtis6pmxnVbiL49kr6UUL6RQRDwik4t1jttatXLZqHETTmXl0Y0wS5AcJUXVAn5AL2kybULoThop2v01/E0NkPtFPAqLVs/kKBahniNn9uwUo+LS9FA8rWGu0FY4CZEYDfhb+QIDAQAB'
MANAGEMENT_CERT_DATA = 'MIIC9jCCAeKgAwIBAgIQ00IFaqV9VqVJxI+wZka0szAJBgUrDgMCHQUAMBUxEzARBgNVBAMTClB5dGhvblRlc3QwHhcNMTIwODMwMDAyNTMzWhcNMzkxMjMxMjM1OTU5WjAVMRMwEQYDVQQDEwpQeXRob25UZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsjULNM53WPLkht1rbrDob/e4hZTHzj/hlLoBt2X3cNRc6dOPsMucxbMdchbCqAFa5RIaJvF5NDKqZuUSwq6bttD71twzy9bQ03EySOcRBad1VyqAZQ8DL8nUGSnXIUh+tpz4fDGM5f3Ly9NX8zfGqG3sT635rrFlUp3meJC+secCCwTLOOcIs3KQmuB+pMB5Y9rPhoxcekFfpq1pKtis6pmxnVbiL49kr6UUL6RQRDwik4t1jttatXLZqHETTmXl0Y0wS5AcJUXVAn5AL2kybULoThop2v01/E0NkPtFPAqLVs/kKBahniNn9uwUo+LS9FA8rWGu0FY4CZEYDfhb+QIDAQABo0owSDBGBgNVHQEEPzA9gBBS6knRHo54LppngxVCCzZVoRcwFTETMBEGA1UEAxMKUHl0aG9uVGVzdIIQ00IFaqV9VqVJxI+wZka0szAJBgUrDgMCHQUAA4IBAQAnZbP3YV+08wI4YTg6MOVA+j1njd0kVp35FLehripmaMNE6lgk3Vu1MGGl0JnvMr3fNFGFzRske/jVtFxlHE5H/CoUzmyMQ+W06eV/e995AduwTKsS0ZgYn0VoocSXWst/nyhpKOcbJgAOohOYxgsGI1JEqQgjyeqzcCIhw/vlWiA3V8bSiPnrC9vwhH0eB025hBd2VbEGDz2nWCYkwtuOLMTvkmLi/oFw3GOfgagZKk8k/ZPffMCafz+yR3vb1nqAjncrVcJLI8amUfpxhjZYexo8MbxBA432M6w8sjXN+uLCl7ByWZ4xs4vonWgkmjeObtU37SIzolHT4dxIgaP2'
MANAGEMENT_CERT_THUMBRINT = 'BEA4B74BD6B915E9DD6A01FB1B8C3C1740F517F2'
#------------------------------------------------------------------------------
class ManagementCertificateManagementServiceTest(AzureTestCase):
    """Round-trip tests for the management-certificate operations of
    ServiceManagementService (list/get/add/delete) against a live
    subscription."""

    def setUp(self):
        self.sms = ServiceManagementService(credentials.getSubscriptionId(),
                                            credentials.getManagementCertFile())
        set_service_options(self.sms)
        # Every thumbprint appended to this list is cleaned up in tearDown().
        self.certificate_thumbprints = []

    def tearDown(self):
        for thumbprint in self.certificate_thumbprints:
            try:
                self.sms.delete_management_certificate(thumbprint)
            except Exception:
                # Best-effort cleanup: the certificate may already have been
                # deleted by the test itself.  (Was a bare 'except:', which
                # would also have swallowed SystemExit/KeyboardInterrupt.)
                pass

    #--Helpers-----------------------------------------------------------------
    def _create_management_certificate(self, cert):
        """Uploads *cert* and registers its thumbprint for cleanup."""
        self.certificate_thumbprints.append(cert.thumbprint)
        result = self.sms.add_management_certificate(cert.public_key,
                                                    cert.thumbprint,
                                                    cert.data)
        self.assertIsNone(result)

    def _management_certificate_exists(self, thumbprint):
        """Returns True if the service currently knows *thumbprint*."""
        try:
            props = self.sms.get_management_certificate(thumbprint)
            return props is not None
        except Exception:
            # The service raises when the certificate does not exist.
            # (Was a bare 'except:'.)
            return False

    #--Test cases for management certificates ----------------------------
    def test_list_management_certificates(self):
        # Arrange
        local_cert = _local_certificate()
        self._create_management_certificate(local_cert)

        # Act
        result = self.sms.list_management_certificates()

        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(len(result) > 0)
        cert = None
        for temp in result:
            if temp.subscription_certificate_thumbprint == \
                    local_cert.thumbprint:
                cert = temp
                break
        self.assertIsNotNone(cert)
        self.assertIsNotNone(cert.created)
        self.assertEqual(cert.subscription_certificate_public_key,
                         local_cert.public_key)
        self.assertEqual(cert.subscription_certificate_data, local_cert.data)
        self.assertEqual(cert.subscription_certificate_thumbprint,
                         local_cert.thumbprint)

    def test_get_management_certificate(self):
        # Arrange
        local_cert = _local_certificate()
        self._create_management_certificate(local_cert)

        # Act
        result = self.sms.get_management_certificate(local_cert.thumbprint)

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.created)
        self.assertEqual(result.subscription_certificate_public_key,
                         local_cert.public_key)
        self.assertEqual(result.subscription_certificate_data, local_cert.data)
        self.assertEqual(result.subscription_certificate_thumbprint,
                         local_cert.thumbprint)

    def test_add_management_certificate(self):
        # Arrange
        local_cert = _local_certificate()

        # Act
        self.certificate_thumbprints.append(local_cert.thumbprint)
        result = self.sms.add_management_certificate(local_cert.public_key,
                                                     local_cert.thumbprint,
                                                     local_cert.data)

        # Assert
        self.assertIsNone(result)
        self.assertTrue(
            self._management_certificate_exists(local_cert.thumbprint))

    def test_delete_management_certificate(self):
        # Arrange
        local_cert = _local_certificate()
        self._create_management_certificate(local_cert)

        # Act
        result = self.sms.delete_management_certificate(local_cert.thumbprint)

        # Assert
        self.assertIsNone(result)
        self.assertFalse(
            self._management_certificate_exists(local_cert.thumbprint))
class LocalCertificate(object):
    """Plain value object bundling a certificate's thumbprint, raw body and
    public key for use by the tests above."""

    def __init__(self, thumbprint='', data='', public_key=''):
        self.public_key = public_key
        self.data = data
        self.thumbprint = thumbprint
def _local_certificate():
    """Returns the fixed test certificate.

    It would be nice to dynamically create this data, so that it is unique,
    but for now we always return the same certificate.
    """
    return LocalCertificate(thumbprint=MANAGEMENT_CERT_THUMBRINT,
                            data=MANAGEMENT_CERT_DATA,
                            public_key=MANAGEMENT_CERT_PUBLICKEY)
#------------------------------------------------------------------------------
# Allow running this module's tests directly from the command line.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
edcast-inc/edx-platform-edcast | common/lib/xmodule/xmodule/tests/test_lti20_unit.py | 174 | 17021 | # -*- coding: utf-8 -*-
"""Tests for LTI Xmodule LTIv2.0 functional logic."""
import datetime
import textwrap
from django.utils.timezone import UTC
from mock import Mock
from xmodule.lti_module import LTIDescriptor
from xmodule.lti_2_util import LTIError
from . import LogicTest
class LTI20RESTResultServiceTest(LogicTest):
    """Logic tests for LTI module. LTI2.0 REST ResultService"""
    descriptor_class = LTIDescriptor

    def setUp(self):
        super(LTI20RESTResultServiceTest, self).setUp()
        self.environ = {'wsgi.url_scheme': 'http', 'REQUEST_METHOD': 'POST'}
        self.system.get_real_user = Mock()
        self.system.publish = Mock()
        self.system.rebind_noauth_module_to_user = Mock()
        self.user_id = self.xmodule.runtime.anonymous_student_id
        self.lti_id = self.xmodule.lti_id
        self.xmodule.due = None
        self.xmodule.graceperiod = None

    def test_sanitize_get_context(self):
        """Tests that the get_context function does basic sanitization"""
        # get_context, unfortunately, requires a lot of mocking machinery
        mocked_course = Mock(name='mocked_course', lti_passports=['lti_id:test_client:test_secret'])
        modulestore = Mock(name='modulestore')
        modulestore.get_course.return_value = mocked_course
        runtime = Mock(name='runtime', modulestore=modulestore)
        self.xmodule.descriptor.runtime = runtime
        self.xmodule.lti_id = "lti_id"

        test_cases = (  # (before sanitize, after sanitize)
            (u"plaintext", u"plaintext"),
            # NOTE(review): the expected value below was restored to the
            # HTML-escaped form; the "encodes scripts" comment shows the
            # sanitizer must transform it, but an extraction step appears to
            # have de-escaped the entities -- confirm against upstream.
            (u"a <script>alert(3)</script>", u"a &lt;script&gt;alert(3)&lt;/script&gt;"),  # encodes scripts
            (u"<b>bold 包</b>", u"<b>bold 包</b>"),  # unicode, and <b> tags pass through
        )
        for case in test_cases:
            self.xmodule.score_comment = case[0]
            self.assertEqual(
                case[1],
                self.xmodule.get_context()['comment']
            )

    def test_lti20_rest_bad_contenttype(self):
        """
        Input with bad content type
        """
        with self.assertRaisesRegexp(LTIError, "Content-Type must be"):
            request = Mock(headers={u'Content-Type': u'Non-existent'})
            self.xmodule.verify_lti_2_0_result_rest_headers(request)

    def test_lti20_rest_failed_oauth_body_verify(self):
        """
        Input with bad oauth body hash verification
        """
        err_msg = "OAuth body verification failed"
        self.xmodule.verify_oauth_body_sign = Mock(side_effect=LTIError(err_msg))
        with self.assertRaisesRegexp(LTIError, err_msg):
            request = Mock(headers={u'Content-Type': u'application/vnd.ims.lis.v2.result+json'})
            self.xmodule.verify_lti_2_0_result_rest_headers(request)

    def test_lti20_rest_good_headers(self):
        """
        Input with good oauth body hash verification
        """
        self.xmodule.verify_oauth_body_sign = Mock(return_value=True)
        request = Mock(headers={u'Content-Type': u'application/vnd.ims.lis.v2.result+json'})
        self.xmodule.verify_lti_2_0_result_rest_headers(request)
        # We just want the above call to complete without exceptions, and to have called verify_oauth_body_sign
        self.assertTrue(self.xmodule.verify_oauth_body_sign.called)

    # BUG FIX: the original list was missing commas after most elements, so
    # adjacent string literals were silently concatenated into one giant test
    # case and most of these inputs were never exercised individually.
    BAD_DISPATCH_INPUTS = [
        None,
        u"",
        u"abcd",
        u"notuser/abcd",
        u"user/",
        u"user//",
        u"user/gbere/",
        u"user/gbere/xsdf",
        u"user/ಠ益ಠ",  # not alphanumeric
    ]

    def test_lti20_rest_bad_dispatch(self):
        """
        Test the error cases for the "dispatch" argument to the LTI 2.0 handler. Anything that doesn't
        fit the form user/<anon_id>
        """
        for einput in self.BAD_DISPATCH_INPUTS:
            with self.assertRaisesRegexp(LTIError, "No valid user id found in endpoint URL"):
                self.xmodule.parse_lti_2_0_handler_suffix(einput)

    GOOD_DISPATCH_INPUTS = [
        (u"user/abcd3", u"abcd3"),
        (u"user/Äbcdè2", u"Äbcdè2"),  # unicode, just to make sure
    ]

    def test_lti20_rest_good_dispatch(self):
        """
        Test the good cases for the "dispatch" argument to the LTI 2.0 handler. Anything that does
        fit the form user/<anon_id>
        """
        for ginput, expected in self.GOOD_DISPATCH_INPUTS:
            # assertEquals is a deprecated alias; use assertEqual.
            self.assertEqual(self.xmodule.parse_lti_2_0_handler_suffix(ginput), expected)

    BAD_JSON_INPUTS = [
        # (bad inputs, error message expected)
        ([
            u"kk",   # ValueError
            u"{{}",  # ValueError
            u"{}}",  # ValueError
            3,       # TypeError
            {},      # TypeError
        ], u"Supplied JSON string in request body could not be decoded"),
        ([
            u"3",        # valid json, not array or object
            u"[]",       # valid json, array too small
            u"[3, {}]",  # valid json, 1st element not an object
        ], u"Supplied JSON string is a list that does not contain an object as the first element"),
        ([
            u'{"@type": "NOTResult"}',  # @type key must have value 'Result'
        ], u"JSON object does not contain correct @type attribute"),
        ([
            # @context missing
            u'{"@type": "Result", "resultScore": 0.1}',
        ], u"JSON object does not contain required key"),
        ([
            u'''
            {"@type": "Result",
             "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
             "resultScore": 100}'''  # score out of range
        ], u"score value outside the permitted range of 0-1."),
        ([
            u'''
            {"@type": "Result",
             "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
             "resultScore": "1b"}''',  # score ValueError
            u'''
            {"@type": "Result",
             "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
             "resultScore": {}}''',  # score TypeError
        ], u"Could not convert resultScore to float"),
    ]

    def test_lti20_bad_json(self):
        """
        Test that bad json_str to parse_lti_2_0_result_json inputs raise appropriate LTI Error
        """
        for error_inputs, error_message in self.BAD_JSON_INPUTS:
            for einput in error_inputs:
                with self.assertRaisesRegexp(LTIError, error_message):
                    self.xmodule.parse_lti_2_0_result_json(einput)

    GOOD_JSON_INPUTS = [
        (u'''
        {"@type": "Result",
         "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
         "resultScore": 0.1}''', u""),  # no comment means we expect ""
        (u'''
        [{"@type": "Result",
          "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
          "@id": "anon_id:abcdef0123456789",
          "resultScore": 0.1}]''', u""),  # OK to have array of objects -- just take the first. @id is okay too
        (u'''
        {"@type": "Result",
         "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
         "resultScore": 0.1,
         "comment": "ಠ益ಠ"}''', u"ಠ益ಠ"),  # unicode comment
    ]

    def test_lti20_good_json(self):
        """
        Test the parsing of good comments
        """
        for json_str, expected_comment in self.GOOD_JSON_INPUTS:
            score, comment = self.xmodule.parse_lti_2_0_result_json(json_str)
            self.assertEqual(score, 0.1)
            self.assertEqual(comment, expected_comment)

    GOOD_JSON_PUT = textwrap.dedent(u"""
        {"@type": "Result",
         "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
         "@id": "anon_id:abcdef0123456789",
         "resultScore": 0.1,
         "comment": "ಠ益ಠ"}
        """).encode('utf-8')

    # A PUT without "resultScore" acts like a delete of the stored score.
    GOOD_JSON_PUT_LIKE_DELETE = textwrap.dedent(u"""
        {"@type": "Result",
         "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
         "@id": "anon_id:abcdef0123456789",
         "comment": "ಠ益ಠ"}
        """).encode('utf-8')

    def get_signed_lti20_mock_request(self, body, method=u'PUT'):
        """
        Example of signed from LTI 2.0 Provider. Signatures and hashes are example only and won't verify
        """
        mock_request = Mock()
        mock_request.headers = {
            'Content-Type': 'application/vnd.ims.lis.v2.result+json',
            'Authorization': (
                u'OAuth oauth_nonce="135685044251684026041377608307", '
                u'oauth_timestamp="1234567890", oauth_version="1.0", '
                u'oauth_signature_method="HMAC-SHA1", '
                u'oauth_consumer_key="test_client_key", '
                u'oauth_signature="my_signature%3D", '
                u'oauth_body_hash="gz+PeJZuF2//n9hNUnDj2v5kN70="'
            )
        }
        mock_request.url = u'http://testurl'
        mock_request.http_method = method
        mock_request.method = method
        mock_request.body = body
        return mock_request

    USER_STANDIN = Mock()
    USER_STANDIN.id = 999

    def setup_system_xmodule_mocks_for_lti20_request_test(self):
        """
        Helper fn to set up mocking for lti 2.0 request test
        """
        self.system.get_real_user = Mock(return_value=self.USER_STANDIN)
        self.xmodule.max_score = Mock(return_value=1.0)
        self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', u'test_client_secret'))
        self.xmodule.verify_oauth_body_sign = Mock()

    def test_lti20_put_like_delete_success(self):
        """
        The happy path for LTI 2.0 PUT that acts like a delete
        """
        self.setup_system_xmodule_mocks_for_lti20_request_test()
        SCORE = 0.55  # pylint: disable=invalid-name
        COMMENT = u"ಠ益ಠ"  # pylint: disable=invalid-name
        self.xmodule.module_score = SCORE
        self.xmodule.score_comment = COMMENT
        mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT_LIKE_DELETE)
        # Now call the handler
        response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
        # Now assert there's no score
        self.assertEqual(response.status_code, 200)
        self.assertIsNone(self.xmodule.module_score)
        self.assertEqual(self.xmodule.score_comment, u"")
        (_, evt_type, called_grade_obj), _ = self.system.publish.call_args
        self.assertEqual(called_grade_obj, {'user_id': self.USER_STANDIN.id, 'value': None, 'max_value': None})
        self.assertEqual(evt_type, 'grade')

    def test_lti20_delete_success(self):
        """
        The happy path for LTI 2.0 DELETE
        """
        self.setup_system_xmodule_mocks_for_lti20_request_test()
        SCORE = 0.55  # pylint: disable=invalid-name
        COMMENT = u"ಠ益ಠ"  # pylint: disable=invalid-name
        self.xmodule.module_score = SCORE
        self.xmodule.score_comment = COMMENT
        mock_request = self.get_signed_lti20_mock_request("", method=u'DELETE')
        # Now call the handler
        response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
        # Now assert there's no score
        self.assertEqual(response.status_code, 200)
        self.assertIsNone(self.xmodule.module_score)
        self.assertEqual(self.xmodule.score_comment, u"")
        (_, evt_type, called_grade_obj), _ = self.system.publish.call_args
        self.assertEqual(called_grade_obj, {'user_id': self.USER_STANDIN.id, 'value': None, 'max_value': None})
        self.assertEqual(evt_type, 'grade')

    def test_lti20_put_set_score_success(self):
        """
        The happy path for LTI 2.0 PUT that sets a score
        """
        self.setup_system_xmodule_mocks_for_lti20_request_test()
        mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
        # Now call the handler
        response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
        # Now assert
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.xmodule.module_score, 0.1)
        self.assertEqual(self.xmodule.score_comment, u"ಠ益ಠ")
        (_, evt_type, called_grade_obj), _ = self.system.publish.call_args
        self.assertEqual(evt_type, 'grade')
        self.assertEqual(called_grade_obj, {'user_id': self.USER_STANDIN.id, 'value': 0.1, 'max_value': 1.0})

    def test_lti20_get_no_score_success(self):
        """
        The happy path for LTI 2.0 GET when there's no score
        """
        self.setup_system_xmodule_mocks_for_lti20_request_test()
        mock_request = self.get_signed_lti20_mock_request("", method=u'GET')
        # Now call the handler
        response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
        # Now assert
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json, {"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
                                         "@type": "Result"})

    def test_lti20_get_with_score_success(self):
        """
        The happy path for LTI 2.0 GET when there is a score
        """
        self.setup_system_xmodule_mocks_for_lti20_request_test()
        SCORE = 0.55  # pylint: disable=invalid-name
        COMMENT = u"ಠ益ಠ"  # pylint: disable=invalid-name
        self.xmodule.module_score = SCORE
        self.xmodule.score_comment = COMMENT
        mock_request = self.get_signed_lti20_mock_request("", method=u'GET')
        # Now call the handler
        response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
        # Now assert
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json, {"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
                                         "@type": "Result",
                                         "resultScore": SCORE,
                                         "comment": COMMENT})

    UNSUPPORTED_HTTP_METHODS = ["OPTIONS", "HEAD", "POST", "TRACE", "CONNECT"]

    def test_lti20_unsupported_method_error(self):
        """
        Test we get a 404 when we don't GET or PUT
        """
        self.setup_system_xmodule_mocks_for_lti20_request_test()
        mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
        for bad_method in self.UNSUPPORTED_HTTP_METHODS:
            mock_request.method = bad_method
            response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
            self.assertEqual(response.status_code, 404)

    def test_lti20_request_handler_bad_headers(self):
        """
        Test that we get a 401 when header verification fails
        """
        self.setup_system_xmodule_mocks_for_lti20_request_test()
        self.xmodule.verify_lti_2_0_result_rest_headers = Mock(side_effect=LTIError())
        mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
        response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
        self.assertEqual(response.status_code, 401)

    def test_lti20_request_handler_bad_dispatch_user(self):
        """
        Test that we get a 404 when there's no (or badly formatted) user specified in the url
        """
        self.setup_system_xmodule_mocks_for_lti20_request_test()
        mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
        response = self.xmodule.lti_2_0_result_rest_handler(mock_request, None)
        self.assertEqual(response.status_code, 404)

    def test_lti20_request_handler_bad_json(self):
        """
        Test that we get a 404 when json verification fails
        """
        self.setup_system_xmodule_mocks_for_lti20_request_test()
        self.xmodule.parse_lti_2_0_result_json = Mock(side_effect=LTIError())
        mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
        response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
        self.assertEqual(response.status_code, 404)

    def test_lti20_request_handler_bad_user(self):
        """
        Test that we get a 404 when the supplied user does not exist
        """
        self.setup_system_xmodule_mocks_for_lti20_request_test()
        self.system.get_real_user = Mock(return_value=None)
        mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
        response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
        self.assertEqual(response.status_code, 404)

    def test_lti20_request_handler_grade_past_due(self):
        """
        Test that we get a 404 when accept_grades_past_due is False and it is past due
        """
        self.setup_system_xmodule_mocks_for_lti20_request_test()
        self.xmodule.due = datetime.datetime.now(UTC())
        self.xmodule.accept_grades_past_due = False
        mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
        response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
        self.assertEqual(response.status_code, 404)
| agpl-3.0 |
CCI-Tools/cate-core | cate/ops/index.py | 1 | 8641 |
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
Index calculation operations
Functions
=========
"""
import xarray as xr
import pandas as pd
from cate.core.op import op, op_input
from cate.ops.select import select_var
from cate.ops.subset import subset_spatial
from cate.ops.anomaly import anomaly_external
from cate.core.types import PolygonLike, VarName, ValidationError
from cate.util.monitor import Monitor
# File-dialog filter matching any file; used alongside the NetCDF filter below.
_ALL_FILE_FILTER = dict(name='All Files', extensions=['*'])
@op(tags=['index'])
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('var', value_set_source='ds', data_type=VarName)
def enso_nino34(ds: xr.Dataset,
                var: VarName.TYPE,
                file: str,
                threshold: float = None,
                monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
    """
    Calculate nino34 index, which is defined as a five month running mean of
    anomalies of monthly means of SST data in Nino3.4 region:: lon_min=-170
    lat_min=-5 lon_max=-120 lat_max=5.

    :param ds: A monthly SST dataset
    :param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
        can be generated using the long_term_average operation
    :param var: Dataset variable (geophysical quantity) to use for index
        calculation.
    :param threshold: If given, boolean El Nino/La Nina timeseries will be
        calculated and added to the output dataset according to the given
        threshold. Where anomaly larger than the positive value of the threshold
        indicates El Nino and anomaly smaller than the negative of the given
        threshold indicates La Nina.
    :param monitor: a progress monitor.
    :return: A dataset that contains the index timeseries.
    """
    # Nino3.4 box as "lon_min, lat_min, lon_max, lat_max", 5-month window.
    n34 = '-170, -5, -120, 5'
    name = 'ENSO N3.4 Index'
    return _generic_index_calculation(ds, var, n34, 5, file, name, threshold, monitor)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('region', value_set=['N1+2', 'N3', 'N34', 'N4', 'custom'])
@op_input('custom_region', data_type=PolygonLike)
def enso(ds: xr.Dataset,
         var: VarName.TYPE,
         file: str,
         region: str = 'n34',
         custom_region: PolygonLike.TYPE = None,
         threshold: float = None,
         monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
    """
    Calculate ENSO index, which is defined as a five month running mean of
    anomalies of monthly means of SST data in the given region.

    :param ds: A monthly SST dataset
    :param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
        can be generated using the long_term_average operation
    :param var: Dataset variable to use for index calculation
    :param region: Region for index calculation, one of 'N1+2', 'N3', 'N34',
        'N4' or 'custom' (case-insensitive); the default is Nino3.4
    :param custom_region: If 'custom' is chosen as the 'region', this parameter
        has to be provided to set the desired region.
    :param threshold: If given, boolean El Nino/La Nina timeseries will be
        calculated and added to the output dataset, according to the given
        threshold. Where anomaly larger than the positive value of the threshold
        indicates El Nino and anomaly smaller than the negative of the given
        threshold indicates La Nina.
    :param monitor: a progress monitor.
    :return: A dataset that contains the index timeseries.
    """
    # BUG FIX: the region table previously used the key 'N3.4' while the
    # declared value_set used 'N34' and the default argument was lowercase
    # 'n34', so the default invocation raised an uncaught KeyError.  The
    # lookup is now case-insensitive and the key matches the value_set.
    regions = {'N1+2': '-90, -10, -80, 0',
               'N3': '-150, -5, -90, 5',
               'N34': '-170, -5, -120, 5',
               'N4': '160, -5, -150, 5',
               'CUSTOM': custom_region}
    region_key = region.upper() if region else ''
    if region_key not in regions:
        raise ValidationError('region must be one of %s' % ', '.join(sorted(regions)))
    converted_region = PolygonLike.convert(regions[region_key])
    if not converted_region:
        raise ValidationError('No region has been provided to ENSO index calculation')

    name = 'ENSO ' + region_key + ' Index'
    if 'CUSTOM' == region_key:
        name = 'ENSO Index over ' + PolygonLike.format(converted_region)

    return _generic_index_calculation(ds, var, converted_region, 5, file, name, threshold, monitor)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
def oni(ds: xr.Dataset,
        var: VarName.TYPE,
        file: str,
        threshold: float = None,
        monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
    """
    Calculate ONI index, which is defined as a three month running mean of
    anomalies of monthly means of SST data in the Nino3.4 region.

    :param ds: A monthly SST dataset
    :param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
        can be generated using the long_term_average operation
    :param var: Dataset variable to use for index calculation
    :param threshold: If given, boolean El Nino/La Nina timeseries will be
        calculated and added to the output dataset, according to the given
        threshold. Where anomaly larger than the positive value of the threshold
        indicates El Nino and anomaly smaller than the negative of the given
        threshold indicates La Nina.
    :param monitor: a progress monitor.
    :return: A dataset that contains the index timeseries
    """
    # Same Nino3.4 box as enso_nino34, but with a 3-month running-mean window.
    n34 = '-170, -5, -120, 5'
    name = 'ONI Index'
    return _generic_index_calculation(ds, var, n34, 3, file, name, threshold, monitor)
def _generic_index_calculation(ds: xr.Dataset,
                               var: VarName.TYPE,
                               region: PolygonLike.TYPE,
                               window: int,
                               file: str,
                               name: str,
                               threshold: float = None,
                               monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
    """
    A generic index calculation. Where an index is defined as an anomaly
    against the given reference of a moving average of the given window size of
    the given region of the given variable of the given dataset.

    :param ds: Dataset from which to calculate the index
    :param var: Variable from which to calculate index
    :param region: Spatial subset from which to calculate the index
    :param window: Window size for the moving average
    :param file: Path to the reference file
    :param threshold: Absolute threshold that indicates an ENSO event
    :param name: Name of the index
    :param monitor: a progress monitor.
    :return: A dataset that contains the index timeseries
    """
    var = VarName.convert(var)
    region = PolygonLike.convert(region)

    with monitor.starting("Calculate the index", total_work=2):
        ds = select_var(ds, var)
        ds_subset = subset_spatial(ds, region)
        # Anomaly against the external reference file (e.g. a climatology).
        anom = anomaly_external(ds_subset, file, monitor=monitor.child(1))
        with monitor.child(1).observing("Calculate mean"):
            # Spatial mean over the subset -> a single timeseries.
            ts = anom.mean(dim=['lat', 'lon'])

    df = pd.DataFrame(data=ts[var].values, columns=[name], index=ts.time.values)
    # Centered running mean; rows without a complete window are dropped.
    retval = df.rolling(window=window, center=True).mean().dropna()

    if threshold is None:
        return retval

    # Boolean event columns: anomaly above +threshold / below -threshold.
    retval['El Nino'] = pd.Series((retval[name] > threshold),
                                  index=retval.index)
    retval['La Nina'] = pd.Series((retval[name] < -threshold),
                                  index=retval.index)
    return retval
| mit |
gaqzi/gocd-cli | tests/test_command.py | 2 | 1952 | import pytest
from gocd_cli.command import BaseCommand
from gocd_cli.exceptions import MissingDocumentationError
class FakeCommand(BaseCommand):
    # Stand-in command used to exercise BaseCommand's documentation helpers.
    usage_summary = "I'm merely an example of things to come"
    usage = """
    Args:
        something: something else
        some-more: the other thing
    """

    def __init__(self, server, name, limit=10, failure_mode=False):
        # Arguments are ignored; only the signature matters, since the
        # generated call documentation is derived from it.
        pass
class FakeCommandNoKwargs(BaseCommand):
    # Command without keyword arguments: the generated call documentation
    # should list only the positional <name> parameter.
    usage = 'Something'
    usage_summary = 'I shall perform miracles'

    def __init__(self, server, name):
        pass
class TestBaseCommandDocumentation(object):
    """Checks the documentation helpers on BaseCommand and its subclasses."""

    def test_missing_usage_summary_raises_document_missing_error(self):
        with pytest.raises(MissingDocumentationError) as exc:
            BaseCommand.get_usage_summary()

        assert 'Command "BaseCommand" has no "usage_summary" string set.' in str(exc)

    def test_missing_usage_raises_document_missing_error(self):
        # Set usage_summary temporarily so get_usage() gets past the summary
        # check and fails on the missing "usage" string.
        # BUG FIX: the original test assigned BaseCommand.usage_summary and
        # never removed it, leaking state into other tests and making the
        # suite order-dependent; the class is now restored afterwards.
        had_summary = 'usage_summary' in BaseCommand.__dict__
        previous = BaseCommand.__dict__.get('usage_summary')
        BaseCommand.usage_summary = 'something'
        try:
            with pytest.raises(MissingDocumentationError) as exc:
                BaseCommand.get_usage()

            assert 'Command "BaseCommand" has no "usage" string set.' in str(exc)
        finally:
            if had_summary:
                BaseCommand.usage_summary = previous
            else:
                del BaseCommand.usage_summary

    def test_usage_uses_usage_summary(self):
        assert FakeCommand.get_usage_summary()
        assert FakeCommand.get_usage_summary() in FakeCommand.get_usage()

    def test_usage_uses_call_documentation(self):
        assert 'fake-command <name> [--limit] [--failure-mode]' in FakeCommand.get_usage()

    def test_get_call_documentation(self):
        documentation = FakeCommand.get_call_documentation()

        assert 'fake-command' in documentation
        assert 'fake-command <name>' in documentation
        assert 'fake-command <name> [--limit] [--failure-mode]' in documentation

    def test_get_call_documentation_no_defaults(self):
        documentation = FakeCommandNoKwargs.get_call_documentation()

        assert 'fake-command-no-kwargs <name>' in documentation
| mit |
PGHS-CP1A-2015/python_koans_kjhansen | python2/libs/colorama/ansitowin32.py | 37 | 6006 |
import re
import sys
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
# The WinTerm helper is only usable where ctypes.windll exists (i.e. on
# Windows); elsewhere windll is None and no winterm instance is created.
if windll is not None:
    winterm = WinTerm()
def is_a_tty(stream):
    """Return the stream's isatty() result, or False if it has no isatty()."""
    if not hasattr(stream, 'isatty'):
        return False
    return stream.isatty()
class StreamWrapper(object):
    '''
    Wraps a stream (such as stdout), acting as a transparent proxy for all
    attribute access apart from method 'write()', which is delegated to our
    Converter instance.
    '''
    def __init__(self, wrapped, converter):
        # double-underscore everything to prevent clashes with names of
        # attributes on the wrapped stream object.
        self.__wrapped = wrapped
        self.__convertor = converter

    def __getattr__(self, name):
        # Only invoked for attributes *not* found on the proxy itself; the
        # name-mangled _StreamWrapper__wrapped attribute is always found, so
        # this cannot recurse.
        return getattr(self.__wrapped, name)

    def write(self, text):
        # All writes are routed through the converter, which decides whether
        # to strip/convert ANSI sequences before touching the real stream.
        self.__convertor.write(text)
class AnsiToWin32(object):
    '''
    Implements a 'write()' method which, on Windows, will strip ANSI character
    sequences from the text, and if outputting to a tty, will convert them into
    win32 function calls.
    '''
    # Matches an ANSI CSI escape sequence: ESC '[' followed by ';'-separated
    # numeric parameters (group 1) and a single command letter (group 2).
    ANSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = sys.platform.startswith('win')
# should we strip ANSI sequences from our output?
if strip is None:
strip = on_windows
self.strip = strip
# should we should convert ANSI sequences into win32 calls?
if convert is None:
convert = on_windows and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
}
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
else:
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
for match in self.ANSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(paramstring)
self.call_win32(command, params)
def extract_params(self, paramstring):
def split(paramstring):
for p in paramstring.split(';'):
if p != '':
yield int(p)
return tuple(split(paramstring))
def call_win32(self, command, params):
if params == []:
params = [0]
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
| mit |
fedora-conary/conary | conary_test/repositorytest/filecontainertest.py | 2 | 5805 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import gzip
import tempfile
import unittest
from conary.lib import util
from conary.repository import filecontainer
from conary.repository.filecontainer import FileContainer
from conary.repository.filecontents import FromFile, FromString
def fileCount():
    """Count this process's open file descriptors via /proc.

    os.listdir itself holds the directory fd open while scanning, hence
    the -1 adjustment.
    """
    entries = os.listdir("/proc/%d/fd" % os.getpid())
    return len(entries) - 1
def checkFiles(c, names, data, tags):
    """Walk file container *c* and verify entry names, gzip'd data and tags.

    Raises AssertionError on the first mismatch, and if any expected name
    was never returned by the container.
    """
    names = names[:]
    i = 0
    rc = c.getNextFile()
    while rc:
        name, tag, f = rc
        assert(name == names[0])
        del names[0]
        s = gzip.GzipFile(None, "r", fileobj = f).read()
        # Report the *current* entry's name: the original indexed the
        # shrinking 'names' copy with i, naming the wrong (or a missing)
        # element.  Raises also use the py2+py3-compatible call form
        # instead of the Python-2-only "raise E, msg" statement.
        if s != data[i]:
            raise AssertionError("bad data for %s" % name)
        if tag != tags[i]:
            raise AssertionError("bad tag for %s" % name)
        i += 1
        rc = c.getNextFile()
    if names:
        raise AssertionError("files not found: %s" % " ".join(names))
class FilecontainerTest(unittest.TestCase):
    """Exercises FileContainer creation, iteration and large-file support."""

    def setUp(self):
        # scratch file the container tests operate on; removed in tearDown
        fd, self.fn = tempfile.mkstemp()
        #self.fn = "foo"

    def test(self):
        """Round-trip: reject non-containers, add entries, re-read them."""
        count = fileCount()
        # let's make sure we can't open an arbitrary file as a container
        f = util.ExtendedFile("/bin/ls", "r", buffering = False)
        self.assertRaises(filecontainer.BadContainer, FileContainer, f)
        f.close()
        if (count != fileCount()):
            # py2+py3-compatible raise (was the Python-2-only
            # "raise AssertionError, msg" statement form)
            raise AssertionError("too many files are open %s" % count)
        # create a new container
        f = util.ExtendedFile(self.fn, "w+", buffering = False)
        c = FileContainer(f)
        c.close()
        data = []
        tags = []
        names = []
        # a freshly reopened (read-mode) container must refuse addFile()
        c = FileContainer(f)
        self.assertRaises(AssertionError, c.addFile, "name",
                          FromString("data"), "tag")
        c.close()
        os.unlink(self.fn)
        f = util.ExtendedFile(self.fn, "w+", buffering = False)
        c = FileContainer(f)
        data.append("contents of file1")
        tags.append("extra data")
        names.append("file1")
        c.addFile(names[0], FromString(data[0]), tags[0])
        data.append("file2 gets some contents")
        tags.append("tag")
        names.append("file2")
        c.addFile(names[1], FromString(data[1]), tags[1])
        # empty name and empty contents are legal
        data.append("")
        tags.append("empty")
        names.append("")
        c.addFile(names[2], FromString(data[2]), tags[2])
        c.close()
        c = FileContainer(f)
        checkFiles(c, names, data, tags)
        f = util.ExtendedFile(self.fn, "r+", buffering = False)
        c = FileContainer(f)
        checkFiles(c, names, data, tags)
        # reset() rewinds iteration back to the first entry
        c.reset()
        checkFiles(c, names, data, tags)
        c.close()
        f = util.ExtendedFile(self.fn, "r+", buffering = False)
        c = FileContainer(f)
        name, tag, f = c.getNextFile()
        assert(name == names[0])

    def testLargeFiles(self):
        """Add files > 4GiB to a container using a sparse-write stand-in."""
        # test adding files > 4gig to a filecontainer. we replace the write
        # call with one which handles sparsity
        class SparseFile(util.ExtendedFile):
            # Writes long runs of NULs by seeking instead of storing them,
            # deferring a single trailing byte so the file length is right.
            def __init__(self, *args, **kwargs):
                self.needsWrite = False
                util.ExtendedFile.__init__(self, *args, **kwargs)

            def write(self, s):
                if len(s) > 100 and s[0] == '\0' and s[-1] == '\0':
                    self.seek(len(s) - 1, 2)
                    self.needsWrite = True
                    return len(s)
                return util.ExtendedFile.write(self, s)

            def close(self):
                if self.needsWrite:
                    self.write('\0')
                    self.needsWrite = False

            def seek(self, *args):
                if self.needsWrite:
                    self.write('\0')
                    self.needsWrite = False
                return util.ExtendedFile.seek(self, *args)

        class FalseFile:
            # Read-only NUL-filled pseudo-file of a given size.
            def __init__(self, size):
                self.size = size
                self.offset = 0

            def seek(self, offset, whence = 0):
                assert(whence == 0)
                self.offset = offset

            def read(self, bytes):
                self.offset += bytes
                if self.offset > self.size:
                    self.offset -= bytes
                    bytes = self.size - self.offset
                    self.offset = self.size
                return "\0" * bytes

        f = SparseFile(self.fn, "w+", buffering = False)
        c = FileContainer(f)
        totalSize = 0x100001000
        c.addFile('test', FromFile(FalseFile(totalSize)), 'testdata',
                  precompressed = True)
        c.addFile('end', FromString('endcontents'), 'enddata',
                  precompressed = True)
        c.close()
        c = FileContainer(util.ExtendedFile(self.fn, 'r', buffering = False))
        name, tag, f = c.getNextFile()
        storedSize = f.seek(0, 2)
        assert(storedSize == totalSize)
        assert(tag == 'testdata')
        name, tag, f = c.getNextFile()
        assert(name == 'end')
        assert(tag == 'enddata')
        s = f.read()
        assert(s == 'endcontents')

    def tearDown(self):
        os.unlink(self.fn)
| apache-2.0 |
Tim1928/DBK-3.0_4.1 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# Futex operation codes and flag bits, mirroring <linux/futex.h>.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask that strips the flag bits, leaving just the command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
# Nanoseconds per second, used by the nsecs_* helpers below.
NSECS_PER_SEC = 1000000000
def avg(total, n):
    """Return the mean of a running total over n samples.

    Note: under Python 2 this is integer (floor) division for int inputs.
    """
    mean = total / n
    return mean
def nsecs(secs, nsecs):
    """Combine whole seconds and a nanosecond remainder into nanoseconds."""
    total = secs * NSECS_PER_SEC
    return total + nsecs
def nsecs_secs(nsecs):
    """Whole-seconds portion of a nanosecond timestamp.

    Kept as '/' deliberately: under Python 2 (this script's target) it is
    floor division for int inputs.
    """
    return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
    """Sub-second (nanosecond) portion of a nanosecond timestamp."""
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    """Format a nanosecond timestamp as 'SSSSS.NNNNNNNNN'.

    The original assignment ended with a stray trailing comma, so the
    function returned a 1-tuple instead of a string; callers using
    '%s' % nsecs_str(...) were unaffected (single-element tuple), which
    masked the bug.  Return the plain string.
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold *value* into the (min, max, avg, count) tuple at dict[key].

    Replaces the Python-2-only dict.has_key() with 'in' (valid on both 2
    and 3).  NOTE: 'avg' is a running pairwise average, not a true mean —
    preserved as-is for compatibility.  The parameter keeps its historical
    name 'dict' (shadowing the builtin) to avoid changing the signature.
    """
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        mean = (mean + value) / 2
        dict[key] = (lo, hi, mean, count + 1)
def clear_term():
    """Home the cursor and clear the screen via ANSI escapes (CSI H, CSI 2J)."""
    print("\x1b[H\x1b[2J")
# Best-effort setup for syscall-name resolution: the optional
# audit-libs-python bindings map syscall numbers to names per machine
# architecture.  If anything here fails (module missing, unknown arch),
# syscall_name() falls back to the raw number.
audit_package_warned = False
try:
    import audit
    # uname machine string -> audit machine id
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB only exists in newer audit bindings
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    """Map a syscall number to its name, falling back to the number itself.

    The deliberately-bare except also covers the case where the optional
    'audit' bindings (and hence machine_id) were never set up.
    """
    try:
        resolved = audit.audit_syscall_to_name(id, machine_id)
    except:
        resolved = str(id)
    return resolved
def strerror(nr):
    """Return the symbolic errno name for nr (either sign), or a fallback."""
    try:
        name = errno.errorcode[abs(nr)]
    except:
        name = "Unknown %d errno" % nr
    return name
| gpl-2.0 |
Konubinix/qutebrowser | tests/unit/config/test_style.py | 9 | 3507 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.config.style."""
import logging
import pytest
from PyQt5.QtCore import QObject
from PyQt5.QtGui import QColor
from qutebrowser.config import style
@pytest.mark.parametrize('template, expected', [
    ("{{ color['completion.bg'] }}", "black"),
    ("{{ color['completion.fg'] }}", "red"),
    ("{{ font['completion'] }}", "foo"),
    ("{{ config.get('foo', 'bar') }}", "baz"),
])
def test_get_stylesheet(config_stub, template, expected):
    """Templates resolve colors, fonts and config values from the stub."""
    config_stub.data = {
        'colors': {
            'completion.bg': 'black',
            'completion.fg': 'red',
        },
        'fonts': {
            'completion': 'foo',
        },
        'foo': {'bar': 'baz'},
    }
    assert style.get_stylesheet(template) == expected
class Obj(QObject):

    """QObject stub that records the stylesheet passed to setStyleSheet."""

    def __init__(self, stylesheet, parent=None):
        super().__init__(parent)
        self.STYLESHEET = stylesheet  # pylint: disable=invalid-name
        self.rendered_stylesheet = None

    def setStyleSheet(self, stylesheet):
        self.rendered_stylesheet = stylesheet
@pytest.mark.parametrize('delete', [True, False])
def test_set_register_stylesheet(delete, qtbot, config_stub, caplog):
    """set_register_stylesheet renders immediately and re-renders on config
    change — unless the object was deleted in the meantime."""
    config_stub.data = {'fonts': {'foo': 'bar'}, 'colors': {}}
    obj = Obj("{{ font['foo'] }}")
    with caplog.at_level(9):  # VDEBUG
        style.set_register_stylesheet(obj)
    assert len(caplog.records) == 1
    assert caplog.records[0].message == 'stylesheet for Obj: bar'
    assert obj.rendered_stylesheet == 'bar'
    if delete:
        # destroy the object so the config-changed hook must not touch it
        with qtbot.waitSignal(obj.destroyed):
            obj.deleteLater()
    config_stub.data = {'fonts': {'foo': 'baz'}, 'colors': {}}
    # drop the memoized render so the changed signal produces fresh output
    style.get_stylesheet.cache_clear()
    config_stub.changed.emit('fonts', 'foo')
    if delete:
        expected = 'bar'
    else:
        expected = 'baz'
    assert obj.rendered_stylesheet == expected
class TestColorDict:

    """Tests for style.ColorDict lookup behaviour."""

    @pytest.mark.parametrize('key, expected', [
        ('foo', 'one'),
        ('foo.fg', 'two'),
        ('foo.bg', 'three'),
    ])
    def test_values(self, key, expected):
        """Plain, .fg and .bg keys each resolve to their own value."""
        colors = style.ColorDict()
        colors['foo'] = 'one'
        colors['foo.fg'] = 'two'
        colors['foo.bg'] = 'three'
        assert colors[key] == expected

    def test_key_error(self, caplog):
        """A missing color is logged as an error rather than raising."""
        colors = style.ColorDict()
        with caplog.at_level(logging.ERROR):
            colors['foo']  # pylint: disable=pointless-statement
        assert len(caplog.records) == 1
        assert caplog.records[0].message == 'No color defined for foo!'

    def test_qcolor(self):
        """Accessing a QColor value raises TypeError."""
        colors = style.ColorDict()
        colors['foo'] = QColor()
        with pytest.raises(TypeError):
            colors['foo']  # pylint: disable=pointless-statement
| gpl-3.0 |
bop/hybrid | lib/python2.6/site-packages/pip/vcs/git.py | 473 | 7898 | import tempfile
import re
import os.path
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.log import logger
from pip.backwardcompat import url2pathname, urlparse
urlsplit = urlparse.urlsplit
urlunsplit = urlparse.urlunsplit
class Git(VersionControl):
    """pip VCS backend driving the 'git' command-line tool."""

    name = 'git'
    dirname = '.git'
    repo_name = 'clone'
    schemes = ('git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file')
    bundle_file = 'git-clone.txt'
    guide = ('# This was a Git repo; to make it a repo again run:\n'
             'git init\ngit remote add origin %(url)s -f\ngit checkout %(rev)s\n')

    def __init__(self, url=None, *args, **kwargs):
        # Works around an apparent Git bug
        # (see http://article.gmane.org/gmane.comp.version-control.git/146500)
        if url:
            scheme, netloc, path, query, fragment = urlsplit(url)
            if scheme.endswith('file'):
                initial_slashes = path[:-len(path.lstrip('/'))]
                newpath = initial_slashes + url2pathname(path).replace('\\', '/').lstrip('/')
                url = urlunsplit((scheme, netloc, newpath, query, fragment))
                after_plus = scheme.find('+') + 1
                # second assignment supersedes the first, re-attaching the
                # 'git+' prefix in front of the rebuilt file URL
                url = scheme[:after_plus] + urlunsplit((scheme[after_plus:], netloc, newpath, query, fragment))
        super(Git, self).__init__(url, *args, **kwargs)

    def parse_vcs_bundle_file(self, content):
        """Extract (url, rev) from a git-clone.txt bundle file, or (None, None)."""
        url = rev = None
        for line in content.splitlines():
            if not line.strip() or line.strip().startswith('#'):
                continue
            url_match = re.search(r'git\s*remote\s*add\s*origin(.*)\s*-f', line)
            if url_match:
                url = url_match.group(1).strip()
            rev_match = re.search(r'^git\s*checkout\s*-q\s*(.*)\s*', line)
            if rev_match:
                rev = rev_match.group(1).strip()
            if url and rev:
                return url, rev
        return None, None

    def export(self, location):
        """Export the Git repository at the url to the destination location"""
        temp_dir = tempfile.mkdtemp('-export', 'pip-')
        self.unpack(temp_dir)
        try:
            # checkout-index requires a trailing slash on the prefix
            if not location.endswith('/'):
                location = location + '/'
            call_subprocess(
                [self.cmd, 'checkout-index', '-a', '-f', '--prefix', location],
                filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
        finally:
            rmtree(temp_dir)

    def check_rev_options(self, rev, dest, rev_options):
        """Check the revision options before checkout to compensate that tags
        and branches may need origin/ as a prefix.
        Returns the SHA1 of the branch or tag if found.
        """
        revisions = self.get_refs(dest)
        origin_rev = 'origin/%s' % rev
        if origin_rev in revisions:
            # remote branch
            return [revisions[origin_rev]]
        elif rev in revisions:
            # a local tag or branch name
            return [revisions[rev]]
        else:
            logger.warn("Could not find a tag or branch '%s', assuming commit." % rev)
            return rev_options

    def switch(self, dest, url, rev_options):
        """Point an existing checkout at a new origin URL and revision."""
        call_subprocess(
            [self.cmd, 'config', 'remote.origin.url', url], cwd=dest)
        call_subprocess(
            [self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
        self.update_submodules(dest)

    def update(self, dest, rev_options):
        """Fetch from origin and hard-reset to the wanted revision."""
        # First fetch changes from the default remote
        call_subprocess([self.cmd, 'fetch', '-q'], cwd=dest)
        # Then reset to wanted revision (maybe even origin/master)
        if rev_options:
            rev_options = self.check_rev_options(rev_options[0], dest, rev_options)
        call_subprocess([self.cmd, 'reset', '--hard', '-q'] + rev_options, cwd=dest)
        #: update submodules
        self.update_submodules(dest)

    def obtain(self, dest):
        """Clone the repository into *dest*, checking out the requested rev."""
        url, rev = self.get_url_rev()
        if rev:
            rev_options = [rev]
            rev_display = ' (to %s)' % rev
        else:
            rev_options = ['origin/master']
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.notify('Cloning %s%s to %s' % (url, rev_display, display_path(dest)))
            call_subprocess([self.cmd, 'clone', '-q', url, dest])
            #: repo may contain submodules
            self.update_submodules(dest)
            if rev:
                rev_options = self.check_rev_options(rev, dest, rev_options)
                # Only do a checkout if rev_options differs from HEAD
                if not self.get_revision(dest).startswith(rev_options[0]):
                    call_subprocess([self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)

    def get_url(self, location):
        """Return the configured origin URL of the checkout at *location*."""
        url = call_subprocess(
            [self.cmd, 'config', 'remote.origin.url'],
            show_stdout=False, cwd=location)
        return url.strip()

    def get_revision(self, location):
        """Return the SHA1 of HEAD at *location*."""
        current_rev = call_subprocess(
            [self.cmd, 'rev-parse', 'HEAD'], show_stdout=False, cwd=location)
        return current_rev.strip()

    def get_refs(self, location):
        """Return map of named refs (branches or tags) to commit hashes."""
        output = call_subprocess([self.cmd, 'show-ref'],
                                 show_stdout=False, cwd=location)
        rv = {}
        for line in output.strip().splitlines():
            commit, ref = line.split(' ', 1)
            ref = ref.strip()
            ref_name = None
            if ref.startswith('refs/remotes/'):
                ref_name = ref[len('refs/remotes/'):]
            elif ref.startswith('refs/heads/'):
                ref_name = ref[len('refs/heads/'):]
            elif ref.startswith('refs/tags/'):
                ref_name = ref[len('refs/tags/'):]
            if ref_name is not None:
                rv[ref_name] = commit.strip()
        return rv

    def get_src_requirement(self, dist, location, find_tags):
        """Build an editable requirement string (repo@rev#egg=name)."""
        repo = self.get_url(location)
        if not repo.lower().startswith('git:'):
            repo = 'git+' + repo
        egg_project_name = dist.egg_name().split('-', 1)[0]
        if not repo:
            return None
        current_rev = self.get_revision(location)
        refs = self.get_refs(location)
        # refs maps names to commit hashes; we need the inverse
        # if multiple names map to a single commit, this arbitrarily picks one
        names_by_commit = dict((commit, ref) for ref, commit in refs.items())
        if current_rev in names_by_commit:
            # It's a tag
            full_egg_name = '%s-%s' % (egg_project_name, names_by_commit[current_rev])
        else:
            full_egg_name = '%s-dev' % egg_project_name
        return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)

    def get_url_rev(self):
        """
        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes don't
        work with a ssh:// scheme (e.g. Github). But we need a scheme for
        parsing. Hence we remove it again afterwards and return it as a stub.
        """
        if not '://' in self.url:
            assert not 'file:' in self.url
            self.url = self.url.replace('git+', 'git+ssh://')
            url, rev = super(Git, self).get_url_rev()
            url = url.replace('ssh://', '')
        else:
            url, rev = super(Git, self).get_url_rev()
        return url, rev

    def update_submodules(self, location):
        """Initialize and update submodules, if a .gitmodules file exists."""
        if not os.path.exists(os.path.join(location, '.gitmodules')):
            return
        call_subprocess([self.cmd, 'submodule', 'update', '--init', '--recursive', '-q'],
                        cwd=location)
vcs.register(Git)
| gpl-2.0 |
frishberg/django | django/db/backends/utils.py | 66 | 6725 | from __future__ import unicode_literals
import datetime
import decimal
import hashlib
import logging
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper(object):
    """Proxy around a DB-API cursor that funnels errors through
    ``db.wrap_database_errors`` and refuses work in broken transactions."""

    # Result-fetching attributes whose exceptions must be translated when
    # the bound method is handed out via __getattr__.
    WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])

    def __init__(self, cursor, db):
        self.cursor = cursor
        self.db = db

    def __getattr__(self, attr):
        wrapped_attr = getattr(self.cursor, attr)
        if attr in CursorWrapper.WRAP_ERROR_ATTRS:
            return self.db.wrap_database_errors(wrapped_attr)
        return wrapped_attr

    def __iter__(self):
        with self.db.wrap_database_errors:
            for row in self.cursor:
                yield row

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Ticket #17671 - Close instead of passing thru to avoid backend
        # specific behavior. Catch errors liberally because errors in cleanup
        # code aren't useful.
        try:
            self.close()
        except self.db.Database.Error:
            pass

    # The following methods cannot be implemented in __getattr__, because the
    # code must run when the method is invoked, not just when it is accessed.
    def callproc(self, procname, params=None):
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            if params is None:
                return self.cursor.callproc(procname)
            return self.cursor.callproc(procname, params)

    def execute(self, sql, params=None):
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            if params is None:
                return self.cursor.execute(sql)
            return self.cursor.execute(sql, params)

    def executemany(self, sql, param_list):
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
    """CursorWrapper variant that records query text and timings."""

    # XXX callproc isn't instrumented at this time.

    def execute(self, sql, params=None):
        t0 = time()
        try:
            return super(CursorDebugWrapper, self).execute(sql, params)
        finally:
            elapsed = time() - t0
            sql = self.db.ops.last_executed_query(self.cursor, sql, params)
            self.db.queries_log.append({
                'sql': sql,
                'time': "%.3f" % elapsed,
            })
            logger.debug(
                '(%.3f) %s; args=%s', elapsed, sql, params,
                extra={'duration': elapsed, 'sql': sql, 'params': params}
            )

    def executemany(self, sql, param_list):
        t0 = time()
        try:
            return super(CursorDebugWrapper, self).executemany(sql, param_list)
        finally:
            elapsed = time() - t0
            try:
                times = len(param_list)
            except TypeError:  # param_list could be an iterator
                times = '?'
            self.db.queries_log.append({
                'sql': '%s times: %s' % (times, sql),
                'time': "%.3f" % elapsed,
            })
            logger.debug(
                '(%.3f) %s; args=%s', elapsed, sql, param_list,
                extra={'duration': elapsed, 'sql': sql, 'params': param_list}
            )
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
    """Convert an ISO 'YYYY-MM-DD' string to a date; empty/null maps to None."""
    if not s:
        return None
    year, month, day = (int(part) for part in s.split('-'))
    return datetime.date(year, month, day)
def typecast_time(s):  # does NOT store time zone information
    """Parse 'HH:MM:SS[.ffffff]' into a datetime.time; empty/null -> None."""
    if not s:
        return None
    hour, minutes, seconds = s.split(':')
    # pad a (possibly missing) fractional part out to microseconds
    seconds, _, frac = seconds.partition('.')
    microseconds = (frac or '0') + '000000'
    return datetime.time(int(hour), int(minutes), int(seconds),
                         int(microseconds[:6]))
def typecast_timestamp(s):  # does NOT store time zone information
    """Parse 'YYYY-MM-DD[ HH:MM:SS[.ffffff]][+/-TZ]' into a datetime.

    A trailing timezone offset is recognised but discarded; the result is
    made aware (UTC) only when settings.USE_TZ is enabled.
    """
    # "2005-07-29 15:48:00.590358-05"
    # "2005-07-29 09:56:00-05"
    if not s:
        return None
    if ' ' not in s:
        return typecast_date(s)
    d, t = s.split()
    # Strip a trailing -HH[:MM]/+HH[:MM] offset; it is currently unused.
    if '-' in t:
        t = t.split('-', 1)[0]
    elif '+' in t:
        t = t.split('+', 1)[0]
    dates = d.split('-')
    times = t.split(':')
    seconds = times[2]
    seconds, _, frac = seconds.partition('.')
    microseconds = frac or '0'
    tzinfo = utc if settings.USE_TZ else None
    return datetime.datetime(
        int(dates[0]), int(dates[1]), int(dates[2]),
        int(times[0]), int(times[1]), int(seconds),
        int((microseconds + '000000')[:6]), tzinfo
    )
def typecast_decimal(s):
    """Convert a database string to Decimal; None and '' map to None."""
    if s in (None, ''):
        return None
    return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
    """Serialize a Decimal for the database; passes None through."""
    return None if d is None else str(d)
def truncate_name(name, length=None, hash_len=4):
    """Shortens a string to a repeatable mangled version with the given length.
    """
    if length is None or len(name) <= length:
        return name
    # keep a prefix of the name and append a short md5 digest so the
    # truncated form stays stable and (practically) unique
    digest = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
    return '%s%s' % (name[:length - hash_len], digest)
def format_number(value, max_digits, decimal_places):
    """
    Formats a number into a string with the requisite number of digits and
    decimal places.
    """
    if value is None:
        return None
    if isinstance(value, decimal.Decimal):
        context = decimal.getcontext().copy()
        if max_digits is not None:
            context.prec = max_digits
        if decimal_places is None:
            # no fixed scale: trap silent rounding instead
            context.traps[decimal.Rounded] = 1
            value = context.create_decimal(value)
        else:
            value = value.quantize(decimal.Decimal(".1") ** decimal_places, context=context)
        return "{:f}".format(value)
    if decimal_places is None:
        return "{:f}".format(value)
    return "%.*f" % (decimal_places, value)
| bsd-3-clause |
goulu/networkx | networkx/algorithms/flow/tests/test_maxflow.py | 5 | 18883 | # -*- coding: utf-8 -*-
"""Maximum flow algorithms test suite.
"""
from nose.tools import *
import networkx as nx
from networkx.algorithms.flow import build_flow_dict, build_residual_network
from networkx.algorithms.flow import boykov_kolmogorov
from networkx.algorithms.flow import edmonds_karp
from networkx.algorithms.flow import preflow_push
from networkx.algorithms.flow import shortest_augmenting_path
from networkx.algorithms.flow import dinitz
flow_funcs = [boykov_kolmogorov, dinitz, edmonds_karp, preflow_push, shortest_augmenting_path]
max_min_funcs = [nx.maximum_flow, nx.minimum_cut]
flow_value_funcs = [nx.maximum_flow_value, nx.minimum_cut_value]
interface_funcs = sum([max_min_funcs, flow_value_funcs], [])
all_funcs = sum([flow_funcs, interface_funcs], [])
msg = "Assertion failed in function: {0}"
msgi = "Assertion failed in function: {0} in interface {1}"
def compute_cutset(G, partition):
    """Return the edges crossing from the first partition block into the second."""
    reachable, non_reachable = partition
    return set((u, v)
               for u in reachable
               for v in G[u]
               if v in non_reachable)
def validate_flows(G, s, t, flowDict, solnValue, capacity, flow_func):
    """Check that flowDict is a feasible s-t flow on G of value solnValue."""
    errmsg = msg.format(flow_func.__name__)
    assert_equal(set(G), set(flowDict), msg=errmsg)
    for u in G:
        assert_equal(set(G[u]), set(flowDict[u]), msg=errmsg)
    balance = dict.fromkeys(flowDict, 0)
    for u in flowDict:
        for v, flow in flowDict[u].items():
            # capacity constraint (only where an explicit capacity exists)
            if capacity in G[u][v]:
                ok_(flow <= G[u][v][capacity])
            ok_(flow >= 0, msg=errmsg)
            balance[u] -= flow
            balance[v] += flow
    # conservation: net outflow at s, net inflow at t, zero elsewhere
    for u, exc in balance.items():
        if u == s:
            assert_equal(exc, -solnValue, msg=errmsg)
        elif u == t:
            assert_equal(exc, solnValue, msg=errmsg)
        else:
            assert_equal(exc, 0, msg=errmsg)
def validate_cuts(G, s, t, solnValue, partition, capacity, flow_func):
    """Check that partition is a valid minimum s-t cut of value solnValue."""
    errmsg = msg.format(flow_func.__name__)
    reachable, non_reachable = partition
    assert_true(all(n in G for n in reachable), msg=errmsg)
    assert_true(all(n in G for n in non_reachable), msg=errmsg)
    cutset = compute_cutset(G, partition)
    assert_true(all(G.has_edge(u, v) for (u, v) in cutset), msg=errmsg)
    assert_equal(solnValue, sum(G[u][v][capacity] for (u, v) in cutset),
                 msg=errmsg)
    # removing the cutset must disconnect t from s
    H = G.copy()
    H.remove_edges_from(cutset)
    if G.is_directed():
        assert_false(nx.is_strongly_connected(H), msg=errmsg)
    else:
        assert_false(nx.is_connected(H), msg=errmsg)
def compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity='capacity'):
    """Run every registered flow function and validate flows and cuts.

    NOTE(review): solnFlows is accepted for reference but validation only
    checks feasibility and the flow/cut value, matching prior behavior.
    """
    for flow_func in flow_funcs:
        errmsg = msg.format(flow_func.__name__)
        R = flow_func(G, s, t, capacity)
        # Test both legacy and new implementations.
        flow_value = R.graph['flow_value']
        flow_dict = build_flow_dict(G, R)
        assert_equal(flow_value, solnValue, msg=errmsg)
        validate_flows(G, s, t, flow_dict, solnValue, capacity, flow_func)
        # Minimum cut
        cut_value, partition = nx.minimum_cut(G, s, t, capacity=capacity,
                                              flow_func=flow_func)
        validate_cuts(G, s, t, solnValue, partition, capacity, flow_func)
class TestMaxflowMinCutCommon:
def test_graph1(self):
    """Maximum flow on a single-edge undirected graph."""
    G = nx.Graph()
    G.add_edge(1, 2, capacity=1.0)
    expected = {1: {2: 1.0},
                2: {1: 1.0}}
    compare_flows_and_cuts(G, 1, 2, expected, 1.0)
def test_graph2(self):
    """A more complex undirected graph, adapted from
    www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow"""
    G = nx.Graph()
    for u, v, cap in [('x', 'a', 3.0), ('x', 'b', 1.0), ('a', 'c', 3.0),
                      ('b', 'c', 5.0), ('b', 'd', 4.0), ('d', 'e', 2.0),
                      ('c', 'y', 2.0), ('e', 'y', 3.0)]:
        G.add_edge(u, v, capacity=cap)
    H = {'x': {'a': 3, 'b': 1},
         'a': {'c': 3, 'x': 3},
         'b': {'c': 1, 'd': 2, 'x': 1},
         'c': {'a': 3, 'b': 1, 'y': 2},
         'd': {'b': 2, 'e': 2},
         'e': {'d': 2, 'y': 2},
         'y': {'c': 2, 'e': 2}}
    compare_flows_and_cuts(G, 'x', 'y', H, 4.0)
def test_digraph1(self):
    """The classic directed graph example."""
    G = nx.DiGraph()
    for u, v, cap in [('a', 'b', 1000.0), ('a', 'c', 1000.0),
                      ('b', 'c', 1.0), ('b', 'd', 1000.0),
                      ('c', 'd', 1000.0)]:
        G.add_edge(u, v, capacity=cap)
    H = {'a': {'b': 1000.0, 'c': 1000.0},
         'b': {'c': 0, 'd': 1000.0},
         'c': {'d': 1000.0},
         'd': {}}
    compare_flows_and_cuts(G, 'a', 'd', H, 2000.0)
def test_digraph2(self):
    """An example in which some edges end up with zero flow."""
    G = nx.DiGraph()
    for u, v, cap in [('s', 'b', 2), ('s', 'c', 1), ('c', 'd', 1),
                      ('d', 'a', 1), ('b', 'a', 2), ('a', 't', 2)]:
        G.add_edge(u, v, capacity=cap)
    H = {'s': {'b': 2, 'c': 0},
         'c': {'d': 0},
         'd': {'a': 0},
         'b': {'a': 2},
         'a': {'t': 2},
         't': {}}
    compare_flows_and_cuts(G, 's', 't', H, 2)
def test_digraph3(self):
    """A directed graph example from Cormen et al."""
    G = nx.DiGraph()
    for u, v, cap in [('s', 'v1', 16.0), ('s', 'v2', 13.0),
                      ('v1', 'v2', 10.0), ('v2', 'v1', 4.0),
                      ('v1', 'v3', 12.0), ('v3', 'v2', 9.0),
                      ('v2', 'v4', 14.0), ('v4', 'v3', 7.0),
                      ('v3', 't', 20.0), ('v4', 't', 4.0)]:
        G.add_edge(u, v, capacity=cap)
    H = {'s': {'v1': 12.0, 'v2': 11.0},
         'v2': {'v1': 0, 'v4': 11.0},
         'v1': {'v2': 0, 'v3': 12.0},
         'v3': {'v2': 0, 't': 19.0},
         'v4': {'v3': 7.0, 't': 4.0},
         't': {}}
    compare_flows_and_cuts(G, 's', 't', H, 23.0)
def test_digraph4(self):
    """A more complex directed graph, from
    www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow"""
    G = nx.DiGraph()
    for u, v, cap in [('x', 'a', 3.0), ('x', 'b', 1.0), ('a', 'c', 3.0),
                      ('b', 'c', 5.0), ('b', 'd', 4.0), ('d', 'e', 2.0),
                      ('c', 'y', 2.0), ('e', 'y', 3.0)]:
        G.add_edge(u, v, capacity=cap)
    H = {'x': {'a': 2.0, 'b': 1.0},
         'a': {'c': 2.0},
         'b': {'c': 0, 'd': 1.0},
         'c': {'y': 2.0},
         'd': {'e': 1.0},
         'e': {'y': 1.0},
         'y': {}}
    compare_flows_and_cuts(G, 'x', 'y', H, 3.0)
def test_wikipedia_dinitz_example(self):
    """Nice example from https://en.wikipedia.org/wiki/Dinic's_algorithm"""
    G = nx.DiGraph()
    for u, v, cap in [('s', 1, 10), ('s', 2, 10), (1, 3, 4), (1, 4, 8),
                      (1, 2, 2), (2, 4, 9), (3, 't', 10), (4, 3, 6),
                      (4, 't', 10)]:
        G.add_edge(u, v, capacity=cap)
    expected = {1: {2: 0, 3: 4, 4: 6},
                2: {4: 9},
                3: {'t': 9},
                4: {3: 5, 't': 10},
                's': {1: 10, 2: 9},
                't': {}}
    compare_flows_and_cuts(G, 's', 't', expected, 19)
def test_optional_capacity(self):
    """The capacity attribute name is configurable via ``capacity=``."""
    G = nx.DiGraph()
    # Capacities live under the non-default attribute name 'spam'.
    for tail, head, cap in [('x', 'a', 3.0), ('x', 'b', 1.0),
                            ('a', 'c', 3.0), ('b', 'c', 5.0),
                            ('b', 'd', 4.0), ('d', 'e', 2.0),
                            ('c', 'y', 2.0), ('e', 'y', 3.0)]:
        G.add_edge(tail, head, spam=cap)
    expected_flows = {'x': {'a': 2.0, 'b': 1.0},
                      'a': {'c': 2.0},
                      'b': {'c': 0, 'd': 1.0},
                      'c': {'y': 2.0},
                      'd': {'e': 1.0},
                      'e': {'y': 1.0},
                      'y': {}}
    expected_value = 3.0
    compare_flows_and_cuts(G, 'x', 'y', expected_flows, expected_value,
                           capacity='spam')
def test_digraph_infcap_edges(self):
    """Uncapacitated digraph edges behave as infinite-capacity edges."""
    # Helper keeps the original edge insertion order; cap=None means
    # "no capacity attribute" (i.e. infinite).
    def build(edge_caps):
        G = nx.DiGraph()
        for tail, head, cap in edge_caps:
            if cap is None:
                G.add_edge(tail, head)
            else:
                G.add_edge(tail, head, capacity=cap)
        return G

    # DiGraph with infinite capacity edges
    G = build([('s', 'a', None), ('s', 'b', 30), ('a', 'c', 25),
               ('b', 'c', 12), ('a', 't', 60), ('c', 't', None)])
    H = {'s': {'a': 85, 'b': 12},
         'a': {'c': 25, 't': 60},
         'b': {'c': 12},
         'c': {'t': 37},
         't': {}}
    compare_flows_and_cuts(G, 's', 't', H, 97)
    # DiGraph with an infinite capacity digon (a <-> c)
    G = build([('s', 'a', 85), ('s', 'b', 30), ('a', 'c', None),
               ('c', 'a', None), ('b', 'c', 12), ('a', 't', 60),
               ('c', 't', 37)])
    H = {'s': {'a': 85, 'b': 12},
         'a': {'c': 25, 't': 60},
         'c': {'a': 0, 't': 37},
         'b': {'c': 12},
         't': {}}
    compare_flows_and_cuts(G, 's', 't', H, 97)
def test_digraph_infcap_path(self):
    """A fully uncapacitated s-t path makes the max flow unbounded."""
    G = nx.DiGraph()
    for tail, head, cap in [('s', 'a', None), ('s', 'b', 30),
                            ('a', 'c', None), ('b', 'c', 12),
                            ('a', 't', 60), ('c', 't', None)]:
        if cap is None:
            G.add_edge(tail, head)
        else:
            G.add_edge(tail, head, capacity=cap)
    for flow_func in all_funcs:
        assert_raises(nx.NetworkXUnbounded,
                      flow_func, G, 's', 't')
def test_graph_infcap_edges(self):
    """Undirected graph whose uncapacitated edges count as infinite."""
    G = nx.Graph()
    for u, v, cap in [('s', 'a', None), ('s', 'b', 30), ('a', 'c', 25),
                      ('b', 'c', 12), ('a', 't', 60), ('c', 't', None)]:
        if cap is None:
            G.add_edge(u, v)
        else:
            G.add_edge(u, v, capacity=cap)
    # Undirected flow function lists both directions of each edge.
    H = {'s': {'a': 85, 'b': 12},
         'a': {'c': 25, 's': 85, 't': 60},
         'b': {'c': 12, 's': 12},
         'c': {'a': 25, 'b': 12, 't': 37},
         't': {'a': 60, 'c': 37}}
    compare_flows_and_cuts(G, 's', 't', H, 97)
def test_digraph5(self):
    """From ticket #429 by mfrasca.

    Renamed from ``test_digraph4``: this class already defines a method
    with that name (the topcoder example), and the later definition
    silently shadowed the earlier one, so that test never ran.
    """
    G = nx.DiGraph()
    G.add_edge('s', 'a', capacity = 2)
    G.add_edge('s', 'b', capacity = 2)
    G.add_edge('a', 'b', capacity = 5)
    G.add_edge('a', 't', capacity = 1)
    G.add_edge('b', 'a', capacity = 1)
    G.add_edge('b', 't', capacity = 3)
    # Expected flow function and max-flow value (4).
    flowSoln = {'a': {'b': 1, 't': 1},
                'b': {'a': 0, 't': 3},
                's': {'a': 2, 'b': 2},
                't': {}}
    compare_flows_and_cuts(G, 's', 't', flowSoln, 4)
def test_disconnected(self):
    """Removing a cut vertex disconnects s from t: the max flow is 0."""
    G = nx.Graph()
    G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)],
                              weight='capacity')
    G.remove_node(1)
    assert_equal(nx.maximum_flow_value(G, 0, 3), 0)
    expected = {0: {}, 2: {3: 0}, 3: {2: 0}}
    compare_flows_and_cuts(G, 0, 3, expected, 0)
def test_source_target_not_in_graph(self):
    """A missing source or target node must raise NetworkXError."""
    for missing in (0, 3):
        G = nx.Graph()
        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)],
                                  weight='capacity')
        G.remove_node(missing)
        for flow_func in all_funcs:
            assert_raises(nx.NetworkXError, flow_func, G, 0, 3)
def test_source_target_coincide(self):
    # Flow between a node and itself is undefined; every algorithm
    # must reject source == target.
    G = nx.Graph()
    G.add_node(0)
    for flow_func in all_funcs:
        assert_raises(nx.NetworkXError, flow_func, G, 0, 0)
def test_multigraphs_raise(self):
    """Flow functions must reject multigraph inputs.

    Fix: the original built ``M = nx.MultiDiGraph()`` but never used
    it, so only the undirected multigraph was exercised; both flavors
    are asserted now.
    """
    G = nx.MultiGraph()
    M = nx.MultiDiGraph()
    G.add_edges_from([(0, 1), (1, 0)], capacity=True)
    M.add_edges_from([(0, 1), (1, 0)], capacity=True)
    for flow_func in all_funcs:
        assert_raises(nx.NetworkXError, flow_func, G, 0, 0)
        assert_raises(nx.NetworkXError, flow_func, M, 0, 0)
class TestMaxFlowMinCutInterface:
    """Tests for the high-level maximum_flow/minimum_cut interface."""

    def setup(self):
        # DiGraph with max-flow value 3.0 between 'x' and 'y'.
        G = nx.DiGraph()
        G.add_edge('x', 'a', capacity = 3.0)
        G.add_edge('x', 'b', capacity = 1.0)
        G.add_edge('a', 'c', capacity = 3.0)
        G.add_edge('b', 'c', capacity = 5.0)
        G.add_edge('b', 'd', capacity = 4.0)
        G.add_edge('d', 'e', capacity = 2.0)
        G.add_edge('c', 'y', capacity = 2.0)
        G.add_edge('e', 'y', capacity = 3.0)
        self.G = G
        # Two-edge path with max-flow value 1.0 between 0 and 2.
        H = nx.DiGraph()
        H.add_edge(0, 1, capacity = 1.0)
        H.add_edge(1, 2, capacity = 1.0)
        self.H = H

    def test_flow_func_not_callable(self):
        """Interface functions must reject non-callable flow_func values."""
        elements = ['this_should_be_callable', 10, set([1, 2, 3])]
        G = nx.Graph()
        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)],
                                  weight='capacity')
        for flow_func in interface_funcs:
            for element in elements:
                # Fix: this identical assertion used to appear twice in
                # a row (copy-paste); asserting once is sufficient.
                assert_raises(nx.NetworkXError,
                              flow_func, G, 0, 1, flow_func=element)

    def test_flow_func_parameters(self):
        """Every algorithm passed via flow_func yields the same value."""
        G = self.G
        fv = 3.0
        for interface_func in interface_funcs:
            for flow_func in flow_funcs:
                result = interface_func(G, 'x', 'y', flow_func=flow_func)
                if interface_func in max_min_funcs:
                    # maximum_flow/minimum_cut return (value, partition).
                    result = result[0]
                assert_equal(fv, result, msg=msgi.format(
                    flow_func.__name__, interface_func.__name__))

    def test_minimum_cut_no_cutoff(self):
        """minimum_cut interfaces must reject the cutoff parameter."""
        G = self.G
        for flow_func in flow_funcs:
            assert_raises(nx.NetworkXError, nx.minimum_cut, G, 'x', 'y',
                          flow_func=flow_func, cutoff=1.0)
            assert_raises(nx.NetworkXError, nx.minimum_cut_value, G, 'x', 'y',
                          flow_func=flow_func, cutoff=1.0)

    def test_kwargs(self):
        """Algorithm-specific keyword arguments are forwarded."""
        G = self.H
        fv = 1.0
        to_test = (
            (shortest_augmenting_path, dict(two_phase=True)),
            (preflow_push, dict(global_relabel_freq=5)),
        )
        for interface_func in interface_funcs:
            for flow_func, kwargs in to_test:
                result = interface_func(G, 0, 2, flow_func=flow_func, **kwargs)
                if interface_func in max_min_funcs:
                    result = result[0]
                assert_equal(fv, result, msg=msgi.format(
                    flow_func.__name__, interface_func.__name__))

    def test_kwargs_default_flow_func(self):
        """Algorithm kwargs without an explicit flow_func are rejected."""
        G = self.H
        for interface_func in interface_funcs:
            assert_raises(nx.NetworkXError, interface_func,
                          G, 0, 1, global_relabel_freq=2)

    def test_reusing_residual(self):
        """A prebuilt residual network may be reused across calls."""
        G = self.G
        fv = 3.0
        s, t = 'x', 'y'
        R = build_residual_network(G, 'capacity')
        for interface_func in interface_funcs:
            for flow_func in flow_funcs:
                for i in range(3):
                    result = interface_func(G, 'x', 'y', flow_func=flow_func,
                                            residual=R)
                    if interface_func in max_min_funcs:
                        result = result[0]
                    assert_equal(fv, result,
                                 msg=msgi.format(flow_func.__name__,
                                                 interface_func.__name__))
# Tests specific to one algorithm
def test_preflow_push_global_relabel_freq():
    # global_relabel_freq=None disables global relabeling entirely; the
    # trivial one-edge network must still yield the correct flow value.
    G = nx.DiGraph()
    G.add_edge(1, 2, capacity=1)
    R = preflow_push(G, 1, 2, global_relabel_freq=None)
    assert_equal(R.graph['flow_value'], 1)
    # Negative frequencies are invalid and must be rejected.
    assert_raises(nx.NetworkXError, preflow_push, G, 1, 2,
                  global_relabel_freq=-1)
def test_preflow_push_makes_enough_space():
    # From ticket #1542: regression test that preflow_push allocates
    # enough space for the full flow decomposition when two paths share
    # intermediate nodes.
    G = nx.DiGraph()
    nx.add_path(G, [0, 1, 3], capacity=1)
    nx.add_path(G, [1, 2, 3], capacity=1)
    R = preflow_push(G, 0, 3, value_only=False)
    assert_equal(R.graph['flow_value'], 1)
def test_shortest_augmenting_path_two_phase():
    """Both SAP phases agree on k disjoint unit-capacity paths."""
    # k disjoint length-p paths from 's' to 't', each of capacity 1,
    # so the max-flow value is exactly k.
    k = 5
    p = 1000
    G = nx.DiGraph()
    for i in range(k):
        G.add_edge('s', (i, 0), capacity=1)
        nx.add_path(G, ((i, j) for j in range(p)), capacity=1)
        G.add_edge((i, p - 1), 't', capacity=1)
    for two_phase in (True, False):
        R = shortest_augmenting_path(G, 's', 't', two_phase=two_phase)
        assert_equal(R.graph['flow_value'], k)
class TestCutoff:
    """Tests for the cutoff parameter of the augmenting-path algorithms."""

    def test_cutoff(self):
        """cutoff stops augmentation early: flow value lands in [k, 2k]."""
        # k disjoint length-p paths of capacity 2; true max flow is 2k.
        k = 5
        p = 1000
        G = nx.DiGraph()
        for i in range(k):
            G.add_edge('s', (i, 0), capacity=2)
            nx.add_path(G, ((i, j) for j in range(p)), capacity=2)
            G.add_edge((i, p - 1), 't', capacity=2)
        runs = (
            lambda: shortest_augmenting_path(G, 's', 't', two_phase=True,
                                             cutoff=k),
            lambda: shortest_augmenting_path(G, 's', 't', two_phase=False,
                                             cutoff=k),
            lambda: edmonds_karp(G, 's', 't', cutoff=k),
        )
        for run in runs:
            R = run()
            ok_(k <= R.graph['flow_value'] <= 2 * k)

    def test_complete_graph_cutoff(self):
        """maximum_flow_value honors cutoff exactly on K5."""
        G = nx.complete_graph(5)
        nx.set_edge_attributes(G, 'capacity',
                               dict(((u, v), 1) for u, v in G.edges()))
        for flow_func in (shortest_augmenting_path, edmonds_karp):
            for cutoff in (3, 2, 1):
                result = nx.maximum_flow_value(G, 0, 4, flow_func=flow_func,
                                               cutoff=cutoff)
                assert_equal(cutoff, result,
                             msg="cutoff error in {0}".format(
                                 flow_func.__name__))
| bsd-3-clause |
Fireblend/chromium-crosswalk | tools/telemetry/telemetry/core/platform/tracing_options.py | 12 | 1519 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Record-mode identifiers; RECORD_MODES enumerates the valid values
# accepted by TracingOptions.record_mode.
RECORD_AS_MUCH_AS_POSSIBLE = 'record-as-much-as-possible'
RECORD_UNTIL_FULL = 'record-until-full'
RECORD_MODES = (RECORD_AS_MUCH_AS_POSSIBLE, RECORD_UNTIL_FULL)


class TracingOptions(object):
    """Tracing options control which core tracing systems should be enabled.

    This simply turns on those systems. If those systems have additional
    options, e.g. what to trace, then they are typically configured by adding
    categories to the TracingCategoryFilter.

    Options:
      enable_chrome_trace: a boolean that specifies whether to enable
          chrome tracing.
      enable_platform_display_trace: a boolean that specifies whether to
          enable platform display tracing.
      record_mode: can be any mode in RECORD_MODES. This corresponds to
          record modes in chrome (see
          TraceRecordMode in base/trace_event/trace_event_impl.h for
          more information)
    """
    def __init__(self):
        self.enable_chrome_trace = False
        self.enable_platform_display_trace = False
        # Private so assignment is forced through the validating setter.
        self._record_mode = RECORD_AS_MUCH_AS_POSSIBLE

    @property
    def record_mode(self):  # pylint: disable=E0202
        return self._record_mode

    @record_mode.setter
    def record_mode(self, value):  # pylint: disable=E0202
        # Reject anything that is not a known chrome record mode.
        assert value in RECORD_MODES
        self._record_mode = value
| bsd-3-clause |
Passtechsoft/TPEAlpGen | blender/release/scripts/freestyle/styles/sequentialsplit_sketchy.py | 6 | 1933 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : sequentialsplit_sketchy.py
# Author : Stephane Grabli
# Date : 04/08/2005
# Purpose : Use the sequential split with two different
# predicates to specify respectively the starting and
# the stopping extremities for strokes
from freestyle.chainingiterators import ChainSilhouetteIterator
from freestyle.predicates import (
NotUP1D,
QuantitativeInvisibilityUP1D,
TrueUP1D,
pyBackTVertexUP0D,
pyVertexNatureUP0D,
)
from freestyle.shaders import (
ConstantColorShader,
IncreasingThicknessShader,
SpatialNoiseShader,
)
from freestyle.types import Nature, Operators
# Select only visible edges (quantitative invisibility == 0) and chain
# them along silhouettes in both directions.
upred = QuantitativeInvisibilityUP1D(0)
Operators.select(upred)
Operators.bidirectional_chain(ChainSilhouetteIterator(), NotUP1D(upred))
## starting and stopping predicates:
start = pyVertexNatureUP0D(Nature.NON_T_VERTEX)
stop = pyBackTVertexUP0D()
# Split each chain into strokes that start at a non-T-vertex and stop at
# a back-facing T-vertex; 10 is the 2D sampling step for the split.
Operators.sequential_split(start, stop, 10)
# Sketchy look: spatial noise plus increasing thickness in dark grey.
shaders_list = [
    SpatialNoiseShader(7, 120, 2, True, True),
    IncreasingThicknessShader(5, 8),
    ConstantColorShader(0.2, 0.2, 0.2, 1),
]
Operators.create(TrueUP1D(), shaders_list)
| gpl-3.0 |
los-cocos/etc_code | cocos#248--RectMapCollider, player sometimes stuck/start.py | 1 | 6979 | """
A script to demo a defect in RectMapCollider, initial report by Netanel at
https://groups.google.com/forum/#!topic/cocos-discuss/a494vcH-u3I
The defect is that the player gets stuck at some positions, and it was confirmed
for cocos master Aug 1, 2015 (292ae676) and cocos-0.6.3-release, see cocos #248
The package 'blinker' (available from pipy) is needed to run this script
Further investigation shows that this happens when both of this concur
1. the player actively pushes against a blocking surface
2. player rect alligns with the grid tile.
changes from the OP bugdemo code:
lines irrelevant to the bug removed
changed player controls
added a view to show the potentially colliding cells that RectMapCollider
will consider (sin as red rectangle overlapping the player)
player pic edited to make visible the actual player boundary
Controlling the player:
use left-right for horizontal move, must keep pressing to move
use up-down to move vertical; a press adds/substracts up to y-velocity
Demoing the bug:
1. move to touch the left wall.
2. release 'left' key
3. move up and down, this works
4. keep pressed the 'left' key, and try to move down: player gets stuck
at some alineations
scene
background
scroller=ScrollingManager
tilemap <- load(...)['map0']
layer=Game (a ScrollableLayer)
sprite
particles
potential collisions view, ShowCollision
"""
from __future__ import division, print_function
from cocos.particle_systems import *
from cocos.particle import Color
from cocos.text import Label
from cocos.tiles import load, RectMapLayer
from cocos.mapcolliders import RectMapWithPropsCollider
from cocos.layer import Layer, ColorLayer, ScrollingManager, ScrollableLayer
from cocos.sprite import Sprite
from cocos.actions import *
from cocos.scene import Scene
from cocos.director import director
from pyglet.window import key
from pyglet.window.key import symbol_string, KeyStateHandler
from menu import GameMenu
import blinker
# One-time framework setup: window, tile map and the scrolling manager.
director.init(width=1920, height=480, autoscale = True, resizable = True)
Map = load("mapmaking.tmx")
scroller = ScrollingManager()
tilemap = Map['map0']
# ShowCollision converts cell indices to pixels as i*64, j*64, which is
# only valid if the map starts at the world origin.
assert tilemap.origin_x == 0
assert tilemap.origin_y == 0
class Background(ColorLayer):
    """Solid light-blue backdrop drawn behind the scrolling map."""
    def __init__(self):
        # RGBA: light blue, fully opaque.
        super(Background, self).__init__(65,120,255,255)
class ShowCollision(ScrollableLayer):
    """
    A layer to show the cells a RectMapCollider considers potentially
    colliding with the 'new' rect.

    Use with CustomRectMapCollider so the event of interest is published
    """
    def __init__(self):
        super(ShowCollision, self).__init__()
        # Pool of 10 reusable 64x64 red overlays; assumes at most 10
        # cells are ever reported at once -- TODO confirm.
        self.collision_view = []
        for i in range(10):
            self.collision_view.append(ColorLayer(255, 0, 0, 255, width=64, height=64))
        for e in self.collision_view:
            self.add(e)
        # Subscribe to the cell list published by SpyCollider.
        signal = blinker.signal("collider cells")
        signal.connect(self.on_collision_changed)

    def on_collision_changed(self, sender, payload=None):
        # Park one overlay over each reported cell (cell size assumed
        # 64x64, matching the map). NOTE(review): zip silently drops any
        # cells beyond the pooled 10 overlays.
        for cell, view in zip(payload, self.collision_view):
            view.position = (cell.i * 64, cell.j * 64)
            view.opacity = 140
        # Hide the overlays not used this frame.
        for i in range(len(payload), len(self.collision_view)):
            self.collision_view[i].opacity = 0
class Game(ScrollableLayer):
    """Playable layer: holds the sprite and translates key events into
    the (dx, dy) velocity read by the module-level update() hack."""
    is_event_handler = True

    def __init__(self):
        super(Game, self).__init__()
        self.score = 0
        # Add player
        self.sprite = Sprite('magic.png')
        self.sprite.position = 320, 240
        self.sprite.direction = "right"
        # Per-frame displacement, updated by the key handlers below.
        self.sprite.dx = 0
        self.sprite.dy = 0
        self.add(self.sprite, z=1)
        # A list of balls
        self.balls = set()
        # Teleportation counter
        self.teleportation = 0
        self.sprite.jump = 0

    def on_key_press(self, inp, modifers):
        # Horizontal: hold to move (reset to 0 on release below).
        # Vertical: each press adds/subtracts 3, clamped to [-6, 6];
        # note there is no release handler for UP/DOWN.
        if symbol_string(inp) == "LEFT":
            self.sprite.dx -= 3
            print("press left, dx:", self.sprite.dx)
        if symbol_string(inp) == "RIGHT":
            self.sprite.dx += 3
            print("press right, dx:", self.sprite.dx)
        if symbol_string(inp) == "UP":
            self.sprite.dy += 3
            if self.sprite.dy > 6:
                self.sprite.dy = 6
            print("press up, dy:", self.sprite.dy)
        if symbol_string(inp) == "DOWN":
            self.sprite.dy -= 3
            if self.sprite.dy < -6:
                self.sprite.dy = -6
            print("press down, dy:", self.sprite.dy)

    def on_key_release(self, inp, modifers):
        # Stop horizontal motion when the key is released.
        if symbol_string(inp) == "LEFT":
            self.sprite.dx = 0
            print("release left, dx:", self.sprite.dx)
        if symbol_string(inp) == "RIGHT":
            self.sprite.dx = 0
            print("release right, dx:", self.sprite.dx)
class SpyCollider(RectMapWithPropsCollider):
    """
    Same as RectMapWithPropsCollider, except it publishes which cells will be considered
    for collision.

    Usage:
        # instantiate
        a = SpyCollider()
        # set the behavior for velocity change on collision with
        # a.on_bump_handler = a.on_bump_slide
        # add the signal we want to emit
        a.signal = blinker.signal("collider cells")
        # use as stock RectMapCollider
        # catch the signal with something like ShowCollision
    """
    def collide_map(self, maplayer, last, new, vx, vy):
        """Publish the candidate cells, then delegate to the stock collider."""
        # Cells overlapping the destination rect -- the same set the base
        # collider will test against.
        objects = maplayer.get_in_region(*(new.bottomleft + new.topright))
        self.signal.send(payload=objects)
        return super(SpyCollider, self).collide_map(maplayer, last, new, vx, vy)
# Wire the game layer to the spying collider so the cells it inspects
# are published on the "collider cells" signal (drawn by ShowCollision).
layer = Game()
collider = SpyCollider()
collider.on_bump_handler = collider.on_bump_slide
collider.signal = blinker.signal("collider cells")
#collider = RectMapCollider()
# WARN: this was hacked for bugdemo purposes only; don't use in real code:
# lots of globals
# position delta must use dt, else unpredictable view velocity
def update(dt):
    """ Update game"""
    # NOTE(review): the displacement ignores dt, so apparent velocity is
    # frame-rate dependent (acknowledged above as bugdemo-only code).
    last = layer.sprite.get_rect()
    new = last.copy()
    new.x += layer.sprite.dx
    new.y += layer.sprite.dy
    # dont care about velocity, pass 0, 0
    collider.collide_map(tilemap, last, new, 0.0, 0.0)
    # collide_map mutates `new` in place to the resolved position.
    layer.sprite.position = new.center
    scroller.set_focus(*new.center)
# Schedule Updates
layer.schedule(update)
# Add map to scroller
scroller.add(tilemap)
# Create Scene
scene = Scene()
# Create and add background (added first, so it draws behind the map)
background = Background()
scene.add(background)
# Add main layer to scroller
scroller.add(layer)
# Collision-debug overlay drawn on top of the game layer
scroller.add(ShowCollision())
# Add scroller to scene
scene.add(scroller)
# Game menu configuration: the menu scene runs first and is responsible
# for starting `scene`.
menu = GameMenu(scene)
menuScene = Scene()
menuScene.add(menu)
director.run(menuScene)
dfalt974/SickRage | lib/sqlalchemy/event/attr.py | 77 | 12639 | # event/attr.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Attribute implementation for _Dispatch classes.
The various listener targets for a particular event class are represented
as attributes, which refer to collections of listeners to be fired off.
These collections can exist at the class level as well as at the instance
level. An event is fired off using code like this::
some_object.dispatch.first_connect(arg1, arg2)
Above, ``some_object.dispatch`` would be an instance of ``_Dispatch`` and
``first_connect`` is typically an instance of ``_ListenerCollection``
if event listeners are present, or ``_EmptyListener`` if none are present.
The attribute mechanics here spend effort trying to ensure listener functions
are available with a minimum of function call overhead, that unnecessary
objects aren't created (i.e. many empty per-instance listener collections),
as well as that everything is garbage collectable when owning references are
lost. Other features such as "propagation" of listener functions across
many ``_Dispatch`` instances, "joining" of multiple ``_Dispatch`` instances,
as well as support for subclass propagation (e.g. events assigned to
``Pool`` vs. ``QueuePool``) are all implemented here.
"""
from __future__ import absolute_import, with_statement
from .. import util
from ..util import threading
from . import registry
from . import legacy
from itertools import chain
import weakref
class RefCollection(object):
    """Mixin providing a memoized weakref to self for listener bookkeeping."""
    @util.memoized_property
    def ref(self):
        # The registry is notified via callback when this collection is
        # garbage collected.
        return weakref.ref(self, registry._collection_gced)
class _DispatchDescriptor(RefCollection):
    """Class-level attributes on :class:`._Dispatch` classes."""

    def __init__(self, parent_dispatch_cls, fn):
        # `fn` is the event prototype function; its signature defines the
        # positional argument names listeners will receive.
        self.__name__ = fn.__name__
        argspec = util.inspect_getargspec(fn)
        self.arg_names = argspec.args[1:]
        self.has_kw = bool(argspec.keywords)
        # Legacy signatures, newest declared version first.
        self.legacy_signatures = list(reversed(
            sorted(
                getattr(fn, '_legacy_signatures', []),
                key=lambda s: s[0]
            )
        ))
        self.__doc__ = fn.__doc__ = legacy._augment_fn_docs(
            self, parent_dispatch_cls, fn)
        # Weak maps keyed by target class so classes do not leak.
        self._clslevel = weakref.WeakKeyDictionary()
        self._empty_listeners = weakref.WeakKeyDictionary()

    def _adjust_fn_spec(self, fn, named):
        # Optionally adapt `fn` so it can be called with named arguments
        # and/or a legacy (older-version) signature.
        if named:
            fn = self._wrap_fn_for_kw(fn)
        if self.legacy_signatures:
            try:
                argspec = util.get_callable_argspec(fn, no_self=True)
            except TypeError:
                pass
            else:
                fn = legacy._wrap_fn_for_legacy(self, fn, argspec)
        return fn

    def _wrap_fn_for_kw(self, fn):
        # Wrapper converting positional event arguments to keywords.
        def wrap_kw(*args, **kw):
            argdict = dict(zip(self.arg_names, args))
            argdict.update(kw)
            return fn(**argdict)
        return wrap_kw

    def insert(self, event_key, propagate):
        """Prepend a listener to the class-level lists of the target class
        and all of its current subclasses."""
        target = event_key.dispatch_target
        assert isinstance(target, type), \
            "Class-level Event targets must be classes."
        stack = [target]
        while stack:
            cls = stack.pop(0)
            stack.extend(cls.__subclasses__())
            if cls is not target and cls not in self._clslevel:
                # Subclass not yet initialized: build its list from the
                # MRO, which already includes the new listener.
                self.update_subclass(cls)
            else:
                if cls not in self._clslevel:
                    self._clslevel[cls] = []
                self._clslevel[cls].insert(0, event_key._listen_fn)
        registry._stored_in_collection(event_key, self)

    def append(self, event_key, propagate):
        """Append a listener to the class-level lists of the target class
        and all of its current subclasses."""
        target = event_key.dispatch_target
        assert isinstance(target, type), \
            "Class-level Event targets must be classes."
        stack = [target]
        while stack:
            cls = stack.pop(0)
            stack.extend(cls.__subclasses__())
            if cls is not target and cls not in self._clslevel:
                self.update_subclass(cls)
            else:
                if cls not in self._clslevel:
                    self._clslevel[cls] = []
                self._clslevel[cls].append(event_key._listen_fn)
        registry._stored_in_collection(event_key, self)

    def update_subclass(self, target):
        # Populate `target`'s listener list with listeners inherited from
        # its MRO, skipping duplicates.
        if target not in self._clslevel:
            self._clslevel[target] = []
        clslevel = self._clslevel[target]
        for cls in target.__mro__[1:]:
            if cls in self._clslevel:
                clslevel.extend([
                    fn for fn
                    in self._clslevel[cls]
                    if fn not in clslevel
                ])

    def remove(self, event_key):
        # Remove the listener from the target class and all subclasses.
        target = event_key.dispatch_target
        stack = [target]
        while stack:
            cls = stack.pop(0)
            stack.extend(cls.__subclasses__())
            if cls in self._clslevel:
                self._clslevel[cls].remove(event_key._listen_fn)
        registry._removed_from_collection(event_key, self)

    def clear(self):
        """Clear all class level listeners"""
        to_clear = set()
        for dispatcher in self._clslevel.values():
            to_clear.update(dispatcher)
            dispatcher[:] = []
        registry._clear(self, to_clear)

    def for_modify(self, obj):
        """Return an event collection which can be modified.

        For _DispatchDescriptor at the class level of
        a dispatcher, this returns self.

        """
        return self

    def __get__(self, obj, cls):
        # Descriptor access: class access returns the descriptor itself;
        # instance access returns a cached per-class _EmptyListener.
        if obj is None:
            return self
        elif obj._parent_cls in self._empty_listeners:
            ret = self._empty_listeners[obj._parent_cls]
        else:
            self._empty_listeners[obj._parent_cls] = ret = \
                _EmptyListener(self, obj._parent_cls)
        # assigning it to __dict__ means
        # memoized for fast re-access.  but more memory.
        obj.__dict__[self.__name__] = ret
        return ret
class _HasParentDispatchDescriptor(object):
    """Mixin delegating signature adaptation to the parent descriptor."""
    def _adjust_fn_spec(self, fn, named):
        return self.parent._adjust_fn_spec(fn, named)
class _EmptyListener(_HasParentDispatchDescriptor):
    """Serves as a class-level interface to the events
    served by a _DispatchDescriptor, when there are no
    instance-level events present.

    Is replaced by _ListenerCollection when instance-level
    events are added.

    """
    def __init__(self, parent, target_cls):
        if target_cls not in parent._clslevel:
            parent.update_subclass(target_cls)
        self.parent = parent  # _DispatchDescriptor
        # Shares the *same* list object as the class level; no copy.
        self.parent_listeners = parent._clslevel[target_cls]
        self.name = parent.__name__
        self.propagate = frozenset()
        self.listeners = ()

    def for_modify(self, obj):
        """Return an event collection which can be modified.

        For _EmptyListener at the instance level of
        a dispatcher, this generates a new
        _ListenerCollection, applies it to the instance,
        and returns it.

        """
        result = _ListenerCollection(self.parent, obj._parent_cls)
        # Only replace the memoized attribute if it still points at us,
        # guarding against concurrent replacement.
        if obj.__dict__[self.name] is self:
            obj.__dict__[self.name] = result
        return result

    def _needs_modify(self, *args, **kw):
        raise NotImplementedError("need to call for_modify()")

    # All mutating operations require upgrading via for_modify() first.
    exec_once = insert = append = remove = clear = _needs_modify

    def __call__(self, *args, **kw):
        """Execute this event."""
        for fn in self.parent_listeners:
            fn(*args, **kw)

    def __len__(self):
        return len(self.parent_listeners)

    def __iter__(self):
        return iter(self.parent_listeners)

    def __bool__(self):
        return bool(self.parent_listeners)

    __nonzero__ = __bool__  # Python 2 compatibility
class _CompoundListener(_HasParentDispatchDescriptor):
    """Base for collections that fire class-level then instance-level
    listeners in order."""
    _exec_once = False

    @util.memoized_property
    def _exec_once_mutex(self):
        # Created lazily so collections that never use exec_once pay
        # no locking cost.
        return threading.Lock()

    def exec_once(self, *args, **kw):
        """Execute this event, but only if it has not been
        executed already for this collection."""

        if not self._exec_once:
            with self._exec_once_mutex:
                # Double-checked under the lock.
                if not self._exec_once:
                    try:
                        self(*args, **kw)
                    finally:
                        # Marked done even if a listener raised, so it
                        # will not be re-attempted.
                        self._exec_once = True

    def __call__(self, *args, **kw):
        """Execute this event."""

        # Class-level listeners fire before instance-level ones.
        for fn in self.parent_listeners:
            fn(*args, **kw)
        for fn in self.listeners:
            fn(*args, **kw)

    def __len__(self):
        return len(self.parent_listeners) + len(self.listeners)

    def __iter__(self):
        return chain(self.parent_listeners, self.listeners)

    def __bool__(self):
        return bool(self.listeners or self.parent_listeners)

    __nonzero__ = __bool__  # Python 2 compatibility
class _ListenerCollection(RefCollection, _CompoundListener):
    """Instance-level attributes on instances of :class:`._Dispatch`.

    Represents a collection of listeners.

    As of 0.7.9, _ListenerCollection is only first
    created via the _EmptyListener.for_modify() method.

    """

    def __init__(self, parent, target_cls):
        if target_cls not in parent._clslevel:
            parent.update_subclass(target_cls)
        # Class-level listeners are shared (same list object), while
        # instance-level listeners live in self.listeners.
        self.parent_listeners = parent._clslevel[target_cls]
        self.parent = parent
        self.name = parent.__name__
        self.listeners = []
        self.propagate = set()

    def for_modify(self, obj):
        """Return an event collection which can be modified.

        For _ListenerCollection at the instance level of
        a dispatcher, this returns self.

        """
        return self

    def _update(self, other, only_propagate=True):
        """Populate from the listeners in another :class:`_Dispatch`
            object."""

        existing_listeners = self.listeners
        existing_listener_set = set(existing_listeners)
        self.propagate.update(other.propagate)
        # NOTE(review): the condition mixes `and`/`or` without parens;
        # it reads as (l not in set and not only_propagate) or
        # (l in propagate) due to precedence -- confirm intent upstream.
        other_listeners = [l for l
                           in other.listeners
                           if l not in existing_listener_set
                           and not only_propagate or l in self.propagate
                           ]

        existing_listeners.extend(other_listeners)

        to_associate = other.propagate.union(other_listeners)
        registry._stored_in_collection_multi(self, other, to_associate)

    def insert(self, event_key, propagate):
        # Prepend, skipping duplicates of an already-registered listener.
        if event_key._listen_fn not in self.listeners:
            event_key.prepend_to_list(self, self.listeners)
            if propagate:
                self.propagate.add(event_key._listen_fn)

    def append(self, event_key, propagate):
        # Append, skipping duplicates of an already-registered listener.
        if event_key._listen_fn not in self.listeners:
            event_key.append_to_list(self, self.listeners)
            if propagate:
                self.propagate.add(event_key._listen_fn)

    def remove(self, event_key):
        self.listeners.remove(event_key._listen_fn)
        self.propagate.discard(event_key._listen_fn)
        registry._removed_from_collection(event_key, self)

    def clear(self):
        # Clears instance-level listeners only; class-level listeners in
        # parent_listeners are untouched.
        registry._clear(self, self.listeners)
        self.propagate.clear()
        self.listeners[:] = []
class _JoinedDispatchDescriptor(object):
    """Descriptor on joined dispatchers; builds a _JoinedListener that
    merges the parent and local listener collections on first access."""
    def __init__(self, name):
        self.name = name

    def __get__(self, obj, cls):
        if obj is None:
            return self
        else:
            # Memoize on the instance so subsequent access skips the
            # descriptor protocol.
            obj.__dict__[self.name] = ret = _JoinedListener(
                obj.parent, self.name,
                getattr(obj.local, self.name)
            )
            return ret
class _JoinedListener(_CompoundListener):
    """Listener collection that fires the local collection first, then
    the parent's, and forwards all mutations to the local one."""
    _exec_once = False

    def __init__(self, parent, name, local):
        self.parent = parent
        self.name = name
        self.local = local
        # In _CompoundListener terms: "parent_listeners" fire first, so
        # the local collection takes that slot here.
        self.parent_listeners = self.local

    @property
    def listeners(self):
        # Resolved live so later changes to the parent are picked up.
        return getattr(self.parent, self.name)

    def _adjust_fn_spec(self, fn, named):
        return self.local._adjust_fn_spec(fn, named)

    def for_modify(self, obj):
        # Upgrade the local side (it may still be an _EmptyListener).
        self.local = self.parent_listeners = self.local.for_modify(obj)
        return self

    def insert(self, event_key, propagate):
        self.local.insert(event_key, propagate)

    def append(self, event_key, propagate):
        self.local.append(event_key, propagate)

    def remove(self, event_key):
        self.local.remove(event_key)

    def clear(self):
        raise NotImplementedError()
| gpl-3.0 |
jprine/pelican-plugins | category_meta/category_meta.py | 3 | 4347 | '''Copyright 2014 Zack Weinberg
Category Metadata
-----------------
A plugin to read metadata for each category from an index file in that
category's directory.
For this plugin to work properly, your articles should not have a
Category: tag in their metadata; instead, they should be stored in
(subdirectories of) per-category directories. Each per-category
directory must have a file named 'index.ext' at its top level, where
.ext is any extension that will be picked up by an article reader.
The metadata of that article becomes the metadata for the category,
copied over verbatim, with three special cases:
* The category's name is set to the article's title.
* The category's slug is set to the name of the parent directory
of the index.ext file.
* The _text_ of the article is stored as category.description.
'''
from pelican import signals
import os
import re
import logging
logger = logging.getLogger(__name__)
### CORE BUG: Content.url_format does not honor category.slug (or
### author.slug). The sanest way to fix this without modifying core
### is to dynamically redefine each article's class to a subclass
### of itself with the bug fixed.
###
### https://github.com/getpelican/pelican/issues/1547
# Cache of patched Content subclasses, keyed by original class name.
patched_subclasses = {}

def make_patched_subclass(klass):
    """Return a subclass of `klass` whose url_format honors category and
    author slugs, working around pelican core issue #1547."""
    if klass.__name__ not in patched_subclasses:
        class PatchedContent(klass):
            @property
            def url_format(self):
                metadata = super(PatchedContent, self).url_format
                # Core uses .name for these; substitute the slugs when
                # they have been set (e.g. by make_category).
                if hasattr(self.author, 'slug'):
                    metadata['author'] = self.author.slug
                if hasattr(self.category, 'slug'):
                    metadata['category'] = self.category.slug
                return metadata

        # Code in core uses Content class names as keys for things.
        PatchedContent.__name__ = klass.__name__
        patched_subclasses[klass.__name__] = PatchedContent

    return patched_subclasses[klass.__name__]
def patch_urlformat(article):
    # Swap the article's class for the url_format-patched subclass so
    # category/author slugs are honored (see make_patched_subclass).
    article.__class__ = make_patched_subclass(article.__class__)
def make_category(article, slug):
    """Turn an index article into the metadata carrier for its category:
    name from the article title, slug from the directory name, and
    description from the article body."""
    # Reuse the article's existing category object.
    category = article.category

    # Setting a category's name resets its slug, so do that first.
    category.name = article.title
    category.slug = slug

    # Description from article text.
    # XXX Relative URLs in the article content may not be handled correctly.
    setattr(category, 'description', article.content)

    # Metadata, to the extent that this makes sense.
    for k, v in article.metadata.items():
        # Skip keys that are either already handled above or would
        # clash with reader/bookkeeping fields.
        if k not in ('path', 'slug', 'category', 'name', 'title',
                     'description', 'reader'):
            setattr(category, k, v)

    logger.debug("Category: %s -> %s", category.slug, category.name)
    return category
def pretaxonomy_hook(generator):
    """This hook is invoked before the generator's .categories property is
    filled in.  Each article has already been assigned a category
    object, but these objects are _not_ unique per category and so are
    not safe to tack metadata onto (as is).

    The category metadata we're looking for is represented as an
    Article object, one per directory, whose filename is 'index.ext'.

    """
    # Partition articles into per-directory category carriers
    # ('index.*' files) and real articles.
    category_objects = {}
    real_articles = []

    for article in generator.articles:
        dirname, fname = os.path.split(article.source_path)
        fname, _ = os.path.splitext(fname)
        if fname == 'index':
            category_objects[dirname] = \
                make_category(article, os.path.basename(dirname))
        else:
            real_articles.append(article)

    if not category_objects:
        # Fix: with no index files the alternation below would be empty,
        # producing the pattern '^()/' which matches any absolute source
        # path and logged a bogus "No category assignment" error for
        # every article.  Nothing to assign; just drop the index files.
        generator.articles = real_articles
        return

    # Match the longest-known category directory prefix of each path.
    category_assignment = \
        re.compile("^(" +
                   "|".join(re.escape(prefix)
                            for prefix in category_objects.keys()) +
                   ")/")

    for article in real_articles:
        m = category_assignment.match(article.source_path)
        if not m or m.group(1) not in category_objects:
            logger.error("No category assignment for %s (%s)",
                         article, article.source_path)
            continue

        patch_urlformat(article)
        article.category = category_objects[m.group(1)]

    generator.articles = real_articles
def register():
    # Pelican plugin entry point: hook in before taxonomy collection.
    signals.article_generator_pretaxonomy.connect(pretaxonomy_hook)
| agpl-3.0 |
TimBuckley/effective_django | django/views/generic/edit.py | 15 | 8635 | import warnings
from django.forms import models as model_forms
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.utils.encoding import force_text
from django.views.generic.base import TemplateResponseMixin, ContextMixin, View
from django.views.generic.detail import (SingleObjectMixin,
SingleObjectTemplateResponseMixin, BaseDetailView)
class FormMixin(ContextMixin):
    """
    Provide the plumbing for displaying and processing a form in a view.
    """
    initial = {}
    form_class = None
    success_url = None
    prefix = None
    def get_initial(self):
        """
        Return a copy of the initial form data for this view.
        """
        return self.initial.copy()
    def get_prefix(self):
        """
        Return the prefix used to namespace this view's form fields.
        """
        return self.prefix
    def get_form_class(self):
        """
        Return the form class this view instantiates.
        """
        return self.form_class
    def get_form(self, form_class):
        """
        Build and return the form instance used by this view.
        """
        return form_class(**self.get_form_kwargs())
    def get_form_kwargs(self):
        """
        Return the keyword arguments used to instantiate the form.
        Bound data and files are only supplied for POST/PUT requests.
        """
        kwargs = {'initial': self.get_initial(), 'prefix': self.get_prefix()}
        if self.request.method in ('POST', 'PUT'):
            kwargs['data'] = self.request.POST
            kwargs['files'] = self.request.FILES
        return kwargs
    def get_success_url(self):
        """
        Return the configured success URL, or fail loudly if none is set.
        """
        if not self.success_url:
            raise ImproperlyConfigured(
                "No URL to redirect to. Provide a success_url.")
        # Forcing possible reverse_lazy evaluation
        return force_text(self.success_url)
    def form_valid(self, form):
        """
        On success, redirect to the success URL.
        """
        return HttpResponseRedirect(self.get_success_url())
    def form_invalid(self, form):
        """
        On failure, re-render the page with the bound form and its errors.
        """
        return self.render_to_response(self.get_context_data(form=form))
class ModelFormMixin(FormMixin, SingleObjectMixin):
    """
    A mixin that provides a way to show and handle a modelform in a request.
    """
    # Field names handed to modelform_factory; None triggers a deprecation
    # warning below (all-fields forms are being phased out).
    fields = None
    def get_form_class(self):
        """
        Returns the form class to use in this view.
        Resolution order: explicit form_class, then a ModelForm generated
        from (in order) self.model, the current object's class, or the
        queryset's model.
        """
        if self.form_class:
            return self.form_class
        else:
            if self.model is not None:
                # If a model has been explicitly provided, use it
                model = self.model
            elif hasattr(self, 'object') and self.object is not None:
                # If this view is operating on a single object, use
                # the class of that object
                model = self.object.__class__
            else:
                # Try to get a queryset and extract the model class
                # from that
                model = self.get_queryset().model
            if self.fields is None:
                warnings.warn("Using ModelFormMixin (base class of %s) without "
                              "the 'fields' attribute is deprecated." % self.__class__.__name__,
                              DeprecationWarning)
            return model_forms.modelform_factory(model, fields=self.fields)
    def get_form_kwargs(self):
        """
        Returns the keyword arguments for instantiating the form.
        Adds the model instance (self.object) to the base form kwargs.
        """
        kwargs = super(ModelFormMixin, self).get_form_kwargs()
        kwargs.update({'instance': self.object})
        return kwargs
    def get_success_url(self):
        """
        Returns the supplied URL.
        success_url may contain %(field)s placeholders interpolated from
        the saved object's attributes; otherwise the object's
        get_absolute_url() is used.
        """
        if self.success_url:
            url = self.success_url % self.object.__dict__
        else:
            try:
                url = self.object.get_absolute_url()
            except AttributeError:
                raise ImproperlyConfigured(
                    "No URL to redirect to. Either provide a url or define"
                    " a get_absolute_url method on the Model.")
        return url
    def form_valid(self, form):
        """
        If the form is valid, save the associated model.
        """
        self.object = form.save()
        return super(ModelFormMixin, self).form_valid(form)
class ProcessFormView(View):
    """
    Render a form on GET and validate/dispatch it on POST.
    """
    def get(self, request, *args, **kwargs):
        """
        Display a blank, unbound form.
        """
        form = self.get_form(self.get_form_class())
        return self.render_to_response(self.get_context_data(form=form))
    def post(self, request, *args, **kwargs):
        """
        Bind the form to the submitted data and branch on validity.
        """
        form = self.get_form(self.get_form_class())
        if form.is_valid():
            return self.form_valid(form)
        return self.form_invalid(form)
    # PUT is a valid HTTP verb for creating (with a known URL) or editing
    # an object; browsers, however, only speak POST for forms today.
    def put(self, *args, **kwargs):
        return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
    """
    A base view for displaying a form.
    Subclasses must supply a response mixin (e.g. TemplateResponseMixin).
    """
class FormView(TemplateResponseMixin, BaseFormView):
    """
    A view for displaying a form, and rendering a template response.
    """
class BaseCreateView(ModelFormMixin, ProcessFormView):
    """
    Base view for creating a new object instance.
    Using this base class requires subclassing to provide a response mixin.
    """
    def get(self, request, *args, **kwargs):
        # No existing instance yet: the form is unbound to any object.
        self.object = None
        return super(BaseCreateView, self).get(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        self.object = None
        return super(BaseCreateView, self).post(request, *args, **kwargs)
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
    """
    View for creating a new object instance,
    with a response rendered by template.
    """
    # Template resolves to "<app>/<model>_form.html".
    template_name_suffix = '_form'
class BaseUpdateView(ModelFormMixin, ProcessFormView):
    """
    Base view for updating an existing object.
    Using this base class requires subclassing to provide a response mixin.
    """
    def get(self, request, *args, **kwargs):
        # Fetch the instance being edited so the form is bound to it.
        self.object = self.get_object()
        return super(BaseUpdateView, self).get(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        return super(BaseUpdateView, self).post(request, *args, **kwargs)
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
    """
    View for updating an object,
    with a response rendered by template.
    """
    # Shares the "_form" template suffix with CreateView.
    template_name_suffix = '_form'
class DeletionMixin(object):
    """
    Mixin supplying object deletion plus a post-delete redirect.
    """
    success_url = None
    def delete(self, request, *args, **kwargs):
        """
        Delete the fetched object and redirect to the success URL.
        The URL is resolved *before* the delete so that ``%(field)s``
        interpolation can still read the object's attributes.
        """
        self.object = self.get_object()
        success_url = self.get_success_url()
        self.object.delete()
        return HttpResponseRedirect(success_url)
    # Browsers only speak GET/POST for now, so POST doubles as DELETE.
    def post(self, request, *args, **kwargs):
        return self.delete(request, *args, **kwargs)
    def get_success_url(self):
        """
        Interpolate success_url with the deleted object's field values.
        """
        if not self.success_url:
            raise ImproperlyConfigured(
                "No URL to redirect to. Provide a success_url.")
        return self.success_url % self.object.__dict__
class BaseDeleteView(DeletionMixin, BaseDetailView):
    """
    Base view for deleting an object.
    Using this base class requires subclassing to provide a response mixin.
    """
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
    """
    View for deleting an object retrieved with `self.get_object()`,
    with a response rendered by template.
    """
    # GET renders "<app>/<model>_confirm_delete.html"; POST performs the delete.
    template_name_suffix = '_confirm_delete'
| bsd-3-clause |
scs/uclinux | user/python/python-2.4.4/Mac/Modules/cm/cmscan.py | 5 | 3001 | # Scan an Apple header file, generating a Python file of generator calls.
import sys
import os
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner
LONG = "Components"
SHORT = "cm"
def main():
    # Python 2 script: scans the Components.h toolbox header, emits a
    # bgen generator module plus a constants/definitions module, then
    # sanity-checks both outputs by executing/importing them.
    input = "Components.h"
    output = SHORT + "gen.py"
    defsoutput = TOOLBOXDIR + LONG + ".py"
    scanner = MyScanner(input, output, defsoutput)
    scanner.scan()
    scanner.close()
    print "=== Testing definitions output code ==="
    # Run the generated definitions in an empty namespace to catch syntax errors.
    execfile(defsoutput, {}, {})
    print "=== Done scanning and generating, now importing the generated code... ==="
    exec "import " + SHORT + "support"
    print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner):
    """Component Manager specific bgen scanner configuration."""
    def destination(self, type, name, arglist):
        # Decide whether a scanned C function becomes a plain function or a
        # method of Component/ComponentInstance, based on its first argument.
        classname = "Function"
        listname = "functions"
        if arglist:
            t, n, m = arglist[0]
            #
            # FindNextComponent is a special case, since it can also be called
            # with None as the argument. Hence, we make it a function
            #
            if t == "Component" and m == "InMode" and name != "FindNextComponent":
                classname = "Method"
                listname = "c_methods"
            elif t == "ComponentInstance" and m == "InMode":
                classname = "Method"
                listname = "ci_methods"
        return classname, listname
    def writeinitialdefs(self):
        # Prelude for the generated definitions file.
        self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n")
    def makeblacklistnames(self):
        # Functions to skip entirely (unsupported or missing in CW Pro 6).
        return [
            "OpenADefaultComponent",
            "GetComponentTypeModSeed",
            "OpenAComponentResFile",
            "CallComponentUnregister",
            "CallComponentTarget",
            "CallComponentRegister",
            "CallComponentVersion",
            "CallComponentCanDo",
            "CallComponentClose",
            "CallComponentOpen",
            "OpenAComponent",
            "GetComponentPublicResource", # Missing in CW Pro 6
            "CallComponentGetPublicResource", # Missing in CW Pro 6
            'SetComponentInstanceA5',
            'GetComponentInstanceA5',
            ]
    def makeblacklisttypes(self):
        # C types with no Python mapping; any function using them is skipped.
        return [
            "ResourceSpec",
            "ComponentResource",
            "ComponentPlatformInfo",
            "ComponentResourceExtension",
            "ComponentPlatformInfoArray",
            "ExtComponentResource",
            "ComponentParameters",
            "ComponentRoutineUPP",
            "ComponentMPWorkFunctionUPP",
            "ComponentFunctionUPP",
            "GetMissingComponentResourceUPP",
            ]
    def makerepairinstructions(self):
        # Argument-mode fixups: 'looking' descriptions are inputs, not outputs.
        return [
            ([('ComponentDescription', 'looking', 'OutMode')],
             [('ComponentDescription', '*', 'InMode')]),
            ]
if __name__ == "__main__":
    # Allow running the scanner as a stand-alone script.
    main()
| gpl-2.0 |
MackZxh/OCA-Choice | hr/hr_experience_analytic/__openerp__.py | 19 | 1956 | ###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
# Odoo/OpenERP addon manifest: a bare Python dict evaluated by the module
# loader; keys follow the standard __openerp__.py schema.
{
    "name": "Experience and Analytic Accounting",
    "version": "0.1",
    "author": "Savoir-faire Linux,Odoo Community Association (OCA)",
    "category": "Human Resources",
    "website": "http://www.savoirfairelinux.com",
    "license": "AGPL-3",
    "depends": [
        "hr_experience",
        "account",
    ],
    "description": """
This module allows you to link your employee experiences with projects
or contracts.
This is useful if you want to have the same project description and metrics on
all the resumes of the employees involved in the same project or contract.
Configuration
=============
Make sure to add users to the "Analytic Accounting" group to show the analytic
account on the profesionnal experience form.
Contributors
============
* Savoir-faire Linux <support@savoirfairelinux.com>
* Maxime Chambreuil <maxime.chambreuil@savoirfairelinux.com>
""",
    "data": [
        "hr_experience_analytic_view.xml",
    ],
    # Marked not installable on this branch.
    'installable': False,
}
| lgpl-3.0 |
marratj/ansible | lib/ansible/modules/monitoring/zabbix_screen.py | 24 | 17036 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_screen
short_description: Zabbix screen creates/updates/deletes
description:
- This module allows you to create, modify and delete Zabbix screens and associated graph data.
version_added: "2.0"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
required: true
aliases: [ "url" ]
login_user:
description:
- Zabbix user name.
required: true
login_password:
description:
- Zabbix user password.
required: true
http_login_user:
description:
- Basic Auth login
required: false
default: None
version_added: "2.1"
http_login_password:
description:
- Basic Auth password
required: false
default: None
version_added: "2.1"
timeout:
description:
- The timeout of API request (seconds).
default: 10
screens:
description:
- List of screens to be created/updated/deleted(see example).
- If the screen(s) already been added, the screen(s) name won't be updated.
- When creating or updating screen(s), C(screen_name), C(host_group) are required.
- When deleting screen(s), the C(screen_name) is required.
- >
The available states are: C(present) (default) and C(absent). If the screen(s) already exists, and the state is not C(absent), the screen(s)
will just be updated as needed.
required: true
notes:
- Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed.
'''
EXAMPLES = '''
# Create/update a screen.
- name: Create a new screen or update an existing screen's items
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
screens:
- screen_name: ExampleScreen1
host_group: Example group1
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
# Create/update multi-screen
- name: Create two of new screens or update the existing screens' items
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
screens:
- screen_name: ExampleScreen1
host_group: Example group1
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
- screen_name: ExampleScreen2
host_group: Example group2
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurrent updates
- name: Create a new screen or update an existing screen's items
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
screens:
- screen_name: ExampleScreen
host_group: Example group
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
when: inventory_hostname==groups['group_name'][0]
'''
# Optional dependency guard: the module degrades to a fail_json() message in
# main() when zabbix-api is not installed (see HAS_ZABBIX_API).
try:
    from zabbix_api import ZabbixAPI, ZabbixAPISubClass
    from zabbix_api import ZabbixAPIException
    from zabbix_api import Already_Exists
    # Extend the ZabbixAPI.
    # The zabbix-api python module is too old (version 1.0, and there's no
    # higher version so far); it doesn't support the 'screenitem' api call,
    # so we have to inherit the ZabbixAPI class to add 'screenitem' support.
    class ZabbixAPIExtends(ZabbixAPI):
        screenitem = None
        def __init__(self, server, timeout, user, passwd, **kwargs):
            ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
            self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs))
    HAS_ZABBIX_API = True
except ImportError:
    HAS_ZABBIX_API = False
from ansible.module_utils.basic import AnsibleModule
class Screen(object):
    """Thin wrapper around the Zabbix API for screen/screen-item CRUD.

    All failures are reported through module.fail_json(), which exits the
    Ansible module; methods therefore do not return error values.
    """
    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx
    # Resolve a host group name to its Zabbix groupid (fails the module if absent).
    def get_host_group_id(self, group_name):
        if group_name == "":
            self._module.fail_json(msg="group_name is required")
        hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}})
        if len(hostGroup_list) < 1:
            self._module.fail_json(msg="Host group not found: %s" % group_name)
        else:
            hostGroup_id = hostGroup_list[0]['groupid']
            return hostGroup_id
    # List the hostids of monitored hosts belonging to the given group.
    def get_host_ids_by_group_id(self, group_id):
        host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1})
        if len(host_list) < 1:
            self._module.fail_json(msg="No host in the group.")
        else:
            host_ids = []
            for i in host_list:
                host_id = i['hostid']
                host_ids.append(host_id)
            return host_ids
    # Return the screenid for screen_name, or None if no screen matches.
    def get_screen_id(self, screen_name):
        if screen_name == "":
            self._module.fail_json(msg="screen_name is required")
        try:
            screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}})
            if len(screen_id_list) >= 1:
                screen_id = screen_id_list[0]['screenid']
                return screen_id
            return None
        except Exception as e:
            self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e))
    # Create a screen and return its new screenid (exits early in check mode).
    def create_screen(self, screen_name, h_size, v_size):
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size})
            return screen['screenids'][0]
        except Exception as e:
            self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e))
    # Resize an existing screen (exits early in check mode).
    def update_screen(self, screen_id, screen_name, h_size, v_size):
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size})
        except Exception as e:
            self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e))
    # Delete a screen by id (exits early in check mode).
    def delete_screen(self, screen_id, screen_name):
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.screen.delete([screen_id])
        except Exception as e:
            self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e))
    # Collect graph ids for all hosts plus the max per-host graph count,
    # which becomes the screen's initial vertical size.
    def get_graph_ids(self, hosts, graph_name_list):
        graph_id_lists = []
        vsize = 1
        for host in hosts:
            graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
            size = len(graph_id_list)
            if size > 0:
                graph_id_lists.extend(graph_id_list)
                if vsize < size:
                    vsize = size
        return graph_id_lists, vsize
    # Look up graphids on one host for each requested graph name
    # (substring search, so a name may match several graphs).
    def get_graphs_by_host_id(self, graph_name_list, host_id):
        graph_ids = []
        for graph_name in graph_name_list:
            graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id})
            graph_id_list = []
            if len(graphs_list) > 0:
                for graph in graphs_list:
                    graph_id = graph['graphid']
                    graph_id_list.append(graph_id)
            if len(graph_id_list) > 0:
                graph_ids.extend(graph_id_list)
        return graph_ids
    # Fetch all items currently placed on a screen.
    def get_screen_items(self, screen_id):
        screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id})
        return screen_item_list
    # Remove the given items from a screen. Returns True if items were (or
    # would be) deleted, False if the screen had none.
    # NOTE(review): a ZabbixAPIException is silently swallowed here and the
    # method then implicitly returns None (falsy), so the caller skips the
    # screen resize but still recreates items -- confirm this is intended.
    def delete_screen_items(self, screen_id, screen_item_id_list):
        try:
            if len(screen_item_id_list) == 0:
                return True
            screen_item_list = self.get_screen_items(screen_id)
            if len(screen_item_list) > 0:
                if self._module.check_mode:
                    self._module.exit_json(changed=True)
                self._zapi.screenitem.delete(screen_item_id_list)
                return True
            return False
        except ZabbixAPIException:
            pass
    # Compute the screen grid: one column per host; a single host is spread
    # over up to 3 columns so one long column is avoided.
    def get_hsize_vsize(self, hosts, v_size):
        h_size = len(hosts)
        if h_size == 1:
            if v_size == 1:
                h_size = 1
            elif v_size in range(2, 9):
                h_size = 2
            else:
                h_size = 3
            v_size = (v_size - 1) // h_size + 1
        return h_size, v_size
    # Place graph items onto the screen grid. Default cell width is 500px
    # for small screens (<4 hosts), otherwise 200px; default height 100px.
    def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size):
        if len(hosts) < 4:
            if width is None or width < 0:
                width = 500
        else:
            if width is None or width < 0:
                width = 200
        if height is None or height < 0:
            height = 100
        try:
            # when there're only one host, only one row is not good.
            if len(hosts) == 1:
                graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0])
                for i, graph_id in enumerate(graph_id_list):
                    if graph_id is not None:
                        # Fill row-major across h_size columns.
                        self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
                                                      'width': width, 'height': height,
                                                      'x': i % h_size, 'y': i // h_size, 'colspan': 1, 'rowspan': 1,
                                                      'elements': 0, 'valign': 0, 'halign': 0,
                                                      'style': 0, 'dynamic': 0, 'sort_triggers': 0})
            else:
                for i, host in enumerate(hosts):
                    graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
                    for j, graph_id in enumerate(graph_id_list):
                        if graph_id is not None:
                            # Column i = host, row j = graph.
                            self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
                                                          'width': width, 'height': height,
                                                          'x': i, 'y': j, 'colspan': 1, 'rowspan': 1,
                                                          'elements': 0, 'valign': 0, 'halign': 0,
                                                          'style': 0, 'dynamic': 0, 'sort_triggers': 0})
        except Already_Exists:
            pass
def main():
    """Ansible module entry point: reconcile the requested screens."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            timeout=dict(type='int', default=10),
            screens=dict(type='list', required=True)
        ),
        supports_check_mode=True
    )
    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    timeout = module.params['timeout']
    screens = module.params['screens']
    zbx = None
    # login to zabbix (HTTP basic auth credentials go to the transport,
    # the Zabbix user/password to the API login call)
    try:
        zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
    screen = Screen(module, zbx)
    created_screens = []
    changed_screens = []
    deleted_screens = []
    for zabbix_screen in screens:
        screen_name = zabbix_screen['screen_name']
        screen_id = screen.get_screen_id(screen_name)
        # Per-screen state; anything other than an explicit "absent" means present.
        state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present"
        if state == "absent":
            if screen_id:
                # Delete items first, then the screen itself.
                screen_item_list = screen.get_screen_items(screen_id)
                screen_item_id_list = []
                for screen_item in screen_item_list:
                    screen_item_id = screen_item['screenitemid']
                    screen_item_id_list.append(screen_item_id)
                screen.delete_screen_items(screen_id, screen_item_id_list)
                screen.delete_screen(screen_id, screen_name)
                deleted_screens.append(screen_name)
        else:
            host_group = zabbix_screen['host_group']
            graph_names = zabbix_screen['graph_names']
            graph_width = None
            if 'graph_width' in zabbix_screen:
                graph_width = zabbix_screen['graph_width']
            graph_height = None
            if 'graph_height' in zabbix_screen:
                graph_height = zabbix_screen['graph_height']
            host_group_id = screen.get_host_group_id(host_group)
            hosts = screen.get_host_ids_by_group_id(host_group_id)
            screen_item_id_list = []
            resource_id_list = []
            graph_ids, v_size = screen.get_graph_ids(hosts, graph_names)
            h_size, v_size = screen.get_hsize_vsize(hosts, v_size)
            if not screen_id:
                # create screen
                screen_id = screen.create_screen(screen_name, h_size, v_size)
                screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
                created_screens.append(screen_name)
            else:
                screen_item_list = screen.get_screen_items(screen_id)
                for screen_item in screen_item_list:
                    screen_item_id = screen_item['screenitemid']
                    resource_id = screen_item['resourceid']
                    screen_item_id_list.append(screen_item_id)
                    resource_id_list.append(resource_id)
                # when the screen items changed, then update
                if graph_ids != resource_id_list:
                    deleted = screen.delete_screen_items(screen_id, screen_item_id_list)
                    if deleted:
                        screen.update_screen(screen_id, screen_name, h_size, v_size)
                    screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
                    changed_screens.append(screen_name)
    if created_screens and changed_screens:
        module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens),
                                                                                                                 ",".join(changed_screens)))
    elif created_screens:
        module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens))
    elif changed_screens:
        module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens))
    elif deleted_screens:
        module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens))
    else:
        module.exit_json(changed=False)
if __name__ == '__main__':
    # Standard Ansible module invocation.
    main()
| gpl-3.0 |
mozman/ezdxf | tests/test_01_dxf_entities/test_131_field_list.py | 1 | 2369 | # Copyright (c) 2019 Manfred Moitzi
# License: MIT License
from typing import cast
import pytest
import ezdxf
from ezdxf.entities.idbuffer import FieldList
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
FIELDLIST = """0
FIELDLIST
5
0
102
{ACAD_REACTORS
330
0
102
}
330
0
100
AcDbIdSet
90
12
100
AcDbFieldList
"""
@pytest.fixture
def entity():
    # A FieldList parsed from the canned FIELDLIST DXF text above.
    return FieldList.from_text(FIELDLIST)
def test_registered():
    """FIELDLIST must be registered in the entity factory."""
    from ezdxf.entities.factory import ENTITY_CLASSES
    assert 'FIELDLIST' in ENTITY_CLASSES
def test_default_init():
    """A default-constructed FieldList has no handle or owner."""
    entity = FieldList()
    assert entity.dxftype() == 'FIELDLIST'
    assert entity.dxf.handle is None
    assert entity.dxf.owner is None
def test_default_new():
    """Entities created via new() start with zero flags and no handles."""
    field_list = FieldList.new(handle='ABBA', owner='0', dxfattribs={})
    assert field_list.dxf.flags == 0
    assert len(field_list.handles) == 0
def test_load_from_text(entity):
    """Group code 90 (flags) is read; the handle list stays empty."""
    assert entity.dxf.flags == 12
    assert len(entity.handles) == 0
def test_write_dxf():
    """Round trip: exported tags equal the tags of the source text."""
    exported = TagCollector.dxftags(FieldList.from_text(FIELDLIST))
    assert exported == basic_tags_from_text(FIELDLIST)
@pytest.fixture(scope='module')
def doc():
    # One shared R2007 document for all tests in this module.
    return ezdxf.new('R2007')
def test_generic_field_list(doc):
    """The objects section can create an empty FIELDLIST generically."""
    field_list = doc.objects.new_entity('FIELDLIST', {})
    assert field_list.dxftype() == 'FIELDLIST'
    assert len(field_list.handles) == 0
def test_set_get_field_list(doc):
    """handles behaves like a mutable list of handle strings."""
    field_list = doc.objects.new_entity('FIELDLIST', {})
    assert field_list.dxftype() == 'FIELDLIST'
    field_list.handles = ['FF', 'EE', 'DD']
    stored = field_list.handles
    assert len(stored) == 3
    assert stored == ['FF', 'EE', 'DD']
    stored.append('FFFF')
    assert stored[-1] == 'FFFF'
def test_dxf_tags(doc):
    """Each handle is exported as its own group-code-330 tag."""
    field_list = cast(FieldList, doc.objects.new_entity('FIELDLIST', {}))
    field_list.handles = ['FF', 'EE', 'DD', 'CC']
    tail = TagCollector.dxftags(field_list)[-4:]
    assert len(tail) == 4
    assert tail[0] == (330, 'FF')
    assert tail[-1] == (330, 'CC')
def test_clone(doc):
    """copy() produces an independent handle list (deep copy)."""
    original = cast(FieldList, doc.objects.new_entity('FIELDLIST', {}))
    original.handles = ['FF', 'EE', 'DD', 'CC']
    clone = cast(FieldList, original.copy())
    clone.handles[-1] = 'ABCD'
    assert original.handles[:-1] == clone.handles[:-1]
    assert original.handles[-1] != clone.handles[-1]
| mit |
michaelkirk/QGIS | tests/src/python/test_qgsrectangle.py | 11 | 7274 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsComposition.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
from qgis.core import QgsRectangle, QgsPoint
from utilities import (getQgisTestApp,
compareWkt,
TestCase,
unittest,
expectedFailure
)
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestQgsRectangle(TestCase):
# Because isEmpty() is not returning expected result in 9b0fee3
@expectedFailure
def testCtor(self):
rect = QgsRectangle(5.0, 5.0, 10.0, 10.0)
myExpectedResult = True
myResult = rect.isEmpty()
myMessage = ('Expected: %s Got: %s' % (myExpectedResult, myResult))
assert rect.isEmpty(), myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(5.0, rect.xMinimum()))
assert rect.xMinimum() == 5.0, myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(5.0, rect.yMinimum()))
assert rect.yMinimum() == 5.0, myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(10.0, rect.xMaximum()))
assert rect.xMaximum() == 10.0, myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(10.0, rect.yMaximum()))
assert rect.yMaximum() == 10.0, myMessage
def testDimensions(self):
rect = QgsRectangle( 0.0, 0.0, 10.0, 10.0)
myMessage = ('Expected: %s\nGot: %s\n' %
(10.0, rect.width()))
assert rect.width() == 10.0, myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(10.0, rect.height()))
assert rect.height() == 10.0, myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
("5.0, 5.0", rect.center().toString()))
assert rect.center() == QgsPoint(5.0, 5.0), myMessage
rect.scale(2.0)
myMessage = ('Expected: %s\nGot: %s\n' %
(20.0, rect.width()))
assert rect.width() == 20.0, myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(20.0, rect.height()))
assert rect.height() == 20.0, myMessage
def testIntersection(self):
rect1 = QgsRectangle( 0.0, 0.0, 5.0, 5.0)
rect2 = QgsRectangle( 2.0, 2.0, 7.0, 7.0)
myMessage = ('Expected: %s\nGot: %s\n' %
(True, rect1.intersects(rect2)))
assert rect1.intersects(rect2), myMessage
rect3 = rect1.intersect(rect2)
self.assertFalse(rect3.isEmpty(), "Empty rectangle returned")
myMessage = ('Expected: %s\nGot: %s\n' %
(3.0, rect3.width()))
assert rect3.width() == 3.0, myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(3.0, rect3.height()))
assert rect3.height() == 3.0, myMessage
def testContains(self):
rect1 = QgsRectangle( 0.0, 0.0, 5.0, 5.0)
rect2 = QgsRectangle( 2.0, 2.0, 7.0, 7.0)
pnt1 = QgsPoint(4.0, 4.0)
pnt2 = QgsPoint(6.0, 2.0)
rect3 = rect1.intersect(rect2)
myMessage = ('Expected: %s\nGot: %s\n' %
(True, rect1.contains(rect3)))
assert rect1.contains(rect3), myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(True, rect2.contains(rect3)))
assert rect2.contains(rect3), myMessage
# test for point
myMessage = ('Expected: %s\nGot: %s\n' %
(True, rect1.contains(pnt1)))
assert rect1.contains(pnt1), myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(True, rect2.contains(pnt1)))
assert rect2.contains(pnt1), myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(True, rect3.contains(pnt1)))
assert rect3.contains(pnt1), myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(False, rect1.contains(pnt2)))
self.assertFalse(rect1.contains(pnt2), myMessage)
myMessage = ('Expected: %s\nGot: %s\n' %
(True, rect2.contains(pnt2)))
assert rect2.contains(pnt2), myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(False, rect3.contains(pnt2)))
self.assertFalse(rect3.contains(pnt2), myMessage)
myMessage = ('Expected: %s\nGot: %s\n' %
(True, rect3.contains(pnt1)))
self.assertTrue(rect3.contains(pnt1), myMessage)
def testUnion(self):
rect1 = QgsRectangle( 0.0, 0.0, 5.0, 5.0)
rect2 = QgsRectangle( 2.0, 2.0, 7.0, 7.0)
pnt1 = QgsPoint(6.0, 2.0)
rect1.combineExtentWith(rect2)
myMessage = ('Expected: %s\nGot: %s\n' %
(True, rect1.contains(rect2)))
assert rect1.contains(rect2), myMessage
print rect1.toString()
assert (rect1 == QgsRectangle(0.0, 0.0, 7.0, 7.0),
'Wrong combine with rectangle result')
rect1 = QgsRectangle( 0.0, 0.0, 5.0, 5.0)
rect1.combineExtentWith(6.0, 2.0)
myMessage = ('Expected: %s\nGot: %s\n' %
(True, rect1.contains(pnt1)))
assert rect1.contains(pnt1), myMessage
myExpectedResult = QgsRectangle(0.0, 0.0, 6.0, 5.0).toString()
myResult = rect1.toString()
myMessage = ('Expected: %s\nGot: %s\n' %
(myExpectedResult, myResult))
self.assertEquals(myResult, myExpectedResult, myMessage)
rect1 = QgsRectangle( 0.0, 0.0, 5.0, 5.0)
rect1.unionRect(rect2)
myMessage = ('Expected: %s\nGot: %s\n' %
(True, rect1.contains(rect2)))
assert rect1.contains(rect2), myMessage
assert rect1 == QgsRectangle(0.0, 0.0, 7.0, 7.0), "Wrong union result"
def testAsWktCoordinates(self):
"""Test that we can get a proper wkt representation fo the rect"""
rect1 = QgsRectangle( 0.0, 0.0, 5.0, 5.0)
myExpectedWkt = ('0 0, '
'5 5')
myWkt = rect1.asWktCoordinates()
myMessage = ('Expected: %s\nGot: %s\n' %
(myExpectedWkt, myWkt))
assert compareWkt( myWkt, myExpectedWkt ), myMessage
def testAsWktPolygon(self):
"""Test that we can get a proper rect wkt polygon representation for rect"""
rect1 = QgsRectangle( 0.0, 0.0, 5.0, 5.0)
myExpectedWkt = ('POLYGON((0 0, '
'5 0, '
'5 5, '
'0 5, '
'0 0))')
myWkt = rect1.asWktPolygon()
myMessage = ('Expected: %s\nGot: %s\n' %
(myExpectedWkt, myWkt))
assert compareWkt( myWkt, myExpectedWkt ), myMessage
if __name__ == '__main__':
    # Run the test case directly via unittest.
    unittest.main()
| gpl-2.0 |
usc-isi/nova | nova/policy.py | 7 | 3601 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Nova"""
import os.path
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import policy
from nova import utils
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('JSON file representing policy')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule checked when requested rule is not found')),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(policy_opts)
_POLICY_PATH = None
_POLICY_CACHE = {}
def reset():
    """Forget the resolved policy path and cached contents, and reset
    the underlying policy engine."""
    global _POLICY_PATH, _POLICY_CACHE
    _POLICY_PATH = None
    _POLICY_CACHE = {}
    policy.reset()
def init():
    """Locate and (re)load the policy file, caching its contents.

    Resolves ``FLAGS.policy_file`` to an absolute path on first use and
    re-reads it only when the file changed on disk (handled by
    ``utils.read_cached_file``), rebuilding the brain via ``_set_brain``.

    :raises nova.exception.ConfigNotFound: if the policy file cannot be
        located.
    """
    global _POLICY_PATH
    global _POLICY_CACHE
    if not _POLICY_PATH:
        _POLICY_PATH = FLAGS.policy_file
        if not os.path.exists(_POLICY_PATH):
            # Not an absolute/existing path; search the configured dirs.
            _POLICY_PATH = FLAGS.find_file(_POLICY_PATH)
        if not _POLICY_PATH:
            raise exception.ConfigNotFound(path=FLAGS.policy_file)
    utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
                           reload_func=_set_brain)
def _set_brain(data):
    """Rebuild the shared policy brain from raw JSON policy *data*."""
    policy.set_brain(
        policy.Brain.load_json(data, FLAGS.policy_default_rule))
def enforce(context, action, target):
    """Verifies that the action is valid on the target in this context.

    :param context: nova context
    :param action: string representing the action to be checked
        this should be colon separated for clarity.
        i.e. ``compute:create_instance``,
        ``compute:attach_volume``,
        ``volume:attach_volume``
    :param target: dictionary representing the object of the action
        for object creation this should be a dictionary representing the
        location of the object e.g. ``{'project_id': context.project_id}``

    :raises nova.exception.PolicyNotAuthorized: if verification fails.
    """
    # Make sure the policy file is loaded (and reloaded if it changed).
    init()
    match_list = ('rule:%s' % action,)
    credentials = context.to_dict()
    # NOTE(vish): This is to work around the following launchpad bug:
    #             https://bugs.launchpad.net/openstack-common/+bug/1039132
    #             It can be removed when that bug is fixed.
    credentials['is_admin'] = unicode(credentials['is_admin'])
    policy.enforce(match_list, target, credentials,
                   exception.PolicyNotAuthorized, action=action)
def check_is_admin(roles):
    """Return True when *roles* satisfies the 'context_is_admin' policy
    rule, False otherwise."""
    init()
    action = 'context_is_admin'
    try:
        policy.enforce(('rule:%s' % action,), {}, {'roles': roles},
                       exception.PolicyNotAuthorized, action=action)
    except exception.PolicyNotAuthorized:
        return False
    else:
        return True
| apache-2.0 |
krautradio/PyRfK | lib/rfk/database/base.py | 1 | 14479 | import time
import hashlib
from datetime import timedelta
from passlib.hash import bcrypt
from sqlalchemy import *
from sqlalchemy.orm import relationship, backref, exc
from sqlalchemy.dialects.mysql import INTEGER as Integer
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy.sql.expression import case
import re
import os
from flask.ext.login import AnonymousUserMixin
import rfk.database
from rfk.types import SET, ENUM
from rfk import exc as rexc
from rfk import CONFIG
from rfk.database import Base, UTCDateTime
from rfk.database.show import UserShow, Show
from rfk.helper import now, get_path
class Anonymous(AnonymousUserMixin):
    """Stand-in user object for visitors that are not logged in.

    Mirrors the pieces of the ``User`` interface the web layer uses
    (locale, timezone, permission checks); permissions are always denied.
    """

    def __init__(self):
        AnonymousUserMixin.__init__(self)
        # Guest defaults: German locale, Berlin time.
        self.locale = 'de'
        self.timezone = 'Europe/Berlin'

    def get_locale(self):
        """Return the locale code used for anonymous visitors."""
        return self.locale

    def get_timezone(self):
        """Return the timezone name used for anonymous visitors."""
        return self.timezone

    def has_permission(self, code=None, permission=None):
        """Anonymous visitors never hold any permission."""
        return False
class User(Base):
    """Registered user account (table ``users``).

    Implements the Flask-Login user protocol (``get_id``, ``is_active``,
    ...) and stores passwords as bcrypt hashes; legacy SHA1 hashes are
    transparently upgraded on the first successful login (see
    ``check_password``).
    """
    __tablename__ = 'users'
    user = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    username = Column(String(50), unique=True)
    # bcrypt hash (or a legacy SHA1 hexdigest, migrated on login).
    password = Column(String(64))
    mail = Column(String(255))
    country = Column(String(3))
    register_date = Column(UTCDateTime, default=now)
    last_login = Column(UTCDateTime, default=None)

    def get_id(self):
        # Flask-Login expects a unicode identifier.
        return unicode(self.user)

    def is_anonymous(self):
        """Flask-Login protocol: a persisted user is never anonymous."""
        return False

    def is_active(self):
        """Flask-Login protocol: all stored accounts are active."""
        return True

    def is_authenticated(self):
        """Flask-Login protocol: a loaded user counts as authenticated."""
        return True

    def get_locale(self):
        """Return the user's 'locale' setting (None if unset)."""
        return self.get_setting(code='locale')

    def get_timezone(self):
        """Return the user's 'timezone' setting (None if unset)."""
        return self.get_setting(code='timezone')

    @staticmethod
    def authenticate(username, password):
        """Shorthand to authenticate a user; returns the User object.

        :param username: username
        :param password: unencrypted password
        :raises rfk.exc.base.UserNotFoundException: unknown username
        :raises rfk.exc.base.InvalidPasswordException: wrong password
        """
        user = User.get_user(username=username)
        if user.check_password(password):
            return user
        else:
            raise rexc.base.InvalidPasswordException()

    @staticmethod
    def get_user(id=None, username=None):
        """Look a user up by primary key or by username (exactly one of
        the two must be given)."""
        assert id or username
        try:
            if username is None:
                return User.query.filter(User.user == id).one()
            else:
                return User.query.filter(User.username == username).one()
        except exc.NoResultFound:
            raise rexc.base.UserNotFoundException

    @staticmethod
    def check_username(username):
        """Return True when *username* is at least 3 chars of
        [0-9a-zA-Z_-]."""
        if re.match('^[0-9a-zA-Z_-]{3,}$', username) is None:
            return False
        else:
            return True

    @staticmethod
    def make_password(password):
        """Return the bcrypt hash for a cleartext password."""
        return bcrypt.encrypt(password)

    @staticmethod
    def add_user(username, password):
        """Create a new user; the session is flushed so the row exists.

        :raises rfk.exc.base.InvalidUsernameException: bad username format
        :raises rfk.exc.base.UserNameTakenException: username exists
        """
        if not User.check_username(username):
            raise rexc.base.InvalidUsernameException
        try:
            User.query.filter(User.username == username).one()
            raise rexc.base.UserNameTakenException()
        except exc.NoResultFound:
            user = User(username=username, password=User.make_password(password))
            rfk.database.session.add(user)
            rfk.database.session.flush()
            return user

    def check_password(self, password):
        """Verify *password* against the stored hash.

        bcrypt.verify raises ValueError on a non-bcrypt hash; in that
        case fall back to the legacy SHA1 scheme and, on a match,
        re-hash the password with bcrypt (in-place migration).
        """
        try:
            return bcrypt.verify(password, self.password)
        except ValueError:
            if hashlib.sha1(password).hexdigest() == self.password:
                self.password = User.make_password(password)
                return True
            else:
                return False

    def add_permission(self, code=None, permission=None):
        """Grant a permission (by code or object); returns False when
        the user already had it, True when it was added."""
        assert code or permission
        if permission is None:
            permission = Permission.get_permission(code)
        try:
            UserPermission.query.filter(UserPermission.user == self,
                                        UserPermission.permission == permission) \
                .one()
            return False
        except exc.NoResultFound:
            self.permissions.append(UserPermission(permission))
            return True

    def has_permission(self, code=None, permission=None):
        """Return True when the user holds the permission (by code or
        object)."""
        assert code or permission
        if permission is None:
            permission = Permission.get_permission(code)
        try:
            UserPermission.query.filter(UserPermission.user == self,
                                        UserPermission.permission == permission) \
                .one()
            return True
        except exc.NoResultFound:
            return False

    def get_setting(self, setting=None, code=None):
        """Return the user's value for a setting (by object or code), or
        None when the user never set it."""
        assert setting or code
        if setting is None:
            setting = Setting.get_setting(code)
        try:
            us = UserSetting.query.filter(UserSetting.user == self,
                                          UserSetting.setting == setting).one()
            return us.get_value()
        except exc.NoResultFound:
            return None

    def set_setting(self, value, setting=None, code=None):
        """Store *value* for a setting (by object or code) and flush."""
        assert setting or code
        if setting is None:
            setting = Setting.get_setting(code)
        UserSetting.set_value(self, setting, value)
        rfk.database.session.flush()

    def get_total_streamtime(self):
        """Returns a timedelta object with the user's total time streamed.

        Sums end-begin over all of the user's shows with status STREAMED
        (computed in SQL); the TypeError branch covers the no-shows case
        where the aggregate comes back as None.
        """
        try:
            return timedelta(seconds=float(rfk.database.session
                .query(func.sum(func.time_to_sec(func.timediff(Show.end, Show.begin)))) \
                .join(UserShow).filter(UserShow.status == UserShow.STATUS.STREAMED,
                                       UserShow.user == self).first()[0]))
        except TypeError:
            return timedelta(seconds=0)

    def __repr__(self):
        return "<USER username={0}>".format(self.username)
class Setting(Base):
    """Definition of one available per-user setting (table ``settings``).

    ``val_type`` (one of ``Setting.TYPES``) selects which UserSetting
    column stores the value.
    """
    __tablename__ = 'settings'
    setting = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    code = Column(String(25), unique=True)
    name = Column(String(50))
    val_type = Column(Integer(unsigned=True))
    TYPES = ENUM(['INT', 'STR'])

    @staticmethod
    def get_setting(code):
        """Return the Setting identified by *code* (NoResultFound if
        absent)."""
        return Setting.query.filter(Setting.code == code).one()

    @staticmethod
    def add_setting(code, name, val_type):
        """Return the existing Setting for *code*, or a new (not yet
        session-added) instance when it does not exist."""
        try:
            return Setting.query.filter(Setting.code == code).one()
        except exc.NoResultFound:
            return Setting(code=code, name=name, val_type=val_type)
class UserSetting(Base):
    """A user's stored value for one Setting (table ``user_settings``).

    The value lives in ``val_int`` or ``val_str`` depending on the
    setting's ``val_type``.
    """
    __tablename__ = 'user_settings'
    userSetting = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
                                                                onupdate="CASCADE",
                                                                ondelete="RESTRICT"))
    user = relationship("User", backref=backref('settings'))
    setting_id = Column("setting", Integer(unsigned=True),
                        ForeignKey('settings.setting',
                                   onupdate="CASCADE",
                                   ondelete="RESTRICT"))
    setting = relationship("Setting")
    val_int = Column(Integer)
    val_str = Column(String(255))

    def get_value(self):
        """Return the stored value from whichever column the setting's
        type uses."""
        if self.setting.val_type == Setting.TYPES.INT:
            return self.val_int
        elif self.setting.val_type == Setting.TYPES.STR:
            return self.val_str

    @staticmethod
    def set_value(user, setting, value):
        """Create or update the user's row for *setting* with *value*.

        Booleans are normalised to 0/1 so they fit the INT column.
        """
        if value == True:
            value = 1
        elif value == False:
            value = 0
        try:
            us = UserSetting.query.filter(UserSetting.user == user,
                                          UserSetting.setting == setting).one()
        except exc.NoResultFound:
            us = UserSetting(user=user, setting=setting)
        if us.setting.val_type == Setting.TYPES.INT:
            us.val_int = value
        elif us.setting.val_type == Setting.TYPES.STR:
            us.val_str = value
class Permission(Base):
    """A grantable permission, identified by a short code
    (table ``permissions``)."""
    __tablename__ = 'permissions'
    permission = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    code = Column(String(25), unique=True)
    name = Column(String(50))

    @staticmethod
    def get_permission(code):
        """Return the Permission identified by *code* (NoResultFound if
        absent)."""
        return Permission.query.filter(Permission.code == code).one()

    @staticmethod
    def add_permission(code, name):
        """Return the existing Permission for *code*, or a new (not yet
        session-added) instance when it does not exist."""
        try:
            return Permission.query.filter(Permission.code == code).one()
        except exc.NoResultFound:
            return Permission(code=code, name=name)
class UserPermission(Base):
    """Association row linking a User to a Permission
    (table ``user_permissions``)."""
    __tablename__ = 'user_permissions'
    userPermission = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
                                                                onupdate="CASCADE",
                                                                ondelete="RESTRICT"))
    user = relationship("User", backref=backref('permissions', cascade="all, delete-orphan"))
    permission_id = Column("permission", Integer(unsigned=True),
                           ForeignKey('permissions.permission',
                                      onupdate="CASCADE",
                                      ondelete="RESTRICT"))
    permission = relationship("Permission", backref=backref('users', cascade="all, delete-orphan"))

    def __init__(self, permission):
        # The user side is supplied by appending to User.permissions.
        self.permission = permission
class Ban(Base):
    """A ban of a user and/or address range, with an expiration time
    (table ``bans``)."""
    __tablename__ = 'bans'
    ban = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
                                                                onupdate="CASCADE",
                                                                ondelete="RESTRICT"))
    user = relationship("User", backref=backref('bans'))
    # Address range the ban applies to; format not enforced here --
    # presumably an IP/CIDR string, confirm against the code that sets it.
    range = Column(String(50))
    expiration = Column(UTCDateTime)
class News(Base):
    """A news posting with author, title and body (table ``news``)."""
    __tablename__ = 'news'
    news = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    # BUGFIX: pass the callable `now`, not `now()`. The call was evaluated
    # once at import time, so every news row defaulted to the module-load
    # timestamp instead of its insert time (sibling models such as
    # User.register_date and Log.timestamp already pass the callable).
    time = Column(UTCDateTime, default=now)
    user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
                                                                onupdate="CASCADE",
                                                                ondelete="RESTRICT"))
    user = relationship("User")
    title = Column(String(255))
    content = Column(Text)
class ApiKey(Base):
    """An API key issued to a user for programmatic access
    (table ``apikeys``).

    ``flag`` is a bitmask over ``ApiKey.FLAGS``; keys without FASTQUERY
    are rate-limited to one request per second (see ``check_key``).
    """
    __tablename__ = 'apikeys'
    apikey = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
                                                                onupdate="CASCADE",
                                                                ondelete="RESTRICT"))
    user = relationship("User", backref="apikeys")
    # sha1 hexdigest generated by gen_key().
    key = Column(String(128))
    # Number of successful uses (incremented by check_key).
    counter = Column(Integer(unsigned=True), default=0)
    # BUGFIX: pass the callable `now`, not `now()`; the call was evaluated
    # once at import time so every new key defaulted to module-load time.
    access = Column(UTCDateTime, default=now)
    application = Column(String(128))
    description = Column(String(255))
    flag = Column(Integer(unsigned=True), default=0)
    FLAGS = SET(['DISABLED', 'FASTQUERY', 'KICK', 'BAN', 'AUTH'])

    def gen_key(self):
        """Generate a unique key for this ApiKey and store it in self.key."""
        c = 0
        while True:
            key = hashlib.sha1("%s%s%d%d" % (self.application, self.description, time.time(), c)).hexdigest()
            if ApiKey.query.filter(ApiKey.key == key).first() is None:
                break
            # BUGFIX: advance the collision counter so a retry hashes new
            # input; previously `c` never changed and uniqueness relied on
            # time.time() alone.
            c += 1
        self.key = key

    @staticmethod
    def check_key(key):
        """Validate an API key string and account for the access.

        Increments the key's usage counter and access timestamp.

        :raises rfk.exc.api.KeyInvalidException: unknown (or ambiguous) key
        :raises rfk.exc.api.KeyDisabledException: key has the DISABLED flag
        :raises rfk.exc.api.FastQueryException: non-FASTQUERY key used more
            than once per second
        """
        try:
            apikey = ApiKey.query.filter(ApiKey.key == key).one()
        except (exc.NoResultFound, exc.MultipleResultsFound):
            raise rexc.api.KeyInvalidException()
        if apikey.flag & ApiKey.FLAGS.DISABLED:
            raise rexc.api.KeyDisabledException()
        elif not apikey.flag & ApiKey.FLAGS.FASTQUERY:
            if now() - apikey.access <= timedelta(seconds=1):
                raise rexc.api.FastQueryException(last_access=apikey.access)
        apikey.counter += 1
        apikey.access = now()
        return apikey
class Log(Base):
    """An application log entry persisted to the database
    (table ``log``)."""
    __tablename__ = 'log'
    log = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    timestamp = Column(UTCDateTime, default=now)
    severity = Column(Integer(unsigned=True))
    module = Column(String(50))
    message = Column(Text)
class Loop(Base):
    """A filler audio loop and the daytime span it should play in
    (table ``loops``).

    ``begin``/``end`` encode a time of day as
    ``hour * 100 + (minute / 60.) * 100`` (0..2400, see
    get_current_loop); a span may wrap past midnight (begin > end).
    NOTE(review): the default end=1440 looks inconsistent with that
    encoding (1440 would be 14:24) -- confirm intent.
    """
    __tablename__ = 'loops'
    loop = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    begin = Column(Integer(unsigned=True), default=0)
    end = Column(Integer(unsigned=True), default=1440)
    filename = Column(String(50))

    @hybrid_property
    def length(self):
        # Span length; +2400 unwraps spans crossing midnight.
        if (self.end >= self.begin):
            return abs(self.end - self.begin)
        else:
            return abs((self.end + 2400) - self.begin)

    @length.expression
    def length(cls):
        # SQL-side equivalent of the Python property above.
        return func.abs(cast(case([(cls.begin <= cls.end, cls.end),
                                   (cls.begin >= cls.end, cls.end + 2400)]), Integer) - cast(cls.begin, Integer))

    @hybrid_method
    def contains(self, point):
        # True when *point* lies in the span; the second case handles
        # spans wrapping past midnight. Usable in SQL filters too.
        return case([(self.begin <= self.end, (self.begin <= point) & (self.end >= point)),
                     (self.begin >= self.end, (self.begin <= point) | (self.end >= point))])

    @hybrid_property
    def file_exists(self):
        # True when the loop's audio file is present on disk.
        if self.filename is None:
            return False
        return os.path.exists(os.path.join(get_path(CONFIG.get('liquidsoap', 'looppath')), self.filename))

    @staticmethod
    def get_current_loop():
        """Return the loop that should be scheduled right now.

        Prefers a loop whose span contains the current time; falls back
        to any loop whose file exists; returns None if nothing usable.

        @todo maybe broken ;_;
        NOTE(review): the fallback comment said "longest" but both
        queries order by length ascending (shortest first) -- confirm
        which is intended.
        """
        n = now()
        # First: loops whose time span contains the current moment.
        loops = Loop.query.filter(Loop.contains(int(n.hour * 100 + (n.minute / 60.) * 100))).order_by(
            Loop.length.asc()).all()
        for loop in loops:
            if loop.file_exists:
                return loop;
        # No matching span: fall back to any loop with an existing file.
        loops = Loop.query.order_by(Loop.length.asc()).all()
        for loop in loops:
            if loop.file_exists:
                return loop;
        # Nothing usable on disk at all.
        return None
| bsd-3-clause |
mancoast/CPythonPyc_test | cpython/260_test_descr.py | 2 | 145704 | import types
import unittest
import warnings
from copy import deepcopy
from test import test_support
class OperatorsTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.binops = {
'add': '+',
'sub': '-',
'mul': '*',
'div': '/',
'divmod': 'divmod',
'pow': '**',
'lshift': '<<',
'rshift': '>>',
'and': '&',
'xor': '^',
'or': '|',
'cmp': 'cmp',
'lt': '<',
'le': '<=',
'eq': '==',
'ne': '!=',
'gt': '>',
'ge': '>=',
}
for name, expr in self.binops.items():
if expr.islower():
expr = expr + "(a, b)"
else:
expr = 'a %s b' % expr
self.binops[name] = expr
self.unops = {
'pos': '+',
'neg': '-',
'abs': 'abs',
'invert': '~',
'int': 'int',
'long': 'long',
'float': 'float',
'oct': 'oct',
'hex': 'hex',
}
for name, expr in self.unops.items():
if expr.islower():
expr = expr + "(a)"
else:
expr = '%s a' % expr
self.unops[name] = expr
def setUp(self):
self.original_filters = warnings.filters[:]
warnings.filterwarnings("ignore",
r'complex divmod\(\), // and % are deprecated$',
DeprecationWarning, r'(<string>|%s)$' % __name__)
def tearDown(self):
warnings.filters = self.original_filters
def unop_test(self, a, res, expr="len(a)", meth="__len__"):
d = {'a': a}
self.assertEqual(eval(expr, d), res)
t = type(a)
m = getattr(t, meth)
# Find method in parent class
while meth not in t.__dict__:
t = t.__bases__[0]
self.assertEqual(m, t.__dict__[meth])
self.assertEqual(m(a), res)
bm = getattr(a, meth)
self.assertEqual(bm(), res)
def binop_test(self, a, b, res, expr="a+b", meth="__add__"):
d = {'a': a, 'b': b}
# XXX Hack so this passes before 2.3 when -Qnew is specified.
if meth == "__div__" and 1/2 == 0.5:
meth = "__truediv__"
if meth == '__divmod__': pass
self.assertEqual(eval(expr, d), res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
self.assertEqual(m, t.__dict__[meth])
self.assertEqual(m(a, b), res)
bm = getattr(a, meth)
self.assertEqual(bm(b), res)
def ternop_test(self, a, b, c, res, expr="a[b:c]", meth="__getslice__"):
d = {'a': a, 'b': b, 'c': c}
self.assertEqual(eval(expr, d), res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
self.assertEqual(m, t.__dict__[meth])
self.assertEqual(m(a, b, c), res)
bm = getattr(a, meth)
self.assertEqual(bm(b, c), res)
def setop_test(self, a, b, res, stmt="a+=b", meth="__iadd__"):
d = {'a': deepcopy(a), 'b': b}
exec stmt in d
self.assertEqual(d['a'], res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
self.assertEqual(m, t.__dict__[meth])
d['a'] = deepcopy(a)
m(d['a'], b)
self.assertEqual(d['a'], res)
d['a'] = deepcopy(a)
bm = getattr(d['a'], meth)
bm(b)
self.assertEqual(d['a'], res)
def set2op_test(self, a, b, c, res, stmt="a[b]=c", meth="__setitem__"):
d = {'a': deepcopy(a), 'b': b, 'c': c}
exec stmt in d
self.assertEqual(d['a'], res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
self.assertEqual(m, t.__dict__[meth])
d['a'] = deepcopy(a)
m(d['a'], b, c)
self.assertEqual(d['a'], res)
d['a'] = deepcopy(a)
bm = getattr(d['a'], meth)
bm(b, c)
self.assertEqual(d['a'], res)
def set3op_test(self, a, b, c, d, res, stmt="a[b:c]=d", meth="__setslice__"):
dictionary = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d}
exec stmt in dictionary
self.assertEqual(dictionary['a'], res)
t = type(a)
while meth not in t.__dict__:
t = t.__bases__[0]
m = getattr(t, meth)
self.assertEqual(m, t.__dict__[meth])
dictionary['a'] = deepcopy(a)
m(dictionary['a'], b, c, d)
self.assertEqual(dictionary['a'], res)
dictionary['a'] = deepcopy(a)
bm = getattr(dictionary['a'], meth)
bm(b, c, d)
self.assertEqual(dictionary['a'], res)
def test_lists(self):
# Testing list operations...
# Asserts are within individual test methods
self.binop_test([1], [2], [1,2], "a+b", "__add__")
self.binop_test([1,2,3], 2, 1, "b in a", "__contains__")
self.binop_test([1,2,3], 4, 0, "b in a", "__contains__")
self.binop_test([1,2,3], 1, 2, "a[b]", "__getitem__")
self.ternop_test([1,2,3], 0, 2, [1,2], "a[b:c]", "__getslice__")
self.setop_test([1], [2], [1,2], "a+=b", "__iadd__")
self.setop_test([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__")
self.unop_test([1,2,3], 3, "len(a)", "__len__")
self.binop_test([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__")
self.binop_test([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__")
self.set2op_test([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__")
self.set3op_test([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d",
"__setslice__")
def test_dicts(self):
# Testing dict operations...
self.binop_test({1:2}, {2:1}, -1, "cmp(a,b)", "__cmp__")
self.binop_test({1:2,3:4}, 1, 1, "b in a", "__contains__")
self.binop_test({1:2,3:4}, 2, 0, "b in a", "__contains__")
self.binop_test({1:2,3:4}, 1, 2, "a[b]", "__getitem__")
d = {1:2, 3:4}
l1 = []
for i in d.keys():
l1.append(i)
l = []
for i in iter(d):
l.append(i)
self.assertEqual(l, l1)
l = []
for i in d.__iter__():
l.append(i)
self.assertEqual(l, l1)
l = []
for i in dict.__iter__(d):
l.append(i)
self.assertEqual(l, l1)
d = {1:2, 3:4}
self.unop_test(d, 2, "len(a)", "__len__")
self.assertEqual(eval(repr(d), {}), d)
self.assertEqual(eval(d.__repr__(), {}), d)
self.set2op_test({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c",
"__setitem__")
# Tests for unary and binary operators
def number_operators(self, a, b, skip=[]):
dict = {'a': a, 'b': b}
for name, expr in self.binops.items():
if name not in skip:
name = "__%s__" % name
if hasattr(a, name):
res = eval(expr, dict)
self.binop_test(a, b, res, expr, name)
for name, expr in self.unops.items():
if name not in skip:
name = "__%s__" % name
if hasattr(a, name):
res = eval(expr, dict)
self.unop_test(a, res, expr, name)
def test_ints(self):
# Testing int operations...
self.number_operators(100, 3)
# The following crashes in Python 2.2
self.assertEqual((1).__nonzero__(), 1)
self.assertEqual((0).__nonzero__(), 0)
# This returns 'NotImplemented' in Python 2.2
class C(int):
def __add__(self, other):
return NotImplemented
self.assertEqual(C(5L), 5)
try:
C() + ""
except TypeError:
pass
else:
self.fail("NotImplemented should have caused TypeError")
import sys
try:
C(sys.maxint+1)
except OverflowError:
pass
else:
self.fail("should have raised OverflowError")
def test_longs(self):
# Testing long operations...
self.number_operators(100L, 3L)
def test_floats(self):
# Testing float operations...
self.number_operators(100.0, 3.0)
def test_complexes(self):
# Testing complex operations...
self.number_operators(100.0j, 3.0j, skip=['lt', 'le', 'gt', 'ge',
'int', 'long', 'float'])
class Number(complex):
__slots__ = ['prec']
def __new__(cls, *args, **kwds):
result = complex.__new__(cls, *args)
result.prec = kwds.get('prec', 12)
return result
def __repr__(self):
prec = self.prec
if self.imag == 0.0:
return "%.*g" % (prec, self.real)
if self.real == 0.0:
return "%.*gj" % (prec, self.imag)
return "(%.*g+%.*gj)" % (prec, self.real, prec, self.imag)
__str__ = __repr__
a = Number(3.14, prec=6)
self.assertEqual(repr(a), "3.14")
self.assertEqual(a.prec, 6)
a = Number(a, prec=2)
self.assertEqual(repr(a), "3.1")
self.assertEqual(a.prec, 2)
a = Number(234.5)
self.assertEqual(repr(a), "234.5")
self.assertEqual(a.prec, 12)
def test_spam_lists(self):
# Testing spamlist operations...
import copy, xxsubtype as spam
def spamlist(l, memo=None):
import xxsubtype as spam
return spam.spamlist(l)
# This is an ugly hack:
copy._deepcopy_dispatch[spam.spamlist] = spamlist
self.binop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+b",
"__add__")
self.binop_test(spamlist([1,2,3]), 2, 1, "b in a", "__contains__")
self.binop_test(spamlist([1,2,3]), 4, 0, "b in a", "__contains__")
self.binop_test(spamlist([1,2,3]), 1, 2, "a[b]", "__getitem__")
self.ternop_test(spamlist([1,2,3]), 0, 2, spamlist([1,2]), "a[b:c]",
"__getslice__")
self.setop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+=b",
"__iadd__")
self.setop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*=b",
"__imul__")
self.unop_test(spamlist([1,2,3]), 3, "len(a)", "__len__")
self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*b",
"__mul__")
self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "b*a",
"__rmul__")
self.set2op_test(spamlist([1,2]), 1, 3, spamlist([1,3]), "a[b]=c",
"__setitem__")
self.set3op_test(spamlist([1,2,3,4]), 1, 3, spamlist([5,6]),
spamlist([1,5,6,4]), "a[b:c]=d", "__setslice__")
# Test subclassing
class C(spam.spamlist):
def foo(self): return 1
a = C()
self.assertEqual(a, [])
self.assertEqual(a.foo(), 1)
a.append(100)
self.assertEqual(a, [100])
self.assertEqual(a.getstate(), 0)
a.setstate(42)
self.assertEqual(a.getstate(), 42)
def test_spam_dicts(self):
# Testing spamdict operations...
import copy, xxsubtype as spam
def spamdict(d, memo=None):
import xxsubtype as spam
sd = spam.spamdict()
for k, v in d.items():
sd[k] = v
return sd
# This is an ugly hack:
copy._deepcopy_dispatch[spam.spamdict] = spamdict
self.binop_test(spamdict({1:2}), spamdict({2:1}), -1, "cmp(a,b)",
"__cmp__")
self.binop_test(spamdict({1:2,3:4}), 1, 1, "b in a", "__contains__")
self.binop_test(spamdict({1:2,3:4}), 2, 0, "b in a", "__contains__")
self.binop_test(spamdict({1:2,3:4}), 1, 2, "a[b]", "__getitem__")
d = spamdict({1:2,3:4})
l1 = []
for i in d.keys():
l1.append(i)
l = []
for i in iter(d):
l.append(i)
self.assertEqual(l, l1)
l = []
for i in d.__iter__():
l.append(i)
self.assertEqual(l, l1)
l = []
for i in type(spamdict({})).__iter__(d):
l.append(i)
self.assertEqual(l, l1)
straightd = {1:2, 3:4}
spamd = spamdict(straightd)
self.unop_test(spamd, 2, "len(a)", "__len__")
self.unop_test(spamd, repr(straightd), "repr(a)", "__repr__")
self.set2op_test(spamdict({1:2,3:4}), 2, 3, spamdict({1:2,2:3,3:4}),
"a[b]=c", "__setitem__")
# Test subclassing
class C(spam.spamdict):
def foo(self): return 1
a = C()
self.assertEqual(a.items(), [])
self.assertEqual(a.foo(), 1)
a['foo'] = 'bar'
self.assertEqual(a.items(), [('foo', 'bar')])
self.assertEqual(a.getstate(), 0)
a.setstate(100)
self.assertEqual(a.getstate(), 100)
class ClassPropertiesAndMethods(unittest.TestCase):
def test_python_dicts(self):
    # Testing Python subclass of dict...
    self.assert_(issubclass(dict, dict))
    self.assert_(isinstance({}, dict))
    d = dict()
    self.assertEqual(d, {})
    self.assert_(d.__class__ is dict)
    self.assert_(isinstance(d, dict))
    # Subclass overriding __init__/__getitem__/__setitem__. Note the
    # deliberate `self_local` naming: `self` inside these methods is the
    # *test case* captured from the enclosing scope, so the asserts run
    # inside the dict subclass's own methods.
    class C(dict):
        state = -1
        def __init__(self_local, *a, **kw):
            if a:
                self.assertEqual(len(a), 1)
                self_local.state = a[0]
            if kw:
                # Keyword args are stored inverted: value becomes the key.
                for k, v in kw.items():
                    self_local[v] = k
        def __getitem__(self, key):
            # Missing keys read as 0 instead of raising KeyError.
            return self.get(key, 0)
        def __setitem__(self_local, key, value):
            self.assert_(isinstance(key, type(0)))
            dict.__setitem__(self_local, key, value)
        def setstate(self, state):
            self.state = state
        def getstate(self):
            return self.state
    self.assert_(issubclass(C, dict))
    a1 = C(12)
    self.assertEqual(a1.state, 12)
    a2 = C(foo=1, bar=2)
    # NOTE(review): this collapses two checks into one `and`; if
    # a2[1] != 'foo' the failure message compares False to 'bar'.
    self.assertEqual(a2[1] == 'foo' and a2[2], 'bar')
    a = C()
    self.assertEqual(a.state, -1)
    self.assertEqual(a.getstate(), -1)
    a.setstate(0)
    self.assertEqual(a.state, 0)
    self.assertEqual(a.getstate(), 0)
    a.setstate(10)
    self.assertEqual(a.state, 10)
    self.assertEqual(a.getstate(), 10)
    self.assertEqual(a[42], 0)
    a[42] = 24
    self.assertEqual(a[42], 24)
    # Stress the overridden item access with nested C instances.
    N = 50
    for i in range(N):
        a[i] = C()
        for j in range(N):
            a[i][j] = i*j
    for i in range(N):
        for j in range(N):
            self.assertEqual(a[i][j], i*j)
def test_python_lists(self):
    # Testing Python subclass of list...
    class C(list):
        def __getitem__(self, i):
            # Item access is offset by 100 to prove the override is used.
            return list.__getitem__(self, i) + 100
        def __getslice__(self, i, j):
            # Slicing returns the raw indices instead of a sublist.
            return (i, j)
    a = C()
    a.extend([0,1,2])
    self.assertEqual(a[0], 100)
    self.assertEqual(a[1], 101)
    self.assertEqual(a[2], 102)
    self.assertEqual(a[100:200], (100,200))
def test_metaclass(self):
# Testing __metaclass__...
class C:
__metaclass__ = type
def __init__(self):
self.__state = 0
def getstate(self):
return self.__state
def setstate(self, state):
self.__state = state
a = C()
self.assertEqual(a.getstate(), 0)
a.setstate(10)
self.assertEqual(a.getstate(), 10)
class D:
class __metaclass__(type):
def myself(cls): return cls
self.assertEqual(D.myself(), D)
d = D()
self.assertEqual(d.__class__, D)
class M1(type):
def __new__(cls, name, bases, dict):
dict['__spam__'] = 1
return type.__new__(cls, name, bases, dict)
class C:
__metaclass__ = M1
self.assertEqual(C.__spam__, 1)
c = C()
self.assertEqual(c.__spam__, 1)
class _instance(object):
pass
class M2(object):
@staticmethod
def __new__(cls, name, bases, dict):
self = object.__new__(cls)
self.name = name
self.bases = bases
self.dict = dict
return self
def __call__(self):
it = _instance()
# Early binding of methods
for key in self.dict:
if key.startswith("__"):
continue
setattr(it, key, self.dict[key].__get__(it, self))
return it
class C:
__metaclass__ = M2
def spam(self):
return 42
self.assertEqual(C.name, 'C')
self.assertEqual(C.bases, ())
self.assert_('spam' in C.dict)
c = C()
self.assertEqual(c.spam(), 42)
# More metaclass examples
class autosuper(type):
# Automatically add __super to the class
# This trick only works for dynamic classes
def __new__(metaclass, name, bases, dict):
cls = super(autosuper, metaclass).__new__(metaclass,
name, bases, dict)
# Name mangling for __super removes leading underscores
while name[:1] == "_":
name = name[1:]
if name:
name = "_%s__super" % name
else:
name = "__super"
setattr(cls, name, super(cls))
return cls
class A:
__metaclass__ = autosuper
def meth(self):
return "A"
class B(A):
def meth(self):
return "B" + self.__super.meth()
class C(A):
def meth(self):
return "C" + self.__super.meth()
class D(C, B):
def meth(self):
return "D" + self.__super.meth()
self.assertEqual(D().meth(), "DCBA")
class E(B, C):
def meth(self):
return "E" + self.__super.meth()
self.assertEqual(E().meth(), "EBCA")
class autoproperty(type):
# Automatically create property attributes when methods
# named _get_x and/or _set_x are found
def __new__(metaclass, name, bases, dict):
hits = {}
for key, val in dict.iteritems():
if key.startswith("_get_"):
key = key[5:]
get, set = hits.get(key, (None, None))
get = val
hits[key] = get, set
elif key.startswith("_set_"):
key = key[5:]
get, set = hits.get(key, (None, None))
set = val
hits[key] = get, set
for key, (get, set) in hits.iteritems():
dict[key] = property(get, set)
return super(autoproperty, metaclass).__new__(metaclass,
name, bases, dict)
class A:
__metaclass__ = autoproperty
def _get_x(self):
return -self.__x
def _set_x(self, x):
self.__x = -x
a = A()
self.assert_(not hasattr(a, "x"))
a.x = 12
self.assertEqual(a.x, 12)
self.assertEqual(a._A__x, -12)
class multimetaclass(autoproperty, autosuper):
# Merge of multiple cooperating metaclasses
pass
class A:
__metaclass__ = multimetaclass
def _get_x(self):
return "A"
class B(A):
def _get_x(self):
return "B" + self.__super._get_x()
class C(A):
def _get_x(self):
return "C" + self.__super._get_x()
class D(C, B):
def _get_x(self):
return "D" + self.__super._get_x()
self.assertEqual(D().x, "DCBA")
# Make sure type(x) doesn't call x.__class__.__init__
class T(type):
counter = 0
def __init__(self, *args):
T.counter += 1
class C:
__metaclass__ = T
self.assertEqual(T.counter, 1)
a = C()
self.assertEqual(type(a), C)
self.assertEqual(T.counter, 1)
class C(object): pass
c = C()
try: c()
except TypeError: pass
else: self.fail("calling object w/o call method should raise "
"TypeError")
# Testing code to find most derived baseclass
class A(type):
def __new__(*args, **kwargs):
return type.__new__(*args, **kwargs)
class B(object):
pass
class C(object):
__metaclass__ = A
# The most derived metaclass of D is A rather than type.
class D(B, C):
pass
def test_module_subclasses(self):
    # Testing Python subclass of module...
    log = []
    import types, sys
    MT = type(sys)
    # Module subclass that records every attribute get/set/delete.
    class MM(MT):
        def __init__(self, name):
            MT.__init__(self, name)
        def __getattribute__(self, name):
            log.append(("getattr", name))
            return MT.__getattribute__(self, name)
        def __setattr__(self, name, value):
            log.append(("setattr", name, value))
            MT.__setattr__(self, name, value)
        def __delattr__(self, name):
            log.append(("delattr", name))
            MT.__delattr__(self, name)
    a = MM("a")
    a.foo = 12
    x = a.foo
    del a.foo
    self.assertEqual(log, [("setattr", "foo", 12),
                           ("getattr", "foo"),
                           ("delattr", "foo")])
    # http://python.org/sf/1174712
    # ModuleType and str have incompatible instance layouts, so multiple
    # inheritance from both must be rejected at class-creation time.
    try:
        class Module(types.ModuleType, str):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheriting from ModuleType and str at the same time "
                  "should fail")
def test_multiple_inheritence(self):
    """Exercise multiple inheritance: mixing dict/list bases with plain
    classes, MRO construction, and classic/new-style base combinations."""
    # Testing multiple inheritance...
    class C(object):
        def __init__(self):
            self.__state = 0    # name-mangled to _C__state
        def getstate(self):
            return self.__state
        def setstate(self, state):
            self.__state = state
    a = C()
    self.assertEqual(a.getstate(), 0)
    a.setstate(10)
    self.assertEqual(a.getstate(), 10)
    class D(dict, C):
        def __init__(self):
            type({}).__init__(self)    # i.e. dict.__init__
            C.__init__(self)
    d = D()
    self.assertEqual(d.keys(), [])
    d["hello"] = "world"
    self.assertEqual(d.items(), [("hello", "world")])
    self.assertEqual(d["hello"], "world")
    self.assertEqual(d.getstate(), 0)
    d.setstate(10)
    self.assertEqual(d.getstate(), 10)
    self.assertEqual(D.__mro__, (D, dict, C, object))
    # SF bug #442833
    class Node(object):
        def __int__(self):
            return int(self.foo())
        def foo(self):
            return "23"
    class Frag(Node, list):
        def foo(self):
            return "42"
    # __int__ must pick up the foo() override in the subclass.
    self.assertEqual(Node().__int__(), 23)
    self.assertEqual(int(Node()), 23)
    self.assertEqual(Frag().__int__(), 42)
    self.assertEqual(int(Frag()), 42)
    # MI mixing classic and new-style classes.
    class A:
        x = 1
    class B(A):
        pass
    class C(A):
        x = 2
    class D(B, C):
        pass
    # Classic classes use depth-first left-to-right lookup, so D.x is
    # found via B -> A, never reaching C.
    self.assertEqual(D.x, 1)
    # Classic MRO is preserved for a classic base class.
    class E(D, object):
        pass
    self.assertEqual(E.__mro__, (E, D, B, A, C, object))
    self.assertEqual(E.x, 1)
    # But with a mix of classic bases, their MROs are combined using
    # new-style MRO.
    class F(B, C, object):
        pass
    self.assertEqual(F.__mro__, (F, B, C, A, object))
    self.assertEqual(F.x, 2)
    # Try something else.
    class C:
        def cmethod(self):
            return "C a"
        def all_method(self):
            return "C b"
    class M1(C, object):
        def m1method(self):
            return "M1 a"
        def all_method(self):
            return "M1 b"
    self.assertEqual(M1.__mro__, (M1, C, object))
    m = M1()
    self.assertEqual(m.cmethod(), "C a")
    self.assertEqual(m.m1method(), "M1 a")
    self.assertEqual(m.all_method(), "M1 b")
    class D(C):
        def dmethod(self):
            return "D a"
        def all_method(self):
            return "D b"
    class M2(D, object):
        def m2method(self):
            return "M2 a"
        def all_method(self):
            return "M2 b"
    self.assertEqual(M2.__mro__, (M2, D, C, object))
    m = M2()
    self.assertEqual(m.cmethod(), "C a")
    self.assertEqual(m.dmethod(), "D a")
    self.assertEqual(m.m2method(), "M2 a")
    self.assertEqual(m.all_method(), "M2 b")
    class M3(M1, M2, object):
        def m3method(self):
            return "M3 a"
        def all_method(self):
            return "M3 b"
    self.assertEqual(M3.__mro__, (M3, M1, M2, D, C, object))
    m = M3()
    self.assertEqual(m.cmethod(), "C a")
    self.assertEqual(m.dmethod(), "D a")
    self.assertEqual(m.m1method(), "M1 a")
    self.assertEqual(m.m2method(), "M2 a")
    self.assertEqual(m.m3method(), "M3 a")
    self.assertEqual(m.all_method(), "M3 b")
    class Classic:
        pass
    try:
        # A new-style class cannot be built from only classic bases.
        class New(Classic):
            __metaclass__ = type
    except TypeError:
        pass
    else:
        self.fail("new class with only classic bases - shouldn't be")
def test_diamond_inheritence(self):
    """Diamond-shaped multiple inheritance: lookup must follow the C3
    MRO, and inconsistent base orderings must raise TypeError."""
    # Testing multiple inheritance special cases...
    class A(object):
        def spam(self): return "A"
    self.assertEqual(A().spam(), "A")
    class B(A):
        def boo(self): return "B"
        def spam(self): return "B"
    self.assertEqual(B().spam(), "B")
    self.assertEqual(B().boo(), "B")
    class C(A):
        def boo(self): return "C"
    self.assertEqual(C().spam(), "A")
    self.assertEqual(C().boo(), "C")
    class D(B, C): pass
    self.assertEqual(D().spam(), "B")
    self.assertEqual(D().boo(), "B")
    self.assertEqual(D.__mro__, (D, B, C, A, object))
    class E(C, B): pass
    self.assertEqual(E().spam(), "B")
    self.assertEqual(E().boo(), "C")
    self.assertEqual(E.__mro__, (E, C, B, A, object))
    # MRO order disagreement: D requires B before C while E requires C
    # before B, so no consistent linearization exists for F or G.
    try:
        class F(D, E): pass
    except TypeError:
        pass
    else:
        self.fail("expected MRO order disagreement (F)")
    try:
        class G(E, D): pass
    except TypeError:
        pass
    else:
        self.fail("expected MRO order disagreement (G)")
# see thread python-dev/2002-October/029035.html
def test_ex5_from_c3_switch(self):
    """Example 5 from the python-dev discussion that led to adopting
    the C3 method resolution order in Python 2.3."""
    # Testing ex5 from C3 switch discussion...
    class A(object): pass
    class B(object): pass
    class C(object): pass
    class X(A): pass
    class Y(A): pass
    class Z(X,B,Y,C): pass
    self.assertEqual(Z.__mro__, (Z, X, B, Y, A, C, object))
# see "A Monotonic Superclass Linearization for Dylan",
# by Kim Barrett et al. (OOPSLA 1996)
def test_monotonicity(self):
    """Each class's MRO must preserve the relative order of classes in
    its bases' MROs (the monotonicity property of C3)."""
    # Testing MRO monotonicity...
    class Boat(object): pass
    class DayBoat(Boat): pass
    class WheelBoat(Boat): pass
    class EngineLess(DayBoat): pass
    class SmallMultihull(DayBoat): pass
    class PedalWheelBoat(EngineLess,WheelBoat): pass
    class SmallCatamaran(SmallMultihull): pass
    class Pedalo(PedalWheelBoat,SmallCatamaran): pass
    self.assertEqual(PedalWheelBoat.__mro__,
          (PedalWheelBoat, EngineLess, DayBoat, WheelBoat, Boat, object))
    self.assertEqual(SmallCatamaran.__mro__,
          (SmallCatamaran, SmallMultihull, DayBoat, Boat, object))
    self.assertEqual(Pedalo.__mro__,
          (Pedalo, PedalWheelBoat, EngineLess, SmallCatamaran,
           SmallMultihull, DayBoat, WheelBoat, Boat, object))
# see "A Monotonic Superclass Linearization for Dylan",
# by Kim Barrett et al. (OOPSLA 1996)
def test_consistency_with_epg(self):
    """C3 linearization must be consistent with the extended precedence
    graph (EPG) from the Dylan paper."""
    # Testing consistency with EPG...
    class Pane(object): pass
    class ScrollingMixin(object): pass
    class EditingMixin(object): pass
    class ScrollablePane(Pane,ScrollingMixin): pass
    class EditablePane(Pane,EditingMixin): pass
    class EditableScrollablePane(ScrollablePane,EditablePane): pass
    self.assertEqual(EditableScrollablePane.__mro__,
          (EditableScrollablePane, ScrollablePane, EditablePane, Pane,
            ScrollingMixin, EditingMixin, object))
def test_mro_disagreement(self):
    """Error messages produced by type() for duplicate bases and for
    base orderings with no consistent MRO."""
    # Testing error messages for MRO disagreement...
    mro_err_msg = """Cannot create a consistent method resolution
order (MRO) for bases """
    def raises(exc, expected, callable, *args):
        # Helper: assert callable(*args) raises exc with a message that
        # starts with the expected prefix.
        try:
            callable(*args)
        except exc, msg:
            if not str(msg).startswith(expected):
                self.fail("Message %r, expected %r" % (str(msg), expected))
        else:
            self.fail("Expected %s" % exc)
    class A(object): pass
    class B(A): pass
    class C(object): pass
    # Test some very simple errors
    raises(TypeError, "duplicate base class A",
           type, "X", (A, A), {})
    raises(TypeError, mro_err_msg,
           type, "X", (A, B), {})
    raises(TypeError, mro_err_msg,
           type, "X", (A, C, B), {})
    # Test a slightly more complex error
    class GridLayout(object): pass
    class HorizontalGrid(GridLayout): pass
    class VerticalGrid(GridLayout): pass
    class HVGrid(HorizontalGrid, VerticalGrid): pass
    class VHGrid(VerticalGrid, HorizontalGrid): pass
    raises(TypeError, mro_err_msg,
           type, "ConfusedGrid", (HVGrid, VHGrid), {})
def test_object_class(self):
    """Bare object() instances: correct class/type, no attribute
    assignment, no __dict__; plain subclasses do get a __dict__."""
    # Testing object class...
    a = object()
    self.assertEqual(a.__class__, object)
    self.assertEqual(type(a), object)
    b = object()
    self.assertNotEqual(a, b)
    self.assertFalse(hasattr(a, "foo"))
    try:
        a.foo = 12
    except (AttributeError, TypeError):
        pass
    else:
        self.fail("object() should not allow setting a foo attribute")
    self.assertFalse(hasattr(object(), "__dict__"))
    class Cdict(object):
        pass
    x = Cdict()
    self.assertEqual(x.__dict__, {})
    x.foo = 1
    self.assertEqual(x.foo, 1)
    self.assertEqual(x.__dict__, {'foo': 1})
def test_slots(self):
    """__slots__: attribute storage without an instance __dict__,
    slot-name validation, name mangling, unicode slot names, and
    several historical reference-leak regressions."""
    # Testing __slots__...
    class C0(object):
        __slots__ = []
    x = C0()
    self.assertFalse(hasattr(x, "__dict__"))
    self.assertFalse(hasattr(x, "foo"))
    class C1(object):
        __slots__ = ['a']
    x = C1()
    self.assertFalse(hasattr(x, "__dict__"))
    self.assertFalse(hasattr(x, "a"))    # slot exists but is unset
    x.a = 1
    self.assertEqual(x.a, 1)
    x.a = None
    self.assertEqual(x.a, None)
    del x.a
    self.assertFalse(hasattr(x, "a"))
    class C3(object):
        __slots__ = ['a', 'b', 'c']
    x = C3()
    self.assertFalse(hasattr(x, "__dict__"))
    self.assertFalse(hasattr(x, 'a'))
    self.assertFalse(hasattr(x, 'b'))
    self.assertFalse(hasattr(x, 'c'))
    x.a = 1
    x.b = 2
    x.c = 3
    self.assertEqual(x.a, 1)
    self.assertEqual(x.b, 2)
    self.assertEqual(x.c, 3)
    class C4(object):
        """Validate name mangling"""
        __slots__ = ['__a']
        def __init__(self, value):
            self.__a = value
        def get(self):
            return self.__a
    x = C4(5)
    self.assertFalse(hasattr(x, '__dict__'))
    self.assertFalse(hasattr(x, '__a'))    # only _C4__a exists
    self.assertEqual(x.get(), 5)
    try:
        x.__a = 6
    except AttributeError:
        pass
    else:
        self.fail("Double underscored names not mangled")
    # Make sure slot names are proper identifiers
    try:
        class C(object):
            __slots__ = [None]
    except TypeError:
        pass
    else:
        self.fail("[None] slots not caught")
    try:
        class C(object):
            __slots__ = ["foo bar"]
    except TypeError:
        pass
    else:
        self.fail("['foo bar'] slots not caught")
    try:
        class C(object):
            __slots__ = ["foo\0bar"]
    except TypeError:
        pass
    else:
        self.fail("['foo\\0bar'] slots not caught")
    try:
        class C(object):
            __slots__ = ["1"]
    except TypeError:
        pass
    else:
        self.fail("['1'] slots not caught")
    try:
        class C(object):
            __slots__ = [""]
    except TypeError:
        pass
    else:
        self.fail("[''] slots not caught")
    class C(object):
        __slots__ = ["a", "a_b", "_a", "A0123456789Z"]
    # XXX(nnorwitz): was there supposed to be something tested
    # from the class above?
    # Test a single string is not expanded as a sequence.
    class C(object):
        __slots__ = "abc"
    c = C()
    c.abc = 5
    self.assertEqual(c.abc, 5)
    # Test unicode slot names
    try:
        unicode
    except NameError:
        pass
    else:
        # Test a single unicode string is not expanded as a sequence.
        class C(object):
            __slots__ = unicode("abc")
        c = C()
        c.abc = 5
        self.assertEqual(c.abc, 5)
        # _unicode_to_string used to modify slots in certain circumstances
        slots = (unicode("foo"), unicode("bar"))
        class C(object):
            __slots__ = slots
        x = C()
        x.foo = 5
        self.assertEqual(x.foo, 5)
        self.assertEqual(type(slots[0]), unicode)
        # this used to leak references
        try:
            class C(object):
                __slots__ = [unichr(128)]
        except (TypeError, UnicodeEncodeError):
            pass
        else:
            self.fail("[unichr(128)] slots not caught")
    # Test leaks
    class Counted(object):
        counter = 0      # counts the number of instances alive
        def __init__(self):
            Counted.counter += 1
        def __del__(self):
            Counted.counter -= 1
    class C(object):
        __slots__ = ['a', 'b', 'c']
    x = C()
    x.a = Counted()
    x.b = Counted()
    x.c = Counted()
    self.assertEqual(Counted.counter, 3)
    del x
    self.assertEqual(Counted.counter, 0)    # slot values must be released
    class D(C):
        pass
    x = D()
    x.a = Counted()
    x.z = Counted()    # z lands in D's __dict__, not a slot
    self.assertEqual(Counted.counter, 2)
    del x
    self.assertEqual(Counted.counter, 0)
    class E(D):
        __slots__ = ['e']
    x = E()
    x.a = Counted()
    x.z = Counted()
    x.e = Counted()
    self.assertEqual(Counted.counter, 3)
    del x
    self.assertEqual(Counted.counter, 0)
    # Test cyclical leaks [SF bug 519621]
    class F(object):
        __slots__ = ['a', 'b']
    log = []
    s = F()
    s.a = [Counted(), s]    # reference cycle through a slot
    self.assertEqual(Counted.counter, 1)
    s = None
    import gc
    gc.collect()
    self.assertEqual(Counted.counter, 0)
    # Test lookup leaks [SF bug 572567]
    import sys,gc
    class G(object):
        def __cmp__(self, other):
            return 0
        __hash__ = None # Silence Py3k warning
    g = G()
    orig_objects = len(gc.get_objects())
    for i in xrange(10):
        g==g    # repeated comparisons must not leak objects
    new_objects = len(gc.get_objects())
    self.assertEqual(orig_objects, new_objects)
    class H(object):
        __slots__ = ['a', 'b']
        def __init__(self):
            self.a = 1
            self.b = 2
        def __del__(self_):
            # deliberately closes over the test's self; the slots must
            # still hold their values while the destructor runs
            self.assertEqual(self_.a, 1)
            self.assertEqual(self_.b, 2)
    save_stderr = sys.stderr
    sys.stderr = sys.stdout    # surface any __del__ error in stdout
    h = H()
    try:
        del h
    finally:
        sys.stderr = save_stderr
def test_slots_special(self):
    """'__dict__' and '__weakref__' may themselves be named in
    __slots__ to re-enable those features selectively."""
    # Testing __dict__ and __weakref__ in __slots__...
    class D(object):
        __slots__ = ["__dict__"]
    a = D()
    self.assert_(hasattr(a, "__dict__"))
    self.assertFalse(hasattr(a, "__weakref__"))
    a.foo = 42
    self.assertEqual(a.__dict__, {"foo": 42})
    class W(object):
        __slots__ = ["__weakref__"]
    a = W()
    self.assert_(hasattr(a, "__weakref__"))
    self.assertFalse(hasattr(a, "__dict__"))
    try:
        a.foo = 42
    except AttributeError:
        pass
    else:
        self.fail("shouldn't be allowed to set a.foo")
    # Combining the two bases yields both features, in either order.
    class C1(W, D):
        __slots__ = []
    a = C1()
    self.assert_(hasattr(a, "__dict__"))
    self.assert_(hasattr(a, "__weakref__"))
    a.foo = 42
    self.assertEqual(a.__dict__, {"foo": 42})
    class C2(D, W):
        __slots__ = []
    a = C2()
    self.assert_(hasattr(a, "__dict__"))
    self.assert_(hasattr(a, "__weakref__"))
    a.foo = 42
    self.assertEqual(a.__dict__, {"foo": 42})
def test_slots_descriptor(self):
    """Slot descriptors must type-check the instance they operate on."""
    # Issue2115: slot descriptors did not correctly check
    # the type of the given object
    import abc
    class MyABC:
        __metaclass__ = abc.ABCMeta
        __slots__ = "a"
    class Unrelated(object):
        pass
    MyABC.register(Unrelated)
    u = Unrelated()
    self.assert_(isinstance(u, MyABC))
    # This used to crash: Unrelated passes isinstance() only via ABC
    # registration, so the slot descriptor must raise TypeError
    # instead of writing into a nonexistent slot.
    self.assertRaises(TypeError, MyABC.a.__set__, u, 3)
def test_dynamics(self):
    """Attributes, methods, and special methods added to a class after
    creation must be seen by existing instances and subclasses."""
    # Testing class attribute propagation...
    class D(object):
        pass
    class E(D):
        pass
    class F(D):
        pass
    D.foo = 1
    self.assertEqual(D.foo, 1)
    # Test that dynamic attributes are inherited
    self.assertEqual(E.foo, 1)
    self.assertEqual(F.foo, 1)
    # Test dynamic instances
    class C(object):
        pass
    a = C()
    self.assertFalse(hasattr(a, "foobar"))
    C.foobar = 2
    self.assertEqual(a.foobar, 2)
    C.method = lambda self: 42
    self.assertEqual(a.method(), 42)
    C.__repr__ = lambda self: "C()"
    self.assertEqual(repr(a), "C()")
    C.__int__ = lambda self: 100
    self.assertEqual(int(a), 100)
    self.assertEqual(a.foobar, 2)
    self.assertFalse(hasattr(a, "spam"))
    def mygetattr(self, name):
        if name == "spam":
            return "spam"
        raise AttributeError
    C.__getattr__ = mygetattr
    self.assertEqual(a.spam, "spam")
    a.new = 12
    self.assertEqual(a.new, 12)
    def mysetattr(self, name, value):
        if name == "spam":
            raise AttributeError
        return object.__setattr__(self, name, value)
    C.__setattr__ = mysetattr
    try:
        a.spam = "not spam"
    except AttributeError:
        pass
    else:
        self.fail("expected AttributeError")
    self.assertEqual(a.spam, "spam")    # still the __getattr__ value
    class D(C):
        pass
    d = D()
    d.foo = 1
    self.assertEqual(d.foo, 1)
    # Test handling of int*seq and seq*int
    class I(int):
        pass
    self.assertEqual("a"*I(2), "aa")
    self.assertEqual(I(2)*"a", "aa")
    self.assertEqual(2*I(3), 6)
    self.assertEqual(I(3)*2, 6)
    self.assertEqual(I(3)*I(2), 6)
    # Test handling of long*seq and seq*long
    class L(long):
        pass
    self.assertEqual("a"*L(2L), "aa")
    self.assertEqual(L(2L)*"a", "aa")
    self.assertEqual(2*L(3), 6)
    self.assertEqual(L(3)*2, 6)
    self.assertEqual(L(3)*L(2), 6)
    # Test comparison of classes with dynamic metaclasses
    class dynamicmetaclass(type):
        pass
    class someclass:
        __metaclass__ = dynamicmetaclass
    self.assertNotEqual(someclass, object)
def test_errors(self):
    """Class statements that must raise TypeError: incompatible
    instance layouts, non-type bases, malformed __slots__, and
    metaclasses with no most-derived winner."""
    # Testing errors...
    try:
        # list and dict have conflicting instance layouts
        class C(list, dict):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheritance from both list and dict should be illegal")
    try:
        class C(object, None):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheritance from non-type should be illegal")
    class Classic:
        pass
    try:
        class C(type(len)):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheritance from CFunction should be illegal")
    try:
        class C(object):
            __slots__ = 1
    except TypeError:
        pass
    else:
        self.fail("__slots__ = 1 should be illegal")
    try:
        class C(object):
            __slots__ = [1]
    except TypeError:
        pass
    else:
        self.fail("__slots__ = [1] should be illegal")
    class M1(type):
        pass
    class M2(type):
        pass
    class A1(object):
        __metaclass__ = M1
    class A2(object):
        __metaclass__ = M2
    try:
        # M1 and M2 are unrelated, so no most-derived metaclass exists.
        class B(A1, A2):
            pass
    except TypeError:
        pass
    else:
        self.fail("finding the most derived metaclass should have failed")
def test_classmethods(self):
    """classmethod(): binding through class vs. instance, inheritance,
    raw descriptor access, super(), and argument validation."""
    # Testing class methods...
    class C(object):
        def foo(*a): return a
        goo = classmethod(foo)
    c = C()
    self.assertEqual(C.goo(1), (C, 1))
    self.assertEqual(c.goo(1), (C, 1))
    self.assertEqual(c.foo(1), (c, 1))
    class D(C):
        pass
    d = D()
    # The bound class is the one the lookup went through, i.e. D.
    self.assertEqual(D.goo(1), (D, 1))
    self.assertEqual(d.goo(1), (D, 1))
    self.assertEqual(d.foo(1), (d, 1))
    self.assertEqual(D.foo(d, 1), (d, 1))
    # Test for a specific crash (SF bug 528132)
    def f(cls, arg): return (cls, arg)
    ff = classmethod(f)
    self.assertEqual(ff.__get__(0, int)(42), (int, 42))
    self.assertEqual(ff.__get__(0)(42), (int, 42))
    # Test super() with classmethods (SF bug 535444)
    self.assertEqual(C.goo.im_self, C)
    self.assertEqual(D.goo.im_self, D)
    self.assertEqual(super(D,D).goo.im_self, D)
    self.assertEqual(super(D,d).goo.im_self, D)
    self.assertEqual(super(D,D).goo(), (D,))
    self.assertEqual(super(D,d).goo(), (D,))
    # Verify that argument is checked for callability (SF bug 753451)
    try:
        classmethod(1).__get__(1)
    except TypeError:
        pass
    else:
        self.fail("classmethod should check for callability")
    # Verify that classmethod() doesn't allow keyword args
    try:
        classmethod(f, kw=1)
    except TypeError:
        pass
    else:
        self.fail("classmethod shouldn't accept keyword args")
def test_classmethods_in_c(self):
    """A C-implemented class method (xxsubtype.spamlist.classmeth) must
    receive the class plus pass-through args/kwargs whether invoked on
    the class or on an instance."""
    # Testing C-based class methods...
    import xxsubtype as spam
    a = (1, 2, 3)
    d = {'abc': 123}
    x, a1, d1 = spam.spamlist.classmeth(*a, **d)
    self.assertEqual(x, spam.spamlist)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
    x, a1, d1 = spam.spamlist().classmeth(*a, **d)
    self.assertEqual(x, spam.spamlist)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
def test_staticmethods(self):
    """staticmethod(): no implicit first argument regardless of whether
    it is looked up on the class, an instance, or a subclass."""
    # Testing static methods...
    class C(object):
        def foo(*a): return a
        goo = staticmethod(foo)
    c = C()
    self.assertEqual(C.goo(1), (1,))
    self.assertEqual(c.goo(1), (1,))
    self.assertEqual(c.foo(1), (c, 1,))    # plain method still binds
    class D(C):
        pass
    d = D()
    self.assertEqual(D.goo(1), (1,))
    self.assertEqual(d.goo(1), (1,))
    self.assertEqual(d.foo(1), (d, 1))
    self.assertEqual(D.foo(d, 1), (d, 1))
def test_staticmethods_in_c(self):
    """A C-implemented static method (xxsubtype.spamlist.staticmeth)
    must ignore how it was looked up (class or instance), pass
    positional and keyword arguments straight through, and return None
    in the "self" slot.
    """
    # Testing C-based static methods...
    import xxsubtype as spam
    a = (1, 2, 3)
    d = {"abc": 123}
    x, a1, d1 = spam.spamlist.staticmeth(*a, **d)
    self.assertEqual(x, None)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
    # Bug fix: this second unpack previously bound the kwargs dict to a
    # stray name d2 while the assertion below re-checked the stale d1
    # from the first call, so the instance-bound lookup's keyword dict
    # was never actually verified.
    x, a1, d1 = spam.spamlist().staticmeth(*a, **d)
    self.assertEqual(x, None)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
def test_classic(self):
    """classmethod() also works when attached to classic (old-style)
    classes, binding to the class the lookup went through."""
    # Testing classic classes...
    class C:
        def foo(*a): return a
        goo = classmethod(foo)
    c = C()
    self.assertEqual(C.goo(1), (C, 1))
    self.assertEqual(c.goo(1), (C, 1))
    self.assertEqual(c.foo(1), (c, 1))
    class D(C):
        pass
    d = D()
    self.assertEqual(D.goo(1), (D, 1))
    self.assertEqual(d.goo(1), (D, 1))
    self.assertEqual(d.foo(1), (d, 1))
    self.assertEqual(D.foo(d, 1), (d, 1))
    class E: # *not* subclassing from C
        foo = C.foo
    self.assertEqual(E().foo, C.foo) # i.e., unbound
    self.assert_(repr(C.foo.__get__(C())).startswith("<bound method "))
def test_compattr(self):
    """A user-written data descriptor (with __get__/__set__/__delete__)
    must drive instance attribute access."""
    # Testing computed attributes...
    class C(object):
        class computed_attribute(object):
            def __init__(self, get, set=None, delete=None):
                self.__get = get
                self.__set = set
                self.__delete = delete
            def __get__(self, obj, type=None):
                return self.__get(obj)
            def __set__(self, obj, value):
                return self.__set(obj, value)
            def __delete__(self, obj):
                return self.__delete(obj)
        def __init__(self):
            self.__x = 0
        def __get_x(self):
            # Reading x increments the stored counter as a side effect,
            # which is what the paired assertions below rely on.
            x = self.__x
            self.__x = x+1
            return x
        def __set_x(self, x):
            self.__x = x
        def __delete_x(self):
            del self.__x
        x = computed_attribute(__get_x, __set_x, __delete_x)
    a = C()
    self.assertEqual(a.x, 0)
    self.assertEqual(a.x, 1)
    a.x = 10
    self.assertEqual(a.x, 10)
    self.assertEqual(a.x, 11)
    del a.x
    self.assertEqual(hasattr(a, 'x'), 0)
def test_newslots(self):
    """Overriding __new__ on a list subclass: attributes set in __new__
    are visible to __init__, and subclasses inherit both hooks."""
    # Testing __new__ slot override...
    class C(list):
        def __new__(cls):
            self = list.__new__(cls)
            self.foo = 1
            return self
        def __init__(self):
            self.foo = self.foo + 2    # reads the value set by __new__
    a = C()
    self.assertEqual(a.foo, 3)
    self.assertEqual(a.__class__, C)
    class D(C):
        pass
    b = D()
    self.assertEqual(b.foo, 3)
    self.assertEqual(b.__class__, D)
def test_altmro(self):
    """type.mro() and metaclass mro() overrides; bogus return values
    must be rejected at class-creation time."""
    # Testing mro() and overriding it...
    class A(object):
        def f(self): return "A"
    class B(A):
        pass
    class C(A):
        def f(self): return "C"
    class D(B, C):
        pass
    self.assertEqual(D.mro(), [D, B, C, A, object])
    self.assertEqual(D.__mro__, (D, B, C, A, object))
    self.assertEqual(D().f(), "C")
    class PerverseMetaType(type):
        def mro(cls):
            # Deliberately reverse the normal linearization.
            L = type.mro(cls)
            L.reverse()
            return L
    class X(D,B,C,A):
        __metaclass__ = PerverseMetaType
    self.assertEqual(X.__mro__, (object, A, C, B, D, X))
    self.assertEqual(X().f(), "A")
    try:
        class X(object):
            class __metaclass__(type):
                def mro(self):
                    # dict is not among X's bases
                    return [self, dict, object]
    except TypeError:
        pass
    else:
        self.fail("devious mro() return not caught")
    try:
        class X(object):
            class __metaclass__(type):
                def mro(self):
                    return [1]
    except TypeError:
        pass
    else:
        self.fail("non-class mro() return not caught")
    try:
        class X(object):
            class __metaclass__(type):
                def mro(self):
                    return 1
    except TypeError:
        pass
    else:
        self.fail("non-sequence mro() return not caught")
def test_overloading(self):
    """Attribute, item, and (Python 2 only) slice operator overloading
    on a new-style class."""
    # Testing operator overloading...
    class B(object):
        "Intermediate class because object doesn't have a __setattr__"
    class C(B):
        def __getattr__(self, name):
            if name == "foo":
                return ("getattr", name)
            else:
                raise AttributeError
        def __setattr__(self, name, value):
            if name == "foo":
                self.setattr = (name, value)
            else:
                return B.__setattr__(self, name, value)
        def __delattr__(self, name):
            if name == "foo":
                self.delattr = name
            else:
                return B.__delattr__(self, name)
        def __getitem__(self, key):
            return ("getitem", key)
        def __setitem__(self, key, value):
            self.setitem = (key, value)
        def __delitem__(self, key):
            self.delitem = key
        def __getslice__(self, i, j):
            return ("getslice", i, j)
        def __setslice__(self, i, j, value):
            self.setslice = (i, j, value)
        def __delslice__(self, i, j):
            self.delslice = (i, j)
    a = C()
    self.assertEqual(a.foo, ("getattr", "foo"))
    a.foo = 12
    self.assertEqual(a.setattr, ("foo", 12))
    del a.foo
    self.assertEqual(a.delattr, "foo")
    self.assertEqual(a[12], ("getitem", 12))
    a[12] = 21
    self.assertEqual(a.setitem, (12, 21))
    del a[12]
    self.assertEqual(a.delitem, 12)
    # Simple two-index slices go through the __*slice__ hooks in Py2.
    self.assertEqual(a[0:10], ("getslice", 0, 10))
    a[0:10] = "foo"
    self.assertEqual(a.setslice, (0, 10, "foo"))
    del a[0:10]
    self.assertEqual(a.delslice, (0, 10))
def test_methods(self):
    """Bound vs. unbound method semantics when methods are shared
    between classes."""
    # Testing methods...
    class C(object):
        def __init__(self, x):
            self.x = x
        def foo(self):
            return self.x
    c1 = C(1)
    self.assertEqual(c1.foo(), 1)
    class D(C):
        boo = C.foo    # unbound: re-binds to each D instance
        goo = c1.foo   # bound: always operates on c1
    d2 = D(2)
    self.assertEqual(d2.foo(), 2)
    self.assertEqual(d2.boo(), 2)
    self.assertEqual(d2.goo(), 1)
    class E(object):
        foo = C.foo
    self.assertEqual(E().foo, C.foo) # i.e., unbound
    self.assert_(repr(C.foo.__get__(C(1))).startswith("<bound method "))
def test_specials(self):
    """Default and overridden special methods: truth value, hash,
    comparison, str/repr, and containment (via __getitem__ or
    __contains__)."""
    # Testing special operators...
    # Test operators like __hash__ for which a built-in default exists
    # Test the default behavior for static classes
    class C(object):
        def __getitem__(self, i):
            if 0 <= i < 10: return i
            raise IndexError
    c1 = C()
    c2 = C()
    self.assert_(not not c1) # What?
    self.assertNotEqual(id(c1), id(c2))
    hash(c1)
    hash(c2)
    # default comparison falls back to comparing object identities
    self.assertEqual(cmp(c1, c2), cmp(id(c1), id(c2)))
    self.assertEqual(c1, c1)
    self.assert_(c1 != c2)
    self.assert_(not c1 != c1)
    self.assert_(not c1 == c2)
    # Note that the module name appears in str/repr, and that varies
    # depending on whether this test is run standalone or from a framework.
    self.assert_(str(c1).find('C object at ') >= 0)
    self.assertEqual(str(c1), repr(c1))
    # containment falls back to iterating __getitem__
    self.assert_(-1 not in c1)
    for i in range(10):
        self.assert_(i in c1)
    self.assertFalse(10 in c1)
    # Test the default behavior for dynamic classes
    class D(object):
        def __getitem__(self, i):
            if 0 <= i < 10: return i
            raise IndexError
    d1 = D()
    d2 = D()
    self.assert_(not not d1)
    self.assertNotEqual(id(d1), id(d2))
    hash(d1)
    hash(d2)
    self.assertEqual(cmp(d1, d2), cmp(id(d1), id(d2)))
    self.assertEqual(d1, d1)
    self.assertNotEqual(d1, d2)
    self.assert_(not d1 != d1)
    self.assert_(not d1 == d2)
    # Note that the module name appears in str/repr, and that varies
    # depending on whether this test is run standalone or from a framework.
    self.assert_(str(d1).find('D object at ') >= 0)
    self.assertEqual(str(d1), repr(d1))
    self.assert_(-1 not in d1)
    for i in range(10):
        self.assert_(i in d1)
    self.assertFalse(10 in d1)
    # Test overridden behavior for static classes
    class Proxy(object):
        def __init__(self, x):
            self.x = x
        def __nonzero__(self):
            return not not self.x
        def __hash__(self):
            return hash(self.x)
        def __eq__(self, other):
            return self.x == other
        def __ne__(self, other):
            return self.x != other
        def __cmp__(self, other):
            return cmp(self.x, other.x)
        def __str__(self):
            return "Proxy:%s" % self.x
        def __repr__(self):
            return "Proxy(%r)" % self.x
        def __contains__(self, value):
            return value in self.x
    p0 = Proxy(0)
    p1 = Proxy(1)
    p_1 = Proxy(-1)
    self.assertFalse(p0)
    self.assert_(not not p1)
    self.assertEqual(hash(p0), hash(0))
    self.assertEqual(p0, p0)
    self.assertNotEqual(p0, p1)
    self.assert_(not p0 != p0)
    self.assertEqual(not p0, p1)    # not p0 is True; Proxy(1) == True
    self.assertEqual(cmp(p0, p1), -1)
    self.assertEqual(cmp(p0, p0), 0)
    self.assertEqual(cmp(p0, p_1), 1)
    self.assertEqual(str(p0), "Proxy:0")
    self.assertEqual(repr(p0), "Proxy(0)")
    p10 = Proxy(range(10))
    self.assertFalse(-1 in p10)
    for i in range(10):
        self.assert_(i in p10)
    self.assertFalse(10 in p10)
    # Test overridden behavior for dynamic classes
    class DProxy(object):
        def __init__(self, x):
            self.x = x
        def __nonzero__(self):
            return not not self.x
        def __hash__(self):
            return hash(self.x)
        def __eq__(self, other):
            return self.x == other
        def __ne__(self, other):
            return self.x != other
        def __cmp__(self, other):
            return cmp(self.x, other.x)
        def __str__(self):
            return "DProxy:%s" % self.x
        def __repr__(self):
            return "DProxy(%r)" % self.x
        def __contains__(self, value):
            return value in self.x
    p0 = DProxy(0)
    p1 = DProxy(1)
    p_1 = DProxy(-1)
    self.assertFalse(p0)
    self.assert_(not not p1)
    self.assertEqual(hash(p0), hash(0))
    self.assertEqual(p0, p0)
    self.assertNotEqual(p0, p1)
    self.assertNotEqual(not p0, p0)
    self.assertEqual(not p0, p1)
    self.assertEqual(cmp(p0, p1), -1)
    self.assertEqual(cmp(p0, p0), 0)
    self.assertEqual(cmp(p0, p_1), 1)
    self.assertEqual(str(p0), "DProxy:0")
    self.assertEqual(repr(p0), "DProxy(0)")
    p10 = DProxy(range(10))
    self.assertFalse(-1 in p10)
    for i in range(10):
        self.assert_(i in p10)
    self.assertFalse(10 in p10)
    # Safety test for __cmp__
    def unsafecmp(a, b):
        # Calling a type's raw __cmp__ with a differently-typed second
        # operand must raise TypeError rather than compare blindly.
        try:
            a.__class__.__cmp__(a, b)
        except TypeError:
            pass
        else:
            self.fail("shouldn't allow %s.__cmp__(%r, %r)" % (
                a.__class__, a, b))
    unsafecmp(u"123", "123")
    unsafecmp("123", u"123")
    unsafecmp(1, 1.0)
    unsafecmp(1.0, 1)
    unsafecmp(1, 1L)
    unsafecmp(1L, 1)
def test_recursions(self):
    """Infinite-recursion guards: printing a str subclass whose __str__
    returns itself, and an operator implemented in terms of itself."""
    # Testing recursion checks ...
    class Letter(str):
        def __new__(cls, letter):
            if letter == 'EPS':
                return str.__new__(cls)
            return str.__new__(cls, letter)
        def __str__(self):
            if not self:
                return 'EPS'
            return self    # returning self makes print recurse
    # sys.stdout needs to be the original to trigger the recursion bug
    import sys
    test_stdout = sys.stdout
    sys.stdout = test_support.get_original_stdout()
    try:
        # nothing should actually be printed, this should raise an exception
        print Letter('w')
    except RuntimeError:
        pass
    else:
        self.fail("expected a RuntimeError for print recursion")
    finally:
        sys.stdout = test_stdout
    # Bug #1202533.
    class A(object):
        pass
    # __mul__ calls self * x, i.e. itself, so evaluation must hit the
    # interpreter's recursion limit instead of crashing.
    A.__mul__ = types.MethodType(lambda self, x: self * x, None, A)
    try:
        A()*2
    except RuntimeError:
        pass
    else:
        self.fail("expected a RuntimeError")
def test_weakrefs(self):
    """Weak references: supported by plain new-style classes, disabled
    by __slots__ unless '__weakref__' is listed explicitly."""
    # Testing weak references...
    import weakref
    class C(object):
        pass
    c = C()
    r = weakref.ref(c)
    self.assertEqual(r(), c)
    del c
    self.assertEqual(r(), None)    # ref is cleared once target dies
    del r
    class NoWeak(object):
        __slots__ = ['foo']
    no = NoWeak()
    try:
        weakref.ref(no)
    except TypeError, msg:
        self.assert_(str(msg).find("weak reference") >= 0)
    else:
        self.fail("weakref.ref(no) should be illegal")
    class Weak(object):
        __slots__ = ['foo', '__weakref__']
    yes = Weak()
    r = weakref.ref(yes)
    self.assertEqual(r(), yes)
    del yes
    self.assertEqual(r(), None)
    del r
def test_properties(self):
    """property(): get/set/delete plumbing, direct descriptor calls,
    read-only attributes of the property object, and __doc__."""
    # Testing property...
    class C(object):
        def getx(self):
            return self.__x
        def setx(self, value):
            self.__x = value
        def delx(self):
            del self.__x
        x = property(getx, setx, delx, doc="I'm the x property.")
    a = C()
    self.assertFalse(hasattr(a, "x"))
    a.x = 42
    self.assertEqual(a._C__x, 42)    # stored under the mangled name
    self.assertEqual(a.x, 42)
    del a.x
    self.assertFalse(hasattr(a, "x"))
    self.assertFalse(hasattr(a, "_C__x"))
    # Call the descriptor slots directly.
    C.x.__set__(a, 100)
    self.assertEqual(C.x.__get__(a), 100)
    C.x.__delete__(a)
    self.assertFalse(hasattr(a, "x"))
    raw = C.__dict__['x']
    self.assert_(isinstance(raw, property))
    attrs = dir(raw)
    self.assert_("__doc__" in attrs)
    self.assert_("fget" in attrs)
    self.assert_("fset" in attrs)
    self.assert_("fdel" in attrs)
    self.assertEqual(raw.__doc__, "I'm the x property.")
    self.assert_(raw.fget is C.__dict__['getx'])
    self.assert_(raw.fset is C.__dict__['setx'])
    self.assert_(raw.fdel is C.__dict__['delx'])
    for attr in "__doc__", "fget", "fset", "fdel":
        try:
            setattr(raw, attr, 42)
        except TypeError, msg:
            if str(msg).find('readonly') < 0:
                self.fail("when setting readonly attr %r on a property, "
                          "got unexpected TypeError msg %r" % (attr, str(msg)))
        else:
            self.fail("expected TypeError from trying to set readonly %r "
                      "attr on a property" % attr)
    class D(object):
        __getitem__ = property(lambda s: 1/0)
    d = D()
    try:
        # iteration triggers __getitem__, whose getter divides by zero
        for i in d:
            str(i)
    except ZeroDivisionError:
        pass
    else:
        self.fail("expected ZeroDivisionError from bad property")
    class E(object):
        def getter(self):
            "getter method"
            return 0
        def setter(self_, value):
            "setter method"
            pass
        # __doc__ is inherited from the getter when not given explicitly
        prop = property(getter)
        self.assertEqual(prop.__doc__, "getter method")
        prop2 = property(fset=setter)
        self.assertEqual(prop2.__doc__, None)
    # this segfaulted in 2.5b2
    try:
        import _testcapi
    except ImportError:
        pass
    else:
        class X(object):
            p = property(_testcapi.test_with_docstring)
def test_properties_plus(self):
    """The property .getter/.setter/.deleter decorators: building
    accessors incrementally, replacing them, and overriding a single
    accessor in a subclass."""
    class C(object):
        foo = property(doc="hello")
        @foo.getter
        def foo(self):
            return self._foo
        @foo.setter
        def foo(self, value):
            self._foo = abs(value)
        @foo.deleter
        def foo(self):
            del self._foo
    c = C()
    self.assertEqual(C.foo.__doc__, "hello")
    self.assertFalse(hasattr(c, "foo"))
    c.foo = -42
    self.assert_(hasattr(c, '_foo'))
    self.assertEqual(c._foo, 42)
    self.assertEqual(c.foo, 42)
    del c.foo
    self.assertFalse(hasattr(c, '_foo'))
    self.assertFalse(hasattr(c, "foo"))
    class D(C):
        @C.foo.deleter
        def foo(self):
            try:
                del self._foo
            except AttributeError:
                pass
    d = D()
    d.foo = 24
    self.assertEqual(d.foo, 24)
    del d.foo
    del d.foo    # second delete is silenced by D's deleter
    class E(object):
        @property
        def foo(self):
            return self._foo
        @foo.setter
        def foo(self, value):
            raise RuntimeError
        @foo.setter
        def foo(self, value):
            # the second @foo.setter fully replaces the first one
            self._foo = abs(value)
        @foo.deleter
        def foo(self, value=None):
            del self._foo
    e = E()
    e.foo = -42
    self.assertEqual(e.foo, 42)
    del e.foo
    class F(E):
        @E.foo.deleter
        def foo(self):
            del self._foo
        @foo.setter
        def foo(self, value):
            self._foo = max(0, value)
    f = F()
    f.foo = -10
    self.assertEqual(f.foo, 0)
    del f.foo
def test_dict_constructors(self):
    """dict() construction from mappings, keyword arguments, key/value
    pair sequences, and the errors raised for bad arguments."""
    # Testing dict constructor ...
    d = dict()
    self.assertEqual(d, {})
    d = dict({})
    self.assertEqual(d, {})
    d = dict({1: 2, 'a': 'b'})
    self.assertEqual(d, {1: 2, 'a': 'b'})
    self.assertEqual(d, dict(d.items()))
    self.assertEqual(d, dict(d.iteritems()))
    d = dict({'one':1, 'two':2})
    self.assertEqual(d, dict(one=1, two=2))
    self.assertEqual(d, dict(**d))
    self.assertEqual(d, dict({"one": 1}, two=2))
    self.assertEqual(d, dict([("two", 2)], one=1))
    # keyword arguments override pairs from the positional sequence
    self.assertEqual(d, dict([("one", 100), ("two", 200)], **d))
    self.assertEqual(d, dict(**d))
    for badarg in 0, 0L, 0j, "0", [0], (0,):
        try:
            dict(badarg)
        except TypeError:
            pass
        except ValueError:
            if badarg == "0":
                # It's a sequence, and its elements are also sequences (gotta
                # love strings <wink>), but they aren't of length 2, so this
                # one seemed better as a ValueError than a TypeError.
                pass
            else:
                self.fail("no TypeError from dict(%r)" % badarg)
        else:
            self.fail("no TypeError from dict(%r)" % badarg)
    try:
        dict({}, {})
    except TypeError:
        pass
    else:
        self.fail("no TypeError from dict({}, {})")
    class Mapping:
        # Lacks a .keys() method; will be added later.
        dict = {1:2, 3:4, 'a':1j}
    try:
        dict(Mapping())
    except TypeError:
        pass
    else:
        self.fail("no TypeError from dict(incomplete mapping)")
    Mapping.keys = lambda self: self.dict.keys()
    Mapping.__getitem__ = lambda self, i: self.dict[i]
    d = dict(Mapping())
    self.assertEqual(d, Mapping.dict)
    # Init from sequence of iterable objects, each producing a 2-sequence.
    class AddressBookEntry:
        def __init__(self, first, last):
            self.first = first
            self.last = last
        def __iter__(self):
            return iter([self.first, self.last])
    d = dict([AddressBookEntry('Tim', 'Warsaw'),
              AddressBookEntry('Barry', 'Peters'),
              AddressBookEntry('Tim', 'Peters'),
              AddressBookEntry('Barry', 'Warsaw')])
    # later entries with the same key win
    self.assertEqual(d, {'Barry': 'Warsaw', 'Tim': 'Peters'})
    d = dict(zip(range(4), range(1, 5)))
    self.assertEqual(d, dict([(i, i+1) for i in range(4)]))
    # Bad sequence lengths.
    for bad in [('tooshort',)], [('too', 'long', 'by 1')]:
        try:
            dict(bad)
        except ValueError:
            pass
        else:
            self.fail("no ValueError from dict(%r)" % bad)
def test_dir(self):
    """dir() on classic classes, new-style classes, instances, module
    subclasses, and objects with a proxied __class__."""
    # Testing dir() ...
    junk = 12
    self.assertEqual(dir(), ['junk', 'self'])    # current local scope
    del junk
    # Just make sure these don't blow up!
    for arg in 2, 2L, 2j, 2e0, [2], "2", u"2", (2,), {2:2}, type, self.test_dir:
        dir(arg)
    # Try classic classes.
    class C:
        Cdata = 1
        def Cmethod(self): pass
    cstuff = ['Cdata', 'Cmethod', '__doc__', '__module__']
    self.assertEqual(dir(C), cstuff)
    self.assert_('im_self' in dir(C.Cmethod))
    c = C()  # c.__doc__ is an odd thing to see here; ditto c.__module__.
    self.assertEqual(dir(c), cstuff)
    c.cdata = 2
    c.cmethod = lambda self: 0
    self.assertEqual(dir(c), cstuff + ['cdata', 'cmethod'])
    self.assert_('im_self' in dir(c.Cmethod))
    class A(C):
        Adata = 1
        def Amethod(self): pass
    astuff = ['Adata', 'Amethod'] + cstuff
    self.assertEqual(dir(A), astuff)
    self.assert_('im_self' in dir(A.Amethod))
    a = A()
    self.assertEqual(dir(a), astuff)
    self.assert_('im_self' in dir(a.Amethod))
    a.adata = 42
    a.amethod = lambda self: 3
    self.assertEqual(dir(a), astuff + ['adata', 'amethod'])
    # The same, but with new-style classes.  Since these have object as a
    # base class, a lot more gets sucked in.
    def interesting(strings):
        # filter out the dunder noise inherited from object
        return [s for s in strings if not s.startswith('_')]
    class C(object):
        Cdata = 1
        def Cmethod(self): pass
    cstuff = ['Cdata', 'Cmethod']
    self.assertEqual(interesting(dir(C)), cstuff)
    c = C()
    self.assertEqual(interesting(dir(c)), cstuff)
    self.assert_('im_self' in dir(C.Cmethod))
    c.cdata = 2
    c.cmethod = lambda self: 0
    self.assertEqual(interesting(dir(c)), cstuff + ['cdata', 'cmethod'])
    self.assert_('im_self' in dir(c.Cmethod))
    class A(C):
        Adata = 1
        def Amethod(self): pass
    astuff = ['Adata', 'Amethod'] + cstuff
    self.assertEqual(interesting(dir(A)), astuff)
    self.assert_('im_self' in dir(A.Amethod))
    a = A()
    self.assertEqual(interesting(dir(a)), astuff)
    a.adata = 42
    a.amethod = lambda self: 3
    self.assertEqual(interesting(dir(a)), astuff + ['adata', 'amethod'])
    self.assert_('im_self' in dir(a.Amethod))
    # Try a module subclass.
    import sys
    class M(type(sys)):
        pass
    minstance = M("m")
    minstance.b = 2
    minstance.a = 1
    names = [x for x in dir(minstance) if x not in ["__name__", "__doc__"]]
    self.assertEqual(names, ['a', 'b'])    # dir() results are sorted
    class M2(M):
        def getdict(self):
            return "Not a dict!"
        __dict__ = property(getdict)
    m2instance = M2("m2")
    m2instance.b = 2
    m2instance.a = 1
    self.assertEqual(m2instance.__dict__, "Not a dict!")
    try:
        dir(m2instance)
    except TypeError:
        pass
    # Two essentially featureless objects, just inheriting stuff from
    # object.
    self.assertEqual(dir(None), dir(Ellipsis))
    # Nasty test case for proxied objects
    class Wrapper(object):
        def __init__(self, obj):
            self.__obj = obj
        def __repr__(self):
            return "Wrapper(%s)" % repr(self.__obj)
        def __getitem__(self, key):
            return Wrapper(self.__obj[key])
        def __len__(self):
            return len(self.__obj)
        def __getattr__(self, name):
            return Wrapper(getattr(self.__obj, name))
    class C(object):
        def __getclass(self):
            return Wrapper(type(self))
        __class__ = property(__getclass)
    dir(C())  # This used to segfault
def test_supers(self):
# Testing super...
class A(object):
def meth(self, a):
return "A(%r)" % a
self.assertEqual(A().meth(1), "A(1)")
class B(A):
def __init__(self):
self.__super = super(B, self)
def meth(self, a):
return "B(%r)" % a + self.__super.meth(a)
self.assertEqual(B().meth(2), "B(2)A(2)")
class C(A):
def meth(self, a):
return "C(%r)" % a + self.__super.meth(a)
C._C__super = super(C)
self.assertEqual(C().meth(3), "C(3)A(3)")
class D(C, B):
def meth(self, a):
return "D(%r)" % a + super(D, self).meth(a)
self.assertEqual(D().meth(4), "D(4)C(4)B(4)A(4)")
# Test for subclassing super
class mysuper(super):
def __init__(self, *args):
return super(mysuper, self).__init__(*args)
class E(D):
def meth(self, a):
return "E(%r)" % a + mysuper(E, self).meth(a)
self.assertEqual(E().meth(5), "E(5)D(5)C(5)B(5)A(5)")
class F(E):
def meth(self, a):
s = self.__super # == mysuper(F, self)
return "F(%r)[%s]" % (a, s.__class__.__name__) + s.meth(a)
F._F__super = mysuper(F)
self.assertEqual(F().meth(6), "F(6)[mysuper]E(6)D(6)C(6)B(6)A(6)")
# Make sure certain errors are raised
try:
super(D, 42)
except TypeError:
pass
else:
self.fail("shouldn't allow super(D, 42)")
try:
super(D, C())
except TypeError:
pass
else:
self.fail("shouldn't allow super(D, C())")
try:
super(D).__get__(12)
except TypeError:
pass
else:
self.fail("shouldn't allow super(D).__get__(12)")
try:
super(D).__get__(C())
except TypeError:
pass
else:
self.fail("shouldn't allow super(D).__get__(C())")
# Make sure data descriptors can be overridden and accessed via super
# (new feature in Python 2.3)
class DDbase(object):
def getx(self): return 42
x = property(getx)
class DDsub(DDbase):
def getx(self): return "hello"
x = property(getx)
dd = DDsub()
self.assertEqual(dd.x, "hello")
self.assertEqual(super(DDsub, dd).x, 42)
# Ensure that super() lookup of descriptor from classmethod
# works (SF ID# 743627)
class Base(object):
aProp = property(lambda self: "foo")
class Sub(Base):
@classmethod
def test(klass):
return super(Sub,klass).aProp
self.assertEqual(Sub.test(), Base.aProp)
# Verify that super() doesn't allow keyword args
try:
super(Base, kw=1)
except TypeError:
pass
else:
self.assertEqual("super shouldn't accept keyword args")
    def test_basic_inheritance(self):
        # Testing inheritance from basic types...
        # For each built-in (int, long, float, complex, tuple, str, unicode,
        # list, file) define a subclass, check the instance compares/hashes
        # equal to the base value, and check that operator results and slice
        # copies come back as the *plain base type*, not the subclass.
        class hexint(int):
            def __repr__(self):
                return hex(self)
            def __add__(self, other):
                return hexint(int.__add__(self, other))
            # (Note that overriding __radd__ doesn't work,
            # because the int type gets first dibs.)
        self.assertEqual(repr(hexint(7) + 9), "0x10")
        self.assertEqual(repr(hexint(1000) + 7), "0x3ef")
        a = hexint(12345)
        self.assertEqual(a, 12345)
        self.assertEqual(int(a), 12345)
        self.assert_(int(a).__class__ is int)
        self.assertEqual(hash(a), hash(12345))
        # Arithmetic on the subclass yields plain int unless overridden.
        self.assert_((+a).__class__ is int)
        self.assert_((a >> 0).__class__ is int)
        self.assert_((a << 0).__class__ is int)
        self.assert_((hexint(0) << 12).__class__ is int)
        self.assert_((hexint(0) >> 12).__class__ is int)
        # long subclass; __slots__ = [] keeps instances dict-free.
        class octlong(long):
            __slots__ = []
            def __str__(self):
                s = oct(self)
                if s[-1] == 'L':
                    s = s[:-1]
                return s
            def __add__(self, other):
                return self.__class__(super(octlong, self).__add__(other))
            __radd__ = __add__
        self.assertEqual(str(octlong(3) + 5), "010")
        # (Note that overriding __radd__ here only seems to work
        # because the example uses a short int left argument.)
        self.assertEqual(str(5 + octlong(3000)), "05675")
        a = octlong(12345)
        self.assertEqual(a, 12345L)
        self.assertEqual(long(a), 12345L)
        self.assertEqual(hash(a), hash(12345L))
        self.assert_(long(a).__class__ is long)
        # All unary/binary ops (except the overridden __add__) return long.
        self.assert_((+a).__class__ is long)
        self.assert_((-a).__class__ is long)
        self.assert_((-octlong(0)).__class__ is long)
        self.assert_((a >> 0).__class__ is long)
        self.assert_((a << 0).__class__ is long)
        self.assert_((a - 0).__class__ is long)
        self.assert_((a * 1).__class__ is long)
        self.assert_((a ** 1).__class__ is long)
        self.assert_((a // 1).__class__ is long)
        self.assert_((1 * a).__class__ is long)
        self.assert_((a | 0).__class__ is long)
        self.assert_((a ^ 0).__class__ is long)
        self.assert_((a & -1L).__class__ is long)
        self.assert_((octlong(0) << 12).__class__ is long)
        self.assert_((octlong(0) >> 12).__class__ is long)
        self.assert_(abs(octlong(0)).__class__ is long)
        # Because octlong overrides __add__, we can't check the absence of +0
        # optimizations using octlong.
        class longclone(long):
            pass
        a = longclone(1)
        self.assert_((a + 0).__class__ is long)
        self.assert_((0 + a).__class__ is long)
        # Check that negative clones don't segfault
        a = longclone(-1)
        self.assertEqual(a.__dict__, {})
        self.assertEqual(long(a), -1)  # check that PyNumber_Long() copies the sign bit
        # float subclass carrying an extra slot used by its __repr__.
        class precfloat(float):
            __slots__ = ['prec']
            def __init__(self, value=0.0, prec=12):
                self.prec = int(prec)
            def __repr__(self):
                return "%.*g" % (self.prec, self)
        self.assertEqual(repr(precfloat(1.1)), "1.1")
        a = precfloat(12345)
        self.assertEqual(a, 12345.0)
        self.assertEqual(float(a), 12345.0)
        self.assert_(float(a).__class__ is float)
        self.assertEqual(hash(a), hash(12345.0))
        self.assert_((+a).__class__ is float)
        # complex subclass with a deliberately odd repr format.
        class madcomplex(complex):
            def __repr__(self):
                return "%.17gj%+.17g" % (self.imag, self.real)
        a = madcomplex(-3, 4)
        self.assertEqual(repr(a), "4j-3")
        base = complex(-3, 4)
        self.assertEqual(base.__class__, complex)
        self.assertEqual(a, base)
        self.assertEqual(complex(a), base)
        self.assertEqual(complex(a).__class__, complex)
        a = madcomplex(a)  # just trying another form of the constructor
        self.assertEqual(repr(a), "4j-3")
        self.assertEqual(a, base)
        self.assertEqual(complex(a), base)
        self.assertEqual(complex(a).__class__, complex)
        self.assertEqual(hash(a), hash(base))
        self.assertEqual((+a).__class__, complex)
        self.assertEqual((a + 0).__class__, complex)
        self.assertEqual(a + 0, base)
        self.assertEqual((a - 0).__class__, complex)
        self.assertEqual(a - 0, base)
        self.assertEqual((a * 1).__class__, complex)
        self.assertEqual(a * 1, base)
        self.assertEqual((a / 1).__class__, complex)
        self.assertEqual(a / 1, base)
        # tuple subclass with a memoized reverse; _rev caches the result.
        class madtuple(tuple):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__(L)
                return self._rev
        a = madtuple((1,2,3,4,5,6,7,8,9,0))
        self.assertEqual(a, (1,2,3,4,5,6,7,8,9,0))
        self.assertEqual(a.rev(), madtuple((0,9,8,7,6,5,4,3,2,1)))
        self.assertEqual(a.rev().rev(), madtuple((1,2,3,4,5,6,7,8,9,0)))
        # Round-trip reversal over a range of sizes (incl. 0 and 1 element).
        for i in range(512):
            t = madtuple(range(i))
            u = t.rev()
            v = u.rev()
            self.assertEqual(v, t)
        a = madtuple((1,2,3,4,5))
        self.assertEqual(tuple(a), (1,2,3,4,5))
        self.assert_(tuple(a).__class__ is tuple)
        self.assertEqual(hash(a), hash((1,2,3,4,5)))
        self.assert_(a[:].__class__ is tuple)
        self.assert_((a * 1).__class__ is tuple)
        self.assert_((a * 0).__class__ is tuple)
        self.assert_((a + ()).__class__ is tuple)
        a = madtuple(())
        self.assertEqual(tuple(a), ())
        self.assert_(tuple(a).__class__ is tuple)
        self.assert_((a + a).__class__ is tuple)
        self.assert_((a * 0).__class__ is tuple)
        self.assert_((a * 1).__class__ is tuple)
        self.assert_((a * 2).__class__ is tuple)
        self.assert_(a[:].__class__ is tuple)
        # str subclass with the same memoized-reverse pattern.
        class madstring(str):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__("".join(L))
                return self._rev
        s = madstring("abcdefghijklmnopqrstuvwxyz")
        self.assertEqual(s, "abcdefghijklmnopqrstuvwxyz")
        self.assertEqual(s.rev(), madstring("zyxwvutsrqponmlkjihgfedcba"))
        self.assertEqual(s.rev().rev(), madstring("abcdefghijklmnopqrstuvwxyz"))
        for i in range(256):
            s = madstring("".join(map(chr, range(i))))
            t = s.rev()
            u = t.rev()
            self.assertEqual(u, s)
        s = madstring("12345")
        self.assertEqual(str(s), "12345")
        self.assert_(str(s).__class__ is str)
        # NUL bytes exercise hashing/dict lookup with embedded \x00.
        base = "\x00" * 5
        s = madstring(base)
        self.assertEqual(s, base)
        self.assertEqual(str(s), base)
        self.assert_(str(s).__class__ is str)
        self.assertEqual(hash(s), hash(base))
        self.assertEqual({s: 1}[base], 1)
        self.assertEqual({base: 1}[s], 1)
        # Every str operation must return plain str, not madstring.
        self.assert_((s + "").__class__ is str)
        self.assertEqual(s + "", base)
        self.assert_(("" + s).__class__ is str)
        self.assertEqual("" + s, base)
        self.assert_((s * 0).__class__ is str)
        self.assertEqual(s * 0, "")
        self.assert_((s * 1).__class__ is str)
        self.assertEqual(s * 1, base)
        self.assert_((s * 2).__class__ is str)
        self.assertEqual(s * 2, base + base)
        self.assert_(s[:].__class__ is str)
        self.assertEqual(s[:], base)
        self.assert_(s[0:0].__class__ is str)
        self.assertEqual(s[0:0], "")
        self.assert_(s.strip().__class__ is str)
        self.assertEqual(s.strip(), base)
        self.assert_(s.lstrip().__class__ is str)
        self.assertEqual(s.lstrip(), base)
        self.assert_(s.rstrip().__class__ is str)
        self.assertEqual(s.rstrip(), base)
        identitytab = ''.join([chr(i) for i in range(256)])
        self.assert_(s.translate(identitytab).__class__ is str)
        self.assertEqual(s.translate(identitytab), base)
        self.assert_(s.translate(identitytab, "x").__class__ is str)
        self.assertEqual(s.translate(identitytab, "x"), base)
        self.assertEqual(s.translate(identitytab, "\x00"), "")
        self.assert_(s.replace("x", "x").__class__ is str)
        self.assertEqual(s.replace("x", "x"), base)
        self.assert_(s.ljust(len(s)).__class__ is str)
        self.assertEqual(s.ljust(len(s)), base)
        self.assert_(s.rjust(len(s)).__class__ is str)
        self.assertEqual(s.rjust(len(s)), base)
        self.assert_(s.center(len(s)).__class__ is str)
        self.assertEqual(s.center(len(s)), base)
        self.assert_(s.lower().__class__ is str)
        self.assertEqual(s.lower(), base)
        # unicode subclass; same pattern and expectations as madstring.
        class madunicode(unicode):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__(u"".join(L))
                return self._rev
        u = madunicode("ABCDEF")
        self.assertEqual(u, u"ABCDEF")
        self.assertEqual(u.rev(), madunicode(u"FEDCBA"))
        self.assertEqual(u.rev().rev(), madunicode(u"ABCDEF"))
        base = u"12345"
        u = madunicode(base)
        self.assertEqual(unicode(u), base)
        self.assert_(unicode(u).__class__ is unicode)
        self.assertEqual(hash(u), hash(base))
        self.assertEqual({u: 1}[base], 1)
        self.assertEqual({base: 1}[u], 1)
        # Every unicode operation must return plain unicode.
        self.assert_(u.strip().__class__ is unicode)
        self.assertEqual(u.strip(), base)
        self.assert_(u.lstrip().__class__ is unicode)
        self.assertEqual(u.lstrip(), base)
        self.assert_(u.rstrip().__class__ is unicode)
        self.assertEqual(u.rstrip(), base)
        self.assert_(u.replace(u"x", u"x").__class__ is unicode)
        self.assertEqual(u.replace(u"x", u"x"), base)
        self.assert_(u.replace(u"xy", u"xy").__class__ is unicode)
        self.assertEqual(u.replace(u"xy", u"xy"), base)
        self.assert_(u.center(len(u)).__class__ is unicode)
        self.assertEqual(u.center(len(u)), base)
        self.assert_(u.ljust(len(u)).__class__ is unicode)
        self.assertEqual(u.ljust(len(u)), base)
        self.assert_(u.rjust(len(u)).__class__ is unicode)
        self.assertEqual(u.rjust(len(u)), base)
        self.assert_(u.lower().__class__ is unicode)
        self.assertEqual(u.lower(), base)
        self.assert_(u.upper().__class__ is unicode)
        self.assertEqual(u.upper(), base)
        self.assert_(u.capitalize().__class__ is unicode)
        self.assertEqual(u.capitalize(), base)
        self.assert_(u.title().__class__ is unicode)
        self.assertEqual(u.title(), base)
        self.assert_((u + u"").__class__ is unicode)
        self.assertEqual(u + u"", base)
        self.assert_((u"" + u).__class__ is unicode)
        self.assertEqual(u"" + u, base)
        self.assert_((u * 0).__class__ is unicode)
        self.assertEqual(u * 0, u"")
        self.assert_((u * 1).__class__ is unicode)
        self.assertEqual(u * 1, base)
        self.assert_((u * 2).__class__ is unicode)
        self.assertEqual(u * 2, base + base)
        self.assert_(u[:].__class__ is unicode)
        self.assertEqual(u[:], base)
        self.assert_(u[0:0].__class__ is unicode)
        self.assertEqual(u[0:0], u"")
        # list subclass: in-place mutation goes through the base type.
        class sublist(list):
            pass
        a = sublist(range(5))
        self.assertEqual(a, range(5))
        a.append("hello")
        self.assertEqual(a, range(5) + ["hello"])
        a[5] = 5
        self.assertEqual(a, range(6))
        a.extend(range(6, 20))
        self.assertEqual(a, range(20))
        a[-5:] = []
        self.assertEqual(a, range(15))
        del a[10:15]
        self.assertEqual(len(a), 10)
        self.assertEqual(a, range(10))
        self.assertEqual(list(a), range(10))
        self.assertEqual(a[0], 0)
        self.assertEqual(a[9], 9)
        self.assertEqual(a[-10], 0)
        self.assertEqual(a[-1], 9)
        self.assertEqual(a[:5], range(5))
        # file subclass: overridden readline() is used by iteration too.
        class CountedInput(file):
            """Counts lines read by self.readline().
            self.lineno is the 0-based ordinal of the last line read, up to
            a maximum of one greater than the number of lines in the file.
            self.ateof is true if and only if the final "" line has been read,
            at which point self.lineno stops incrementing, and further calls
            to readline() continue to return "".
            """
            lineno = 0
            ateof = 0
            def readline(self):
                if self.ateof:
                    return ""
                s = file.readline(self)
                # Next line works too.
                # s = super(CountedInput, self).readline()
                self.lineno += 1
                if s == "":
                    self.ateof = 1
                return s
        f = file(name=test_support.TESTFN, mode='w')
        lines = ['a\n', 'b\n', 'c\n']
        try:
            f.writelines(lines)
            f.close()
            f = CountedInput(test_support.TESTFN)
            # Expect lineno 1..4 for the 3 lines plus first EOF read, then
            # lineno stays at 4 for the second EOF read.
            for (i, expected) in zip(range(1, 5) + [4], lines + 2 * [""]):
                got = f.readline()
                self.assertEqual(expected, got)
                self.assertEqual(f.lineno, i)
                self.assertEqual(f.ateof, (i > len(lines)))
            f.close()
        finally:
            # Best-effort cleanup of the temp file regardless of failures.
            try:
                f.close()
            except:
                pass
            test_support.unlink(test_support.TESTFN)
def test_keywords(self):
# Testing keyword args to basic type constructors ...
self.assertEqual(int(x=1), 1)
self.assertEqual(float(x=2), 2.0)
self.assertEqual(long(x=3), 3L)
self.assertEqual(complex(imag=42, real=666), complex(666, 42))
self.assertEqual(str(object=500), '500')
self.assertEqual(unicode(string='abc', errors='strict'), u'abc')
self.assertEqual(tuple(sequence=range(3)), (0, 1, 2))
self.assertEqual(list(sequence=(0, 1, 2)), range(3))
# note: as of Python 2.3, dict() no longer has an "items" keyword arg
for constructor in (int, float, long, complex, str, unicode,
tuple, list, file):
try:
constructor(bogus_keyword_arg=1)
except TypeError:
pass
else:
self.fail("expected TypeError from bogus keyword argument to %r"
% constructor)
    def test_str_subclass_as_dict_key(self):
        # Testing a str subclass used as dict key ...
        # A str subclass with case-insensitive __eq__/__hash__ must work as a
        # dict key: lookups succeed regardless of the case of the probe key.
        class cistr(str):
            """Subclass of str that computes __eq__ case-insensitively.
            Also computes a hash code of the string in canonical form.
            """
            def __init__(self, value):
                # canonical form is the lower-cased text; hash is precomputed
                # from it so equal-ignoring-case strings hash equal.
                self.canonical = value.lower()
                self.hashcode = hash(self.canonical)
            def __eq__(self, other):
                if not isinstance(other, cistr):
                    other = cistr(other)
                return self.canonical == other.canonical
            def __hash__(self):
                return self.hashcode
        self.assertEqual(cistr('ABC'), 'abc')
        self.assertEqual('aBc', cistr('ABC'))
        # str() still yields the original-case text.
        self.assertEqual(str(cistr('ABC')), 'ABC')
        d = {cistr('one'): 1, cistr('two'): 2, cistr('tHree'): 3}
        self.assertEqual(d[cistr('one')], 1)
        self.assertEqual(d[cistr('tWo')], 2)
        self.assertEqual(d[cistr('THrEE')], 3)
        self.assert_(cistr('ONe') in d)
        self.assertEqual(d.get(cistr('thrEE')), 3)
    def test_classic_comparisons(self):
        # Testing classic comparisons...
        # __cmp__-based comparison must behave the same whether the class is
        # classic, an int subclass, or a plain new-style class, and must
        # agree with comparing the underlying ints for both C-vs-C and
        # mixed C-vs-int operand orders.
        class classic:
            pass
        for base in (classic, int, object):
            class C(base):
                def __init__(self, value):
                    self.value = int(value)
                def __cmp__(self, other):
                    if isinstance(other, C):
                        return cmp(self.value, other.value)
                    if isinstance(other, int) or isinstance(other, long):
                        return cmp(self.value, other)
                    return NotImplemented
                __hash__ = None # Silence Py3k warning
            c1 = C(1)
            c2 = C(2)
            c3 = C(3)
            self.assertEqual(c1, 1)
            c = {1: c1, 2: c2, 3: c3}
            for x in 1, 2, 3:
                for y in 1, 2, 3:
                    # cmp() and every rich-comparison operator (derived from
                    # __cmp__) must match the result on the plain ints.
                    self.assert_(cmp(c[x], c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
                    for op in "<", "<=", "==", "!=", ">", ">=":
                        self.assert_(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
                                     "x=%d, y=%d" % (x, y))
                    self.assert_(cmp(c[x], y) == cmp(x, y), "x=%d, y=%d" % (x, y))
                    self.assert_(cmp(x, c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
    def test_rich_comparisons(self):
        # Testing rich comparisons...
        # Rich comparison methods on subclasses of built-ins must take
        # priority over __cmp__ and over the base type's comparison, for
        # several different base classes and mixed operand orders.
        class Z(complex):
            pass
        z = Z(1)
        self.assertEqual(z, 1+0j)
        self.assertEqual(1+0j, z)
        # complex subclass with an approximate __eq__ (tolerance 1e-6).
        class ZZ(complex):
            def __eq__(self, other):
                try:
                    return abs(self - other) <= 1e-6
                except:
                    return NotImplemented
            __hash__ = None # Silence Py3k warning
        zz = ZZ(1.0000003)
        self.assertEqual(zz, 1+0j)
        self.assertEqual(1+0j, zz)
        class classic:
            pass
        for base in (classic, int, object, list):
            class C(base):
                def __init__(self, value):
                    self.value = int(value)
                # NOTE: the first parameter is deliberately named "self_" so
                # that "self" inside the body still refers to the TestCase;
                # __cmp__ must never be called when rich comparisons exist.
                def __cmp__(self_, other):
                    self.fail("shouldn't call __cmp__")
                __hash__ = None # Silence Py3k warning
                def __eq__(self, other):
                    if isinstance(other, C):
                        return self.value == other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value == other
                    return NotImplemented
                def __ne__(self, other):
                    if isinstance(other, C):
                        return self.value != other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value != other
                    return NotImplemented
                def __lt__(self, other):
                    if isinstance(other, C):
                        return self.value < other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value < other
                    return NotImplemented
                def __le__(self, other):
                    if isinstance(other, C):
                        return self.value <= other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value <= other
                    return NotImplemented
                def __gt__(self, other):
                    if isinstance(other, C):
                        return self.value > other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value > other
                    return NotImplemented
                def __ge__(self, other):
                    if isinstance(other, C):
                        return self.value >= other.value
                    if isinstance(other, int) or isinstance(other, long):
                        return self.value >= other
                    return NotImplemented
            c1 = C(1)
            c2 = C(2)
            c3 = C(3)
            self.assertEqual(c1, 1)
            c = {1: c1, 2: c2, 3: c3}
            for x in 1, 2, 3:
                for y in 1, 2, 3:
                    # Each operator must match the plain-int result for
                    # C-vs-C, C-vs-int and int-vs-C operand orders.
                    for op in "<", "<=", "==", "!=", ">", ">=":
                        self.assert_(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
                                     "x=%d, y=%d" % (x, y))
                        self.assert_(eval("c[x] %s y" % op) == eval("x %s y" % op),
                                     "x=%d, y=%d" % (x, y))
                        self.assert_(eval("x %s c[y]" % op) == eval("x %s y" % op),
                                     "x=%d, y=%d" % (x, y))
def test_coercions(self):
# Testing coercions...
class I(int): pass
coerce(I(0), 0)
coerce(0, I(0))
class L(long): pass
coerce(L(0), 0)
coerce(L(0), 0L)
coerce(0, L(0))
coerce(0L, L(0))
class F(float): pass
coerce(F(0), 0)
coerce(F(0), 0L)
coerce(F(0), 0.)
coerce(0, F(0))
coerce(0L, F(0))
coerce(0., F(0))
class C(complex): pass
coerce(C(0), 0)
coerce(C(0), 0L)
coerce(C(0), 0.)
coerce(C(0), 0j)
coerce(0, C(0))
coerce(0L, C(0))
coerce(0., C(0))
coerce(0j, C(0))
def test_descrdoc(self):
# Testing descriptor doc strings...
def check(descr, what):
self.assertEqual(descr.__doc__, what)
check(file.closed, "True if the file is closed") # getset descriptor
check(file.name, "file name") # member descriptor
    def test_doc_descriptor(self):
        # Testing __doc__ descriptor...
        # SF bug 542984
        # A descriptor assigned to __doc__ must be invoked on both class and
        # instance access, for classic and new-style classes alike.
        class DocDescr(object):
            def __get__(self, object, otype):
                # Report how the descriptor was reached: via an instance
                # (object is set) and/or via the class (otype is set).
                if object:
                    object = object.__class__.__name__ + ' instance'
                if otype:
                    otype = otype.__name__
                return 'object=%s; type=%s' % (object, otype)
        class OldClass:
            __doc__ = DocDescr()
        class NewClass(object):
            __doc__ = DocDescr()
        self.assertEqual(OldClass.__doc__, 'object=None; type=OldClass')
        self.assertEqual(OldClass().__doc__, 'object=OldClass instance; type=OldClass')
        self.assertEqual(NewClass.__doc__, 'object=None; type=NewClass')
        self.assertEqual(NewClass().__doc__, 'object=NewClass instance; type=NewClass')
    def test_set_class(self):
        # Testing __class__ assignment...
        # Assigning to instance.__class__ is allowed only between classes
        # with compatible instance layouts; everything else must raise
        # TypeError, and __class__ can never be deleted.
        class C(object): pass
        class D(object): pass
        class E(object): pass
        class F(D, E): pass
        # All four are plain object subclasses -> mutually assignable.
        for cls in C, D, E, F:
            for cls2 in C, D, E, F:
                x = cls()
                x.__class__ = cls2
                self.assert_(x.__class__ is cls2)
                x.__class__ = cls
                self.assert_(x.__class__ is cls)
        def cant(x, C):
            # Assert that both assigning C to x.__class__ and deleting
            # x.__class__ raise TypeError.
            try:
                x.__class__ = C
            except TypeError:
                pass
            else:
                self.fail("shouldn't allow %r.__class__ = %r" % (x, C))
            try:
                delattr(x, "__class__")
            except TypeError:
                pass
            else:
                self.fail("shouldn't allow del %r.__class__" % x)
        cant(C(), list)
        cant(list(), C)
        cant(C(), 1)
        cant(C(), object)
        cant(object(), list)
        cant(list(), object)
        class Int(int): __slots__ = []
        cant(2, Int)
        cant(Int(), int)
        cant(True, int)
        cant(2, bool)
        o = object()
        cant(o, type(1))
        cant(o, type(None))
        del o
        # Layout-compatibility cases: same slot names in different order (G/H),
        # str vs unicode slot names (I), different names (J), extra slot (K),
        # subclasses adding a slot (L/M), and __weakref__/__dict__ variants
        # (N/P/Q/R).
        class G(object):
            __slots__ = ["a", "b"]
        class H(object):
            __slots__ = ["b", "a"]
        try:
            unicode
        except NameError:
            class I(object):
                __slots__ = ["a", "b"]
        else:
            class I(object):
                __slots__ = [unicode("a"), unicode("b")]
        class J(object):
            __slots__ = ["c", "b"]
        class K(object):
            __slots__ = ["a", "b", "d"]
        class L(H):
            __slots__ = ["e"]
        class M(I):
            __slots__ = ["e"]
        class N(J):
            __slots__ = ["__weakref__"]
        class P(J):
            __slots__ = ["__dict__"]
        class Q(J):
            pass
        class R(J):
            __slots__ = ["__dict__", "__weakref__"]
        # These pairs are layout-compatible: assignment must succeed and the
        # slot value must survive the class swap in both directions.
        for cls, cls2 in ((G, H), (G, I), (I, H), (Q, R), (R, Q)):
            x = cls()
            x.a = 1
            x.__class__ = cls2
            self.assert_(x.__class__ is cls2,
                   "assigning %r as __class__ for %r silently failed" % (cls2, x))
            self.assertEqual(x.a, 1)
            x.__class__ = cls
            self.assert_(x.__class__ is cls,
                   "assigning %r as __class__ for %r silently failed" % (cls, x))
            self.assertEqual(x.a, 1)
        # All other combinations are layout-incompatible and must fail.
        for cls in G, J, K, L, M, N, P, R, list, Int:
            for cls2 in G, J, K, L, M, N, P, R, list, Int:
                if cls is cls2:
                    continue
                cant(cls(), cls2)
    def test_set_dict(self):
        # Testing __dict__ assignment...
        # Instances of plain classes allow __dict__ replacement/deletion;
        # classes and modules have read-only __dict__; exceptions allow
        # replacement but not deletion.
        class C(object): pass
        a = C()
        a.__dict__ = {'b': 1}
        self.assertEqual(a.b, 1)
        def cant(x, dict):
            # Assert that assigning `dict` to x.__dict__ raises.
            try:
                x.__dict__ = dict
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("shouldn't allow %r.__dict__ = %r" % (x, dict))
        cant(a, None)
        cant(a, [])
        cant(a, 1)
        del a.__dict__ # Deleting __dict__ is allowed
        class Base(object):
            pass
        def verify_dict_readonly(x):
            """
            x has to be an instance of a class inheriting from Base.
            """
            cant(x, {})
            try:
                del x.__dict__
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("shouldn't allow del %r.__dict__" % x)
            # Going through the descriptor directly must be blocked too.
            dict_descr = Base.__dict__["__dict__"]
            try:
                dict_descr.__set__(x, {})
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("dict_descr allowed access to %r's dict" % x)
        # Classes don't allow __dict__ assignment and have readonly dicts
        class Meta1(type, Base):
            pass
        class Meta2(Base, type):
            pass
        class D(object):
            __metaclass__ = Meta1
        class E(object):
            __metaclass__ = Meta2
        for cls in C, D, E:
            verify_dict_readonly(cls)
            class_dict = cls.__dict__
            try:
                class_dict["spam"] = "eggs"
            except TypeError:
                pass
            else:
                self.fail("%r's __dict__ can be modified" % cls)
        # Modules also disallow __dict__ assignment
        class Module1(types.ModuleType, Base):
            pass
        class Module2(Base, types.ModuleType):
            pass
        for ModuleType in Module1, Module2:
            mod = ModuleType("spam")
            verify_dict_readonly(mod)
            # ... but writing *into* the module's dict still works.
            mod.__dict__["spam"] = "eggs"
        # Exception's __dict__ can be replaced, but not deleted
        class Exception1(Exception, Base):
            pass
        class Exception2(Base, Exception):
            pass
        for ExceptionType in Exception, Exception1, Exception2:
            e = ExceptionType()
            e.__dict__ = {"a": 1}
            self.assertEqual(e.a, 1)
            try:
                del e.__dict__
            except (TypeError, AttributeError):
                pass
            else:
                self.fail("%r's __dict__ can be deleted" % e)
    def test_pickles(self):
        # Testing pickling and copying new-style classes and objects...
        # Covers both pickle and cPickle, protocols 0 and 1, classes that use
        # __getnewargs__, __getstate__/__setstate__, mixed classic/new-style
        # inheritance, and copy.deepcopy of the same objects.
        import pickle, cPickle
        def sorteditems(d):
            # Deterministic dict comparison for Python 2 (no dict ordering).
            L = d.items()
            L.sort()
            return L
        # Pickle finds classes by module-level name, hence the globals.
        global C
        class C(object):
            def __init__(self, a, b):
                super(C, self).__init__()
                self.a = a
                self.b = b
            def __repr__(self):
                return "C(%r, %r)" % (self.a, self.b)
        global C1
        class C1(list):
            def __new__(cls, a, b):
                return super(C1, cls).__new__(cls)
            def __getnewargs__(self):
                return (self.a, self.b)
            def __init__(self, a, b):
                self.a = a
                self.b = b
            def __repr__(self):
                return "C1(%r, %r)<%r>" % (self.a, self.b, list(self))
        global C2
        class C2(int):
            def __new__(cls, a, b, val=0):
                return super(C2, cls).__new__(cls, val)
            def __getnewargs__(self):
                return (self.a, self.b, int(self))
            def __init__(self, a, b, val=0):
                self.a = a
                self.b = b
            def __repr__(self):
                return "C2(%r, %r)<%r>" % (self.a, self.b, int(self))
        global C3
        class C3(object):
            def __init__(self, foo):
                self.foo = foo
            def __getstate__(self):
                return self.foo
            def __setstate__(self, foo):
                self.foo = foo
        global C4classic, C4
        class C4classic: # classic
            pass
        class C4(C4classic, object): # mixed inheritance
            pass
        for p in pickle, cPickle:
            for bin in 0, 1:
                # Classes themselves pickle by reference.
                for cls in C, C1, C2:
                    s = p.dumps(cls, bin)
                    cls2 = p.loads(s)
                    self.assert_(cls2 is cls)
                # Instances round-trip with class, dict contents and repr
                # (repr also checks list/int payload via __getnewargs__).
                a = C1(1, 2); a.append(42); a.append(24)
                b = C2("hello", "world", 42)
                s = p.dumps((a, b), bin)
                x, y = p.loads(s)
                self.assertEqual(x.__class__, a.__class__)
                self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__))
                self.assertEqual(y.__class__, b.__class__)
                self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__))
                self.assertEqual(repr(x), repr(a))
                self.assertEqual(repr(y), repr(b))
                # Test for __getstate__ and __setstate__ on new style class
                u = C3(42)
                s = p.dumps(u, bin)
                v = p.loads(s)
                self.assertEqual(u.__class__, v.__class__)
                self.assertEqual(u.foo, v.foo)
                # Test for picklability of hybrid class
                u = C4()
                u.foo = 42
                s = p.dumps(u, bin)
                v = p.loads(s)
                self.assertEqual(u.__class__, v.__class__)
                self.assertEqual(u.foo, v.foo)
        # Testing copy.deepcopy()
        import copy
        for cls in C, C1, C2:
            cls2 = copy.deepcopy(cls)
            self.assert_(cls2 is cls)
        a = C1(1, 2); a.append(42); a.append(24)
        b = C2("hello", "world", 42)
        x, y = copy.deepcopy((a, b))
        self.assertEqual(x.__class__, a.__class__)
        self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__))
        self.assertEqual(y.__class__, b.__class__)
        self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__))
        self.assertEqual(repr(x), repr(a))
        self.assertEqual(repr(y), repr(b))
def test_pickle_slots(self):
# Testing pickling of classes with __slots__ ...
import pickle, cPickle
# Pickling of classes with __slots__ but without __getstate__ should fail
global B, C, D, E
class B(object):
pass
for base in [object, B]:
class C(base):
__slots__ = ['a']
class D(C):
pass
try:
pickle.dumps(C())
except TypeError:
pass
else:
self.fail("should fail: pickle C instance - %s" % base)
try:
cPickle.dumps(C())
except TypeError:
pass
else:
self.fail("should fail: cPickle C instance - %s" % base)
try:
pickle.dumps(C())
except TypeError:
pass
else:
self.fail("should fail: pickle D instance - %s" % base)
try:
cPickle.dumps(D())
except TypeError:
pass
else:
self.fail("should fail: cPickle D instance - %s" % base)
# Give C a nice generic __getstate__ and __setstate__
class C(base):
__slots__ = ['a']
def __getstate__(self):
try:
d = self.__dict__.copy()
except AttributeError:
d = {}
for cls in self.__class__.__mro__:
for sn in cls.__dict__.get('__slots__', ()):
try:
d[sn] = getattr(self, sn)
except AttributeError:
pass
return d
def __setstate__(self, d):
for k, v in d.items():
setattr(self, k, v)
class D(C):
pass
# Now it should work
x = C()
y = pickle.loads(pickle.dumps(x))
self.assertEqual(hasattr(y, 'a'), 0)
y = cPickle.loads(cPickle.dumps(x))
self.assertEqual(hasattr(y, 'a'), 0)
x.a = 42
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a, 42)
y = cPickle.loads(cPickle.dumps(x))
self.assertEqual(y.a, 42)
x = D()
x.a = 42
x.b = 100
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a + y.b, 142)
y = cPickle.loads(cPickle.dumps(x))
self.assertEqual(y.a + y.b, 142)
# A subclass that adds a slot should also work
class E(C):
__slots__ = ['b']
x = E()
x.a = 42
x.b = "foo"
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a, x.a)
self.assertEqual(y.b, x.b)
y = cPickle.loads(cPickle.dumps(x))
self.assertEqual(y.a, x.a)
self.assertEqual(y.b, x.b)
    def test_binary_operator_override(self):
        # Testing overrides of binary operations...
        # __add__/__radd__ and two/three-argument __pow__/__rpow__ overridden
        # on an int subclass must be honored in every operand order.
        class I(int):
            def __repr__(self):
                return "I(%r)" % int(self)
            def __add__(self, other):
                return I(int(self) + int(other))
            __radd__ = __add__
            def __pow__(self, other, mod=None):
                if mod is None:
                    return I(pow(int(self), int(other)))
                else:
                    return I(pow(int(self), int(other), int(mod)))
            def __rpow__(self, other, mod=None):
                if mod is None:
                    # NOTE(review): this branch passes mod (None) straight
                    # through as pow()'s third argument rather than using the
                    # two-argument form as __pow__ does — pow(x, y, None)
                    # behaves like pow(x, y), so the result is the same.
                    return I(pow(int(other), int(self), mod))
                else:
                    return I(pow(int(other), int(self), int(mod)))
        self.assertEqual(repr(I(1) + I(2)), "I(3)")
        self.assertEqual(repr(I(1) + 2), "I(3)")
        self.assertEqual(repr(1 + I(2)), "I(3)")
        self.assertEqual(repr(I(2) ** I(3)), "I(8)")
        self.assertEqual(repr(2 ** I(3)), "I(8)")
        self.assertEqual(repr(I(2) ** 3), "I(8)")
        self.assertEqual(repr(pow(I(2), I(3), I(5))), "I(3)")
        # Defining a str subclass with a custom __eq__ (and disabled hash);
        # no assertions follow here — only the definition is exercised.
        class S(str):
            def __eq__(self, other):
                return self.lower() == other.lower()
            __hash__ = None # Silence Py3k warning
    def test_subclass_propagation(self):
        # Testing propagation of slot functions to subclasses...
        # Adding/removing __hash__, __getattribute__ and __getattr__ on base
        # classes must immediately affect lookups on an already-created
        # instance of a diamond subclass D(B, C) with bases B, C over A.
        class A(object):
            pass
        class B(A):
            pass
        class C(A):
            pass
        class D(B, C):
            pass
        d = D()
        orig_hash = hash(d) # related to id(d) in platform-dependent ways
        # MRO is D, B, C, A: the most derived class providing __hash__ wins,
        # and deleting it re-exposes the next one in MRO order.
        A.__hash__ = lambda self: 42
        self.assertEqual(hash(d), 42)
        C.__hash__ = lambda self: 314
        self.assertEqual(hash(d), 314)
        B.__hash__ = lambda self: 144
        self.assertEqual(hash(d), 144)
        D.__hash__ = lambda self: 100
        self.assertEqual(hash(d), 100)
        # __hash__ = None marks the class unhashable at each level.
        D.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del D.__hash__
        self.assertEqual(hash(d), 144)
        B.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del B.__hash__
        self.assertEqual(hash(d), 314)
        C.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del C.__hash__
        self.assertEqual(hash(d), 42)
        A.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del A.__hash__
        self.assertEqual(hash(d), orig_hash)
        d.foo = 42
        d.bar = 42
        self.assertEqual(d.foo, 42)
        self.assertEqual(d.bar, 42)
        # Installing __getattribute__ on A intercepts "foo" even though the
        # instance has its own d.foo; "bar" falls through to the default.
        def __getattribute__(self, name):
            if name == "foo":
                return 24
            return object.__getattribute__(self, name)
        A.__getattribute__ = __getattribute__
        self.assertEqual(d.foo, 24)
        self.assertEqual(d.bar, 42)
        # __getattr__ is only consulted when normal lookup fails.
        def __getattr__(self, name):
            if name in ("spam", "foo", "bar"):
                return "hello"
            raise AttributeError, name
        B.__getattr__ = __getattr__
        self.assertEqual(d.spam, "hello")
        self.assertEqual(d.foo, 24)
        self.assertEqual(d.bar, 42)
        del A.__getattribute__
        self.assertEqual(d.foo, 42)
        del d.foo
        self.assertEqual(d.foo, "hello")
        self.assertEqual(d.bar, 42)
        del B.__getattr__
        try:
            d.foo
        except AttributeError:
            pass
        else:
            self.fail("d.foo should be undefined now")
        # Test a nasty bug in recurse_down_subclasses()
        # (setting a slot method after a subclass was garbage-collected
        # used to crash the interpreter).
        import gc
        class A(object):
            pass
        class B(A):
            pass
        del B
        gc.collect()
        A.__setitem__ = lambda *a: None # crash
    def test_buffer_inheritance(self):
        # Testing that buffer interface is inherited ...
        # str and unicode subclasses must keep the (character) buffer
        # interface of the base type; int subclasses must not gain one.
        import binascii
        # SF bug [#470040] ParseTuple t# vs subclasses.
        class MyStr(str):
            pass
        base = 'abc'
        m = MyStr(base)
        # b2a_hex uses the buffer interface to get its argument's value, via
        # PyArg_ParseTuple 't#' code.
        self.assertEqual(binascii.b2a_hex(m), binascii.b2a_hex(base))
        # It's not clear that unicode will continue to support the character
        # buffer interface, and this test will fail if that's taken away.
        class MyUni(unicode):
            pass
        base = u'abc'
        m = MyUni(base)
        self.assertEqual(binascii.b2a_hex(m), binascii.b2a_hex(base))
        class MyInt(int):
            pass
        m = MyInt(42)
        try:
            binascii.b2a_hex(m)
            self.fail('subclass of int should not have a buffer interface')
        except TypeError:
            pass
    def test_str_of_str_subclass(self):
        # Testing __str__ defined in subclass of str ...
        # str() and print must call the subclass's __str__ (hex-encoding
        # here), while repr() calls its __repr__; both return plain str.
        import binascii
        import cStringIO
        class octetstring(str):
            def __str__(self):
                return binascii.b2a_hex(self)
            def __repr__(self):
                return self + " repr"
        o = octetstring('A')
        self.assertEqual(type(o), octetstring)
        self.assertEqual(type(str(o)), str)
        self.assertEqual(type(repr(o)), str)
        self.assertEqual(ord(o), 0x41)
        self.assertEqual(str(o), '41')
        self.assertEqual(repr(o), 'A repr')
        self.assertEqual(o.__str__(), '41')
        self.assertEqual(o.__repr__(), 'A repr')
        capture = cStringIO.StringIO()
        # Calling str() or not exercises different internal paths.
        print >> capture, o
        print >> capture, str(o)
        self.assertEqual(capture.getvalue(), '41\n41\n')
        capture.close()
def test_keyword_arguments(self):
# Testing keyword arguments to __init__, __call__...
def f(a): return a
self.assertEqual(f.__call__(a=42), 42)
a = []
list.__init__(a, sequence=[0, 1, 2])
self.assertEqual(a, [0, 1, 2])
    def test_recursive_call(self):
        # Testing recursive __call__() by setting to instance of class...
        # Calling A() looks up __call__ on the instance stored in the class,
        # which is itself callable only via __call__ -> infinite recursion.
        class A(object):
            pass
        A.__call__ = A()
        try:
            A()()
        except RuntimeError:
            pass
        else:
            self.fail("Recursion limit should have been reached for __call__()")

    def test_delete_hook(self):
        # Testing __del__ hook...
        log = []
        class C(object):
            def __del__(self):
                log.append(1)
        c = C()
        self.assertEqual(log, [])
        del c
        self.assertEqual(log, [1])
        # 'del d[0]' is item deletion, unrelated to __del__; it must fail.
        class D(object): pass
        d = D()
        try: del d[0]
        except TypeError: pass
        else: self.fail("invalid del() didn't raise TypeError")

    def test_hash_inheritance(self):
        # Testing hash of mutable subclasses...
        # Subclasses of mutable builtins stay unhashable.
        class mydict(dict):
            pass
        d = mydict()
        try:
            hash(d)
        except TypeError:
            pass
        else:
            self.fail("hash() of dict subclass should fail")
        class mylist(list):
            pass
        d = mylist()
        try:
            hash(d)
        except TypeError:
            pass
        else:
            self.fail("hash() of list subclass should fail")

    def test_str_operations(self):
        # Error cases for str operators and %-formatting.
        try: 'a' + 5
        except TypeError: pass
        else: self.fail("'' + 5 doesn't raise TypeError")
        try: ''.split('')
        except ValueError: pass
        else: self.fail("''.split('') doesn't raise ValueError")
        try: ''.join([0])
        except TypeError: pass
        else: self.fail("''.join([0]) doesn't raise TypeError")
        try: ''.rindex('5')
        except ValueError: pass
        else: self.fail("''.rindex('5') doesn't raise ValueError")
        try: '%(n)s' % None
        except TypeError: pass
        else: self.fail("'%(n)s' % None doesn't raise TypeError")
        try: '%(n' % {}
        except ValueError: pass
        else: self.fail("'%(n' % {} '' doesn't raise ValueError")
        try: '%*s' % ('abc')
        except TypeError: pass
        else: self.fail("'%*s' % ('abc') doesn't raise TypeError")
        try: '%*.*s' % ('abc', 5)
        except TypeError: pass
        else: self.fail("'%*.*s' % ('abc', 5) doesn't raise TypeError")
        try: '%s' % (1, 2)
        except TypeError: pass
        else: self.fail("'%s' % (1, 2) doesn't raise TypeError")
        try: '%' % None
        except ValueError: pass
        else: self.fail("'%' % None doesn't raise ValueError")
        self.assertEqual('534253'.isdigit(), 1)
        self.assertEqual('534253x'.isdigit(), 0)
        self.assertEqual('%c' % 5, '\x05')
        self.assertEqual('%c' % '5', '5')
    def test_deepcopy_recursive(self):
        # Testing deepcopy of recursive objects...
        class Node:
            pass
        a = Node()
        b = Node()
        a.b = b
        b.a = a
        z = deepcopy(a) # This blew up before

    def test_unintialized_modules(self):
        # Testing uninitialized module objects...
        # M.__new__ gives a module whose __init__ never ran: no name, no dict.
        from types import ModuleType as M
        m = M.__new__(M)
        str(m)
        self.assertEqual(hasattr(m, "__name__"), 0)
        self.assertEqual(hasattr(m, "__file__"), 0)
        self.assertEqual(hasattr(m, "foo"), 0)
        self.assertEqual(m.__dict__, None)
        # Setting any attribute lazily materializes the module dict.
        m.foo = 1
        self.assertEqual(m.__dict__, {"foo": 1})

    def test_funny_new(self):
        # Testing __new__ returning something unexpected...
        # __new__ may return a non-instance (skips __init__) or an instance
        # of a subclass (still runs that class's __init__).
        class C(object):
            def __new__(cls, arg):
                if isinstance(arg, str): return [1, 2, 3]
                elif isinstance(arg, int): return object.__new__(D)
                else: return object.__new__(cls)
        class D(C):
            def __init__(self, arg):
                self.foo = arg
        self.assertEqual(C("1"), [1, 2, 3])
        self.assertEqual(D("1"), [1, 2, 3])
        d = D(None)
        self.assertEqual(d.foo, None)
        d = C(1)
        self.assertEqual(isinstance(d, D), True)
        self.assertEqual(d.foo, 1)
        d = D(1)
        self.assertEqual(isinstance(d, D), True)
        self.assertEqual(d.foo, 1)

    def test_imul_bug(self):
        # Testing for __imul__ problems...
        # SF bug 544647
        # __imul__ must receive the right-hand operand unconverted, for every
        # operand type (float, int, long, bignum, None, str).
        class C(object):
            def __imul__(self, other):
                return (self, other)
        x = C()
        y = x
        y *= 1.0
        self.assertEqual(y, (x, 1.0))
        y = x
        y *= 2
        self.assertEqual(y, (x, 2))
        y = x
        y *= 3L
        self.assertEqual(y, (x, 3L))
        y = x
        y *= 1L<<100
        self.assertEqual(y, (x, 1L<<100))
        y = x
        y *= None
        self.assertEqual(y, (x, None))
        y = x
        y *= "foo"
        self.assertEqual(y, (x, "foo"))

    def test_copy_setstate(self):
        # Testing that copy.*copy() correctly uses __setstate__...
        import copy
        class C(object):
            def __init__(self, foo=None):
                self.foo = foo
                self.__foo = foo
            def setfoo(self, foo=None):
                self.foo = foo
            def getfoo(self):
                return self.__foo
            def __getstate__(self):
                return [self.foo]
            # 'self_' is the instance being restored; 'self' (the test case)
            # is captured from the enclosing scope so we can assert inside.
            def __setstate__(self_, lst):
                self.assertEqual(len(lst), 1)
                self_.__foo = self_.foo = lst[0]
        a = C(42)
        a.setfoo(24)
        self.assertEqual(a.foo, 24)
        self.assertEqual(a.getfoo(), 42)
        b = copy.copy(a)
        self.assertEqual(b.foo, 24)
        self.assertEqual(b.getfoo(), 24)
        b = copy.deepcopy(a)
        self.assertEqual(b.foo, 24)
        self.assertEqual(b.getfoo(), 24)
    def test_slices(self):
        # Testing cases with slices and overridden __getitem__ ...
        # For str/tuple/list and subclasses thereof, a[x:y] and a[slice(...)]
        # must agree, both through the operator and via __getitem__ directly.

        # Strings
        self.assertEqual("hello"[:4], "hell")
        self.assertEqual("hello"[slice(4)], "hell")
        self.assertEqual(str.__getitem__("hello", slice(4)), "hell")
        class S(str):
            def __getitem__(self, x):
                return str.__getitem__(self, x)
        self.assertEqual(S("hello")[:4], "hell")
        self.assertEqual(S("hello")[slice(4)], "hell")
        self.assertEqual(S("hello").__getitem__(slice(4)), "hell")

        # Tuples
        self.assertEqual((1,2,3)[:2], (1,2))
        self.assertEqual((1,2,3)[slice(2)], (1,2))
        self.assertEqual(tuple.__getitem__((1,2,3), slice(2)), (1,2))
        class T(tuple):
            def __getitem__(self, x):
                return tuple.__getitem__(self, x)
        self.assertEqual(T((1,2,3))[:2], (1,2))
        self.assertEqual(T((1,2,3))[slice(2)], (1,2))
        self.assertEqual(T((1,2,3)).__getitem__(slice(2)), (1,2))

        # Lists
        self.assertEqual([1,2,3][:2], [1,2])
        self.assertEqual([1,2,3][slice(2)], [1,2])
        self.assertEqual(list.__getitem__([1,2,3], slice(2)), [1,2])
        class L(list):
            def __getitem__(self, x):
                return list.__getitem__(self, x)
        self.assertEqual(L([1,2,3])[:2], [1,2])
        self.assertEqual(L([1,2,3])[slice(2)], [1,2])
        self.assertEqual(L([1,2,3]).__getitem__(slice(2)), [1,2])

        # Now do lists and __setitem__
        a = L([1,2,3])
        a[slice(1, 3)] = [3,2]
        self.assertEqual(a, [1,3,2])
        a[slice(0, 2, 1)] = [3,1]
        self.assertEqual(a, [3,1,2])
        a.__setitem__(slice(1, 3), [2,1])
        self.assertEqual(a, [3,2,1])
        a.__setitem__(slice(0, 2, 1), [2,3])
        self.assertEqual(a, [2,3,1])

    def test_subtype_resurrection(self):
        # Testing resurrection of new-style instance...
        class C(object):
            container = []
            def __del__(self):
                # resurrect the instance
                C.container.append(self)
        c = C()
        c.attr = 42
        # The most interesting thing here is whether this blows up, due to flawed
        # GC tracking logic in typeobject.c's call_finalizer() (a 2.2.1 bug).
        del c
        # If that didn't blow up, it's also interesting to see whether clearing
        # the last container slot works: that will attempt to delete c again,
        # which will cause c to get appended back to the container again "during"
        # the del.
        del C.container[-1]
        self.assertEqual(len(C.container), 1)
        self.assertEqual(C.container[-1].attr, 42)
        # Make c mortal again, so that the test framework with -l doesn't report
        # it as a leak.
        del C.__del__

    def test_slots_trash(self):
        # Testing slot trash...
        # Deallocating deeply nested slotted trash caused stack overflows
        class trash(object):
            __slots__ = ['x']
            def __init__(self, x):
                self.x = x
        o = None
        for i in xrange(50000):
            o = trash(o)
        del o

    def test_slots_multiple_inheritance(self):
        # SF bug 575229, multiple inheritance w/ slots dumps core
        class A(object):
            __slots__=()
        class B(object):
            pass
        class C(A,B) :
            __slots__=()
        # B contributes __dict__/__weakref__, so C keeps B's layout size and
        # instances remain assignable despite A's empty __slots__.
        self.assertEqual(C.__basicsize__, B.__basicsize__)
        self.assert_(hasattr(C, '__dict__'))
        self.assert_(hasattr(C, '__weakref__'))
        C().x = 2

    def test_rmul(self):
        # Testing correct invocation of __rmul__...
        # SF patch 592646
        class C(object):
            def __mul__(self, other):
                return "mul"
            def __rmul__(self, other):
                return "rmul"
        a = C()
        self.assertEqual(a*2, "mul")
        self.assertEqual(a*2.2, "mul")
        self.assertEqual(2*a, "rmul")
        self.assertEqual(2.2*a, "rmul")

    def test_ipow(self):
        # Testing correct invocation of __ipow__...
        # [SF bug 620179]
        class C(object):
            def __ipow__(self, other):
                pass
        a = C()
        a **= 2
    def test_mutable_bases(self):
        # Testing mutable bases...

        # stuff that should work:
        class C(object):
            pass
        class C2(object):
            def __getattribute__(self, attr):
                if attr == 'a':
                    return 2
                else:
                    return super(C2, self).__getattribute__(attr)
            def meth(self):
                return 1
        class D(C):
            pass
        class E(D):
            pass
        d = D()
        e = E()
        # Reassigning __bases__ retargets existing instances too.
        D.__bases__ = (C,)
        D.__bases__ = (C2,)
        self.assertEqual(d.meth(), 1)
        self.assertEqual(e.meth(), 1)
        self.assertEqual(d.a, 2)
        self.assertEqual(e.a, 2)
        self.assertEqual(C2.__subclasses__(), [D])

        # stuff that shouldn't:
        class L(list):
            pass

        try:
            L.__bases__ = (dict,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't turn list subclass into dict subclass")

        try:
            list.__bases__ = (dict,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't be able to assign to list.__bases__")

        try:
            D.__bases__ = (C2, list)
        except TypeError:
            pass
        else:
            assert 0, "best_base calculation found wanting"

        try:
            del D.__bases__
        except TypeError:
            pass
        else:
            self.fail("shouldn't be able to delete .__bases__")

        try:
            D.__bases__ = ()
        except TypeError, msg:
            if str(msg) == "a new-style class can't have only classic bases":
                self.fail("wrong error message for .__bases__ = ()")
        else:
            self.fail("shouldn't be able to set .__bases__ to ()")

        try:
            D.__bases__ = (D,)
        except TypeError:
            pass
        else:
            # actually, we'll have crashed by here...
            self.fail("shouldn't be able to create inheritance cycles")

        try:
            D.__bases__ = (C, C)
        except TypeError:
            pass
        else:
            self.fail("didn't detect repeated base classes")

        try:
            D.__bases__ = (E,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't be able to create inheritance cycles")

        # let's throw a classic class into the mix:
        class Classic:
            def meth2(self):
                return 3

        D.__bases__ = (C, Classic)

        self.assertEqual(d.meth2(), 3)
        self.assertEqual(e.meth2(), 3)
        # C2 is no longer a base, so its synthetic 'a' attribute is gone.
        try:
            d.a
        except AttributeError:
            pass
        else:
            self.fail("attribute should have vanished")

        try:
            D.__bases__ = (Classic,)
        except TypeError:
            pass
        else:
            self.fail("new-style class must have a new-style base")

    def test_mutable_bases_with_failing_mro(self):
        # Testing mutable bases with failing mro...
        # WorkOnce.mro() succeeds the first time (class creation) and raises
        # on the second call (triggered by the __bases__ assignment below).
        class WorkOnce(type):
            def __new__(self, name, bases, ns):
                self.flag = 0
                return super(WorkOnce, self).__new__(WorkOnce, name, bases, ns)
            def mro(self):
                if self.flag > 0:
                    raise RuntimeError, "bozo"
                else:
                    self.flag += 1
                    return type.mro(self)

        class WorkAlways(type):
            def mro(self):
                # this is here to make sure that .mro()s aren't called
                # with an exception set (which was possible at one point).
                # An error message will be printed in a debug build.
                # What's a good way to test for this?
                return type.mro(self)

        class C(object):
            pass

        class C2(object):
            pass

        class D(C):
            pass

        class E(D):
            pass

        class F(D):
            __metaclass__ = WorkOnce

        class G(D):
            __metaclass__ = WorkAlways

        # Immediate subclasses have their mro's adjusted in alphabetical
        # order, so E's will get adjusted before adjusting F's fails. We
        # check here that E's gets restored.

        E_mro_before = E.__mro__
        D_mro_before = D.__mro__

        try:
            D.__bases__ = (C2,)
        except RuntimeError:
            self.assertEqual(E.__mro__, E_mro_before)
            self.assertEqual(D.__mro__, D_mro_before)
        else:
            self.fail("exception not propagated")

    def test_mutable_bases_catch_mro_conflict(self):
        # Testing mutable bases catch mro conflict...
        class A(object):
            pass

        class B(object):
            pass

        class C(A, B):
            pass

        class D(A, B):
            pass

        class E(C, D):
            pass

        # (B, A) for C conflicts with D's (A, B) in E's linearization.
        try:
            C.__bases__ = (B, A)
        except TypeError:
            pass
        else:
            self.fail("didn't catch MRO conflict")

    def test_mutable_names(self):
        # Testing mutable names...
        class C(object):
            pass

        # C.__module__ could be 'test_descr' or '__main__'
        mod = C.__module__

        C.__name__ = 'D'
        self.assertEqual((C.__module__, C.__name__), (mod, 'D'))

        C.__name__ = 'D.E'
        self.assertEqual((C.__module__, C.__name__), (mod, 'D.E'))
    def test_subclass_right_op(self):
        # Testing correct dispatch of subclass overloading __r<op>__...

        # This code tests various cases where right-dispatch of a subclass
        # should be preferred over left-dispatch of a base class.

        # Case 1: subclass of int; this tests code in abstract.c::binary_op1()
        class B(int):
            def __floordiv__(self, other):
                return "B.__floordiv__"
            def __rfloordiv__(self, other):
                return "B.__rfloordiv__"
        self.assertEqual(B(1) // 1, "B.__floordiv__")
        self.assertEqual(1 // B(1), "B.__rfloordiv__")

        # Case 2: subclass of object; this is just the baseline for case 3
        class C(object):
            def __floordiv__(self, other):
                return "C.__floordiv__"
            def __rfloordiv__(self, other):
                return "C.__rfloordiv__"
        self.assertEqual(C() // 1, "C.__floordiv__")
        self.assertEqual(1 // C(), "C.__rfloordiv__")

        # Case 3: subclass of new-style class; here it gets interesting
        class D(C):
            def __floordiv__(self, other):
                return "D.__floordiv__"
            def __rfloordiv__(self, other):
                return "D.__rfloordiv__"
        self.assertEqual(D() // C(), "D.__floordiv__")
        self.assertEqual(C() // D(), "D.__rfloordiv__")

        # Case 4: this didn't work right in 2.2.2 and 2.3a1
        class E(C):
            pass
        # E inherits C's slots, so it is NOT treated as overriding them.
        self.assertEqual(E.__rfloordiv__, C.__rfloordiv__)
        self.assertEqual(E() // 1, "C.__floordiv__")
        self.assertEqual(1 // E(), "C.__rfloordiv__")
        self.assertEqual(E() // C(), "C.__floordiv__")
        self.assertEqual(C() // E(), "C.__floordiv__") # This one would fail

    def test_meth_class_get(self):
        # Testing __get__ method of METH_CLASS C methods...
        # Full coverage of descrobject.c::classmethod_get()

        # Baseline
        arg = [1, 2, 3]
        res = {1: None, 2: None, 3: None}
        self.assertEqual(dict.fromkeys(arg), res)
        self.assertEqual({}.fromkeys(arg), res)

        # Now get the descriptor
        descr = dict.__dict__["fromkeys"]

        # More baseline using the descriptor directly
        self.assertEqual(descr.__get__(None, dict)(arg), res)
        self.assertEqual(descr.__get__({})(arg), res)

        # Now check various error cases
        try:
            descr.__get__(None, None)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, None)")
        try:
            descr.__get__(42)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(42)")
        try:
            descr.__get__(None, 42)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, 42)")
        try:
            descr.__get__(None, int)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, int)")

    def test_isinst_isclass(self):
        # Testing proxy isinstance() and isclass()...
        # isinstance() must see through __getattribute__-based proxies by
        # consulting the proxied object's __class__.
        class Proxy(object):
            def __init__(self, obj):
                self.__obj = obj
            def __getattribute__(self, name):
                if name.startswith("_Proxy__"):
                    return object.__getattribute__(self, name)
                else:
                    return getattr(self.__obj, name)
        # Test with a classic class
        class C:
            pass
        a = C()
        pa = Proxy(a)
        self.assert_(isinstance(a, C))  # Baseline
        self.assert_(isinstance(pa, C)) # Test
        # Test with a classic subclass
        class D(C):
            pass
        a = D()
        pa = Proxy(a)
        self.assert_(isinstance(a, C))  # Baseline
        self.assert_(isinstance(pa, C)) # Test
        # Test with a new-style class
        class C(object):
            pass
        a = C()
        pa = Proxy(a)
        self.assert_(isinstance(a, C))  # Baseline
        self.assert_(isinstance(pa, C)) # Test
        # Test with a new-style subclass
        class D(C):
            pass
        a = D()
        pa = Proxy(a)
        self.assert_(isinstance(a, C))  # Baseline
        self.assert_(isinstance(pa, C)) # Test
    def test_proxy_super(self):
        # Testing super() for a proxy object...
        # super(C, self).f() inside C.f must work even when 'self' is a
        # proxy that merely forwards attribute access to a real C instance.
        class Proxy(object):
            def __init__(self, obj):
                self.__obj = obj
            def __getattribute__(self, name):
                if name.startswith("_Proxy__"):
                    return object.__getattribute__(self, name)
                else:
                    return getattr(self.__obj, name)

        class B(object):
            def f(self):
                return "B.f"

        class C(B):
            def f(self):
                return super(C, self).f() + "->C.f"

        obj = C()
        p = Proxy(obj)
        self.assertEqual(C.__dict__["f"](p), "B.f->C.f")

    def test_carloverre(self):
        # Testing prohibition of Carlo Verre's hack...
        # object.__setattr__/__delattr__ must refuse to bypass the
        # restrictions on builtin types such as str.
        try:
            object.__setattr__(str, "foo", 42)
        except TypeError:
            pass
        else:
            self.fail("Carlo Verre __setattr__ suceeded!")
        try:
            object.__delattr__(str, "lower")
        except TypeError:
            pass
        else:
            self.fail("Carlo Verre __delattr__ succeeded!")

    def test_weakref_segfault(self):
        # Testing weakref segfault...
        # SF 742911
        # A __del__ that dereferences a weakref back to the dying object
        # used to crash the interpreter.
        import weakref

        class Provoker:
            def __init__(self, referrent):
                self.ref = weakref.ref(referrent)

            def __del__(self):
                x = self.ref()

        class Oops(object):
            pass

        o = Oops()
        o.whatever = Provoker(o)
        del o

    def test_wrapper_segfault(self):
        # SF 927248: deeply nested wrappers could cause stack overflow
        f = lambda:None
        for i in xrange(1000000):
            f = f.__call__
        f = None

    def test_file_fault(self):
        # Testing sys.stdout is changed in getattr...
        # A print statement must survive sys.stdout being swapped out from
        # under it by the attribute lookup it performs.
        import sys
        class StdoutGuard:
            def __getattr__(self, attr):
                sys.stdout = sys.__stdout__
                raise RuntimeError("Premature access to sys.stdout.%s" % attr)
        sys.stdout = StdoutGuard()
        try:
            print "Oops!"
        except RuntimeError:
            pass

    def test_vicious_descriptor_nonsense(self):
        # Testing vicious_descriptor_nonsense...

        # A potential segfault spotted by Thomas Wouters in mail to
        # python-dev 2003-04-17, turned into an example & fixed by Michael
        # Hudson just less than four months later...

        # Evil() hashes/compares equal to the string 'attr', and its __eq__
        # deletes the class attribute mid-lookup.
        class Evil(object):
            def __hash__(self):
                return hash('attr')
            def __eq__(self, other):
                del C.attr
                return 0

        class Descr(object):
            def __get__(self, ob, type=None):
                return 1

        class C(object):
            attr = Descr()

        c = C()
        c.__dict__[Evil()] = 0

        self.assertEqual(c.attr, 1)
        # this makes a crash more likely:
        import gc; gc.collect()
        self.assertEqual(hasattr(c, 'attr'), False)

    def test_init(self):
        # SF 1155938
        # __init__ returning a non-None value must raise TypeError.
        class Foo(object):
            def __init__(self):
                return 10
        try:
            Foo()
        except TypeError:
            pass
        else:
            self.fail("did not test __init__() for None return")
    def test_method_wrapper(self):
        # Testing method-wrapper objects...
        # <type 'method-wrapper'> did not support any reflection before 2.5
        l = []
        self.assertEqual(l.__add__, l.__add__)
        self.assertEqual(l.__add__, [].__add__)
        self.assert_(l.__add__ != [5].__add__)
        self.assert_(l.__add__ != l.__mul__)
        self.assert_(l.__add__.__name__ == '__add__')
        self.assert_(l.__add__.__self__ is l)
        self.assert_(l.__add__.__objclass__ is list)
        self.assertEqual(l.__add__.__doc__, list.__add__.__doc__)
        # Wrappers bound to unhashable objects (lists) are unhashable...
        try:
            hash(l.__add__)
        except TypeError:
            pass
        else:
            self.fail("no TypeError from hash([].__add__)")

        # ...but wrappers bound to hashable objects (tuples) hash by value.
        t = ()
        t += (7,)
        self.assertEqual(t.__add__, (7,).__add__)
        self.assertEqual(hash(t.__add__), hash((7,).__add__))

    def test_not_implemented(self):
        # Testing NotImplemented...
        # all binary methods should be able to return a NotImplemented
        import sys
        import types
        import operator

        def specialmethod(self, other):
            return NotImplemented

        def check(expr, x, y):
            # Evaluate expr with the given operands; since every special
            # method returns NotImplemented, a TypeError is required.
            try:
                exec expr in {'x': x, 'y': y, 'operator': operator}
            except TypeError:
                pass
            else:
                self.fail("no TypeError from %r" % (expr,))

        N1 = sys.maxint + 1L    # might trigger OverflowErrors instead of
                                # TypeErrors
        N2 = sys.maxint         # if sizeof(int) < sizeof(long), might trigger
                                #   ValueErrors instead of TypeErrors
        for metaclass in [type, types.ClassType]:
            for name, expr, iexpr in [
                    ('__add__',      'x + y',                   'x += y'),
                    ('__sub__',      'x - y',                   'x -= y'),
                    ('__mul__',      'x * y',                   'x *= y'),
                    ('__truediv__',  'operator.truediv(x, y)',  None),
                    ('__floordiv__', 'operator.floordiv(x, y)', None),
                    ('__div__',      'x / y',                   'x /= y'),
                    ('__mod__',      'x % y',                   'x %= y'),
                    ('__divmod__',   'divmod(x, y)',            None),
                    ('__pow__',      'x ** y',                  'x **= y'),
                    ('__lshift__',   'x << y',                  'x <<= y'),
                    ('__rshift__',   'x >> y',                  'x >>= y'),
                    ('__and__',      'x & y',                   'x &= y'),
                    ('__or__',       'x | y',                   'x |= y'),
                    ('__xor__',      'x ^ y',                   'x ^= y'),
                    ('__coerce__',   'coerce(x, y)',            None)]:
                if name == '__coerce__':
                    rname = name
                else:
                    rname = '__r' + name[2:]
                A = metaclass('A', (), {name: specialmethod})
                B = metaclass('B', (), {rname: specialmethod})
                a = A()
                b = B()
                check(expr, a, a)
                check(expr, a, b)
                check(expr, b, a)
                check(expr, b, b)
                check(expr, a, N1)
                check(expr, a, N2)
                check(expr, N1, b)
                check(expr, N2, b)
                if iexpr:
                    check(iexpr, a, a)
                    check(iexpr, a, b)
                    check(iexpr, b, a)
                    check(iexpr, b, b)
                    check(iexpr, a, N1)
                    check(iexpr, a, N2)
                    iname = '__i' + name[2:]
                    C = metaclass('C', (), {iname: specialmethod})
                    c = C()
                    check(iexpr, c, a)
                    check(iexpr, c, b)
                    check(iexpr, c, N1)
                    check(iexpr, c, N2)

    def test_assign_slice(self):
        # ceval.c's assign_slice used to check for
        # tp->tp_as_sequence->sq_slice instead of
        # tp->tp_as_sequence->sq_ass_slice

        class C(object):
            def __setslice__(self, start, stop, value):
                self.value = value

        c = C()
        c[1:2] = 3
        self.assertEqual(c.value, 3)
class DictProxyTests(unittest.TestCase):
    """Tests for the dictproxy returned by a new-style class's __dict__."""

    def setUp(self):
        # A fresh class per test: its proxy dict has exactly five entries.
        class C(object):
            def meth(self):
                pass
        self.C = C

    def test_iter_keys(self):
        # Testing dict-proxy iterkeys...
        keys = [ key for key in self.C.__dict__.iterkeys() ]
        keys.sort()
        self.assertEquals(keys, ['__dict__', '__doc__', '__module__',
                                 '__weakref__', 'meth'])

    def test_iter_values(self):
        # Testing dict-proxy itervalues...
        values = [ values for values in self.C.__dict__.itervalues() ]
        self.assertEqual(len(values), 5)

    def test_iter_items(self):
        # Testing dict-proxy iteritems...
        keys = [ key for (key, value) in self.C.__dict__.iteritems() ]
        keys.sort()
        self.assertEqual(keys, ['__dict__', '__doc__', '__module__',
                                '__weakref__', 'meth'])

    # NOTE(review): the flattened source does not show which class this
    # method belongs to -- confirm nesting against the original file.
    def test_dict_type_with_metaclass(self):
        # Testing type of __dict__ when __metaclass__ set...
        class B(object):
            pass
        class M(type):
            pass
        class C:
            # In 2.3a1, C.__dict__ was a real dict rather than a dict proxy
            __metaclass__ = M
        self.assertEqual(type(C.__dict__), type(B.__dict__))
class PTypesLongInitTest(unittest.TestCase):
    # This is in its own TestCase so that it can be run before any other tests.
    def test_pytype_long_ready(self):
        # Testing SF bug 551412 ...

        # This dumps core when SF bug 551412 isn't fixed --
        # but only when test_descr.py is run separately.
        # (That can't be helped -- as soon as PyType_Ready()
        # is called for PyLong_Type, the bug is gone.)
        class UserLong(object):
            def __pow__(self, *args):
                pass
        try:
            pow(0L, UserLong(), 0L)
        except:
            # Only the absence of a crash matters here, not the outcome.
            pass

        # Another segfault only when run early
        # (before PyType_Ready(tuple) is called)
        type.mro(tuple)
def test_main():
    # Run all local test cases, with PTypesLongInitTest first (it must run
    # before any other test readies PyLong_Type / tuple -- see that class).
    test_support.run_unittest(PTypesLongInitTest, OperatorsTest,
                              ClassPropertiesAndMethods, DictProxyTests)

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
skg-net/ansible | lib/ansible/modules/storage/netapp/na_ontap_lun_map.py | 4 | 8502 | #!/usr/bin/python
""" this is lun mapping module
(c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: na_ontap_lun_map
short_description: Manage NetApp ONTAP lun maps
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (ng-ansibleteam@netapp.com)
description:
- Map and unmap luns on NetApp ONTAP.
options:
state:
description:
- Whether the specified lun should exist or not.
choices: ['present', 'absent']
default: present
initiator_group_name:
description:
- Initiator group to map to the given LUN.
required: true
path:
description:
- Path of the LUN.
required: true
vserver:
required: true
description:
- The name of the vserver to use.
lun_id:
description:
- LUN ID assigned for the map.
"""
EXAMPLES = """
- name: Create lun mapping
na_ontap_lun_map:
state: present
initiator_group_name: ansibleIgroup3234
path: /vol/iscsi_path/iscsi_lun
vserver: ci_dev
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Unmap Lun
na_ontap_lun_map:
state: absent
initiator_group_name: ansibleIgroup3234
path: /vol/iscsi_path/iscsi_lun
vserver: ci_dev
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
lun_node:
description: NetApp controller that is hosting the LUN.
returned: success
type: string
sample: node01
lun_ostype:
description: Specifies the OS of the host accessing the LUN.
returned: success
type: string
sample: vmware
lun_serial:
description: A unique, 12-byte, ASCII string used to identify the LUN.
returned: success
type: string
sample: 80E7/]LZp1Tt
lun_naa_id:
description: The Network Address Authority (NAA) identifier for the LUN.
returned: success
type: string
sample: 600a0980383045372f5d4c5a70315474
lun_state:
description: Online or offline status of the LUN.
returned: success
type: string
sample: online
lun_size:
description: Size of the LUN in bytes.
returned: success
type: int
sample: 2199023255552
"""
import codecs
import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapLUNMap(object):
    """Idempotently map or unmap an ONTAP LUN to/from an initiator group
    via ZAPI calls issued through the NetApp-Lib connection."""

    def __init__(self):
        # Shared connection options (hostname/username/password/...) come
        # from the common netapp fragment; module-specific options follow.
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            initiator_group_name=dict(required=True, type='str'),
            path=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
            lun_id=dict(required=False, type='str', default=None),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['path'])
            ],
            supports_check_mode=True
        )

        self.result = dict(
            changed=False,
        )

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.initiator_group_name = p['initiator_group_name']
        self.path = p['path']
        self.vserver = p['vserver']
        self.lun_id = p['lun_id']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)

    def get_lun_map(self):
        """
        Return details about the LUN map

        :return: dict holding the mapped 'lun_id' for our initiator group,
            or None when the LUN is not mapped to that group
        :rtype: dict or None
        """
        lun_info = netapp_utils.zapi.NaElement('lun-map-list-info')
        lun_info.add_new_child('path', self.path)
        result = self.server.invoke_successfully(lun_info, True)
        return_value = None
        igroups = result.get_child_by_name('initiator-groups')
        if igroups:
            for igroup_info in igroups.get_children():
                initiator_group_name = igroup_info.get_child_content('initiator-group-name')
                lun_id = igroup_info.get_child_content('lun-id')
                if initiator_group_name == self.initiator_group_name:
                    return_value = {
                        'lun_id': lun_id
                    }
                    break
        return return_value

    def get_lun(self):
        """
        Return details about the LUN

        :return: dict describing the LUN (node, ostype, serial, naa id,
            state, size), or None when no LUN exists at self.path
        :rtype: dict or None
        """
        # build the lun query
        query_details = netapp_utils.zapi.NaElement('lun-info')
        query_details.add_new_child('path', self.path)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        lun_query = netapp_utils.zapi.NaElement('lun-get-iter')
        lun_query.add_child_elem(query)

        # find lun using query
        result = self.server.invoke_successfully(lun_query, True)
        return_value = None
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            lun = result.get_child_by_name('attributes-list').get_child_by_name('lun-info')
            # NOTE(review): assumes 'serial-number' is always present in the
            # ZAPI reply -- confirm against the ONTAP API documentation.
            serial = lun.get_child_content('serial-number')
            # The NAA id is NetApp's IEEE OUI prefix plus the hex-encoded
            # serial.  codecs.encode() is used instead of the original
            # str.encode('hex'), which only exists on Python 2; this form
            # behaves identically on Python 2 and also works on Python 3.
            naa_id = '600a0980' + codecs.encode(serial.encode('ascii'), 'hex_codec').decode('ascii')
            # extract and assign lun information to return value
            return_value = {
                'lun_node': lun.get_child_content('node'),
                'lun_ostype': lun.get_child_content('multiprotocol-type'),
                'lun_serial': serial,
                'lun_naa_id': naa_id,
                'lun_state': lun.get_child_content('state'),
                'lun_size': lun.get_child_content('size'),
            }
        return return_value

    def create_lun_map(self):
        """
        Create LUN map: map self.path to self.initiator_group_name,
        optionally at an explicit lun-id; fail the module on ZAPI errors.
        """
        options = {'path': self.path, 'initiator-group': self.initiator_group_name}
        if self.lun_id is not None:
            options['lun-id'] = self.lun_id
        lun_map_create = netapp_utils.zapi.NaElement.create_node_with_children('lun-map', **options)

        try:
            self.server.invoke_successfully(lun_map_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error mapping lun %s of initiator_group_name %s: %s" %
                                  (self.path, self.initiator_group_name, to_native(e)),
                                  exception=traceback.format_exc())

    def delete_lun_map(self):
        """
        Unmap LUN map: remove the mapping of self.path from
        self.initiator_group_name; fail the module on ZAPI errors.
        """
        lun_map_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'lun-unmap', **{'path': self.path, 'initiator-group': self.initiator_group_name})

        try:
            self.server.invoke_successfully(lun_map_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error unmapping lun %s of initiator_group_name %s: %s" %
                                  (self.path, self.initiator_group_name, to_native(e)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Reconcile the requested state with the current mapping and exit
        the module with the result (check mode performs no changes)."""
        netapp_utils.ems_log_event("na_ontap_lun_map", self.server)
        lun_details = self.get_lun()
        lun_map_details = self.get_lun_map()

        if self.state == 'present' and lun_details:
            # Surface the LUN facts (node, serial, naa id, ...) in the result.
            self.result.update(lun_details)

        if self.state == 'present' and not lun_map_details:
            self.result['changed'] = True
            if not self.module.check_mode:
                self.create_lun_map()
        elif self.state == 'absent' and lun_map_details:
            self.result['changed'] = True
            if not self.module.check_mode:
                self.delete_lun_map()

        self.module.exit_json(**self.result)
def main():
    """Entry point: construct the LUN-map handler and apply the state."""
    NetAppOntapLUNMap().apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
chuan9/chromium-crosswalk | tools/win/split_link/install_split_link.py | 146 | 3104 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import _winreg
import os
import shutil
import subprocess
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def IsExe(fpath):
  """Return True if |fpath| names an existing, executable regular file."""
  return os.path.isfile(fpath) and os.access(fpath, os.X_OK)


def FindInPath(program):
  """Locate |program|: as given when it carries a directory part, otherwise
  by scanning the absolute entries of the PATH environment variable.

  Returns the full path to an executable match, or None when not found.
  """
  directory, _ = os.path.split(program)
  if directory:
    # An explicit path was supplied; accept it only if it is executable.
    if IsExe(program):
      return program
    return None
  for raw_entry in os.environ['PATH'].split(os.pathsep):
    entry = raw_entry.strip('"')
    candidate = os.path.join(entry, program)
    # Skip empty and relative PATH entries, exactly as the original did.
    if not entry or not os.path.isabs(entry):
      continue
    if IsExe(candidate):
      return candidate
  return None
def EscapeForCommandLineAndCString(path):
  """Quoted sufficiently to be passed on the compile command line as a define
  to be turned into a string in the target C program."""
  quoted = '"%s"' % path
  # Double backslashes first, then escape the quotes (order matters: doing
  # quotes first would have their new backslashes doubled as well).
  return quoted.replace('\\', '\\\\').replace('"', '\\"')
def main():
  """Back up the real link.exe, build the split_link.exe shim, and install
  the shim in place of link.exe, recording state in the registry.

  Must run from an elevated (Administrator) Visual Studio command prompt so
  that link.exe/mt.exe are on PATH and the VS install dir is writable.
  Returns 0 on success, 1 on any failure.
  """
  # Switch to our own dir.
  os.chdir(BASE_DIR)

  link = FindInPath('link.exe')
  mt = FindInPath('mt.exe')
  if not link or not mt:
    print("Couldn't find link.exe or mt.exe in PATH. "
          "Must run from Administrator Visual Studio Command Prompt.")
    return 1

  link_backup = os.path.join(os.path.split(link)[0], 'link.exe.split_link.exe')

  # Don't re-backup link.exe, so only copy link.exe to backup if it's
  # not there already.
  if not os.path.exists(link_backup):
    try:
      print 'Saving original link.exe...'
      shutil.copyfile(link, link_backup)
    except IOError:
      print(("Wasn't able to back up %s to %s. "
             "Not running with Administrator privileges?")
            % (link, link_backup))
      return 1

  # Build our linker shim.  The shim embeds the python interpreter path and
  # the split_link.py script path as C string defines.
  print 'Building split_link.exe...'
  split_link_py = os.path.abspath('split_link.py')
  script_path = EscapeForCommandLineAndCString(split_link_py)
  python = EscapeForCommandLineAndCString(sys.executable)
  subprocess.check_call('cl.exe /nologo /Ox /Zi /W4 /WX /D_UNICODE /DUNICODE'
                        ' /D_CRT_SECURE_NO_WARNINGS /EHsc split_link.cc'
                        ' /DPYTHON_PATH="%s"'
                        ' /DSPLIT_LINK_SCRIPT_PATH="%s"'
                        ' /link shell32.lib shlwapi.lib /out:split_link.exe' % (
                            python, script_path))

  # Copy shim into place and remember (in HKCU) where the backup and mt.exe
  # live, so the shim and the uninstaller can find them later.
  print 'Copying split_link.exe over link.exe...'
  try:
    shutil.copyfile('split_link.exe', link)
    _winreg.SetValue(_winreg.HKEY_CURRENT_USER,
                     'Software\\Chromium\\split_link_installed',
                     _winreg.REG_SZ,
                     link_backup)
    _winreg.SetValue(_winreg.HKEY_CURRENT_USER,
                     'Software\\Chromium\\split_link_mt_path',
                     _winreg.REG_SZ,
                     mt)
  except IOError:
    print("Wasn't able to copy split_link.exe over %s. "
          "Not running with Administrator privileges?" % link)
    return 1

  return 0


if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.